langchain: InvalidRequestError: Unrecognized request arguments supplied: function_call, functions

Issue with current documentation:

I am following the instructions from this doc: Structure answers with OpenAI functions

https://python.langchain.com/docs/use_cases/question_answering/integrations/openai_functions_retrieval_qa

But this gives the following error.

Code

from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma

from langchain.chat_models import AzureChatOpenAI
from langchain.chat_models import ChatOpenAI
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.prompts import PromptTemplate
from langchain.chains import create_qa_with_sources_chain

path_txt = r"C:\Users\a126291\OneDrive - AmerisourceBergen(ABC)\data\langchain\state_of_the_union.txt"

def get_config_dict():
    import os
    import yaml

    with open(os.path.expanduser('~/.config/config.yaml')) as fh:
        config = yaml.safe_load(fh)

    # openai
    keys = ["OPENAI_API_KEY","OPENAI_API_TYPE","OPENAI_API_BASE","OPENAI_API_VERSION"]
    for key in keys:
        os.environ[key] = config[key]  # config[key] rather than config.get(key): fail fast on a missing key instead of setting None

    return config

config = get_config_dict()

#========= qa chain
loader = TextLoader(path_txt, encoding="utf-8")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)

for i, text in enumerate(texts):
    text.metadata["source"] = f"{i}-pl"
    

embeddings = OpenAIEmbeddings(model="text-embedding-ada-002", chunk_size=1)

docsearch = Chroma.from_documents(texts, embeddings)

# vectorstore = Chroma.from_documents(texts, embeddings)
# retriever = vectorstore.as_retriever()

llm = AzureChatOpenAI(**config['kw_azure_llm'], temperature=0.4)

#------- query
qa_chain = create_qa_with_sources_chain(llm)

doc_prompt = PromptTemplate(
    template="Content: {page_content}\nSource: {source}",
    input_variables=["page_content", "source"],
)

final_qa_chain = StuffDocumentsChain(
    llm_chain=qa_chain,
    document_variable_name="context",
    document_prompt=doc_prompt,
)

retrieval_qa = RetrievalQA(
    retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain
)

query = "What did the president say about russia"
retrieval_qa.run(query)

Error: InvalidRequestError: Unrecognized request arguments supplied: function_call, functions

---------------------------------------------------------------------------
InvalidRequestError                       Traceback (most recent call last)
Cell In[36], line 69
     64 retrieval_qa = RetrievalQA(
     65     retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain
     66 )
     68 query = "What did the president say about russia"
---> 69 retrieval_qa.run(query)

File ~\venv\py311openai\Lib\site-packages\langchain\chains\base.py:290, in Chain.run(self, callbacks, tags, *args, **kwargs)
    288     if len(args) != 1:
    289         raise ValueError("`run` supports only one positional argument.")
--> 290     return self(args[0], callbacks=callbacks, tags=tags)[_output_key]
    292 if kwargs and not args:
    293     return self(kwargs, callbacks=callbacks, tags=tags)[_output_key]

File ~\venv\py311openai\Lib\site-packages\langchain\chains\base.py:166, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, include_run_info)
    164 except (KeyboardInterrupt, Exception) as e:
    165     run_manager.on_chain_error(e)
--> 166     raise e
    167 run_manager.on_chain_end(outputs)
    168 final_outputs: Dict[str, Any] = self.prep_outputs(
    169     inputs, outputs, return_only_outputs
    170 )

File ~\venv\py311openai\Lib\site-packages\langchain\chains\base.py:160, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, include_run_info)
    154 run_manager = callback_manager.on_chain_start(
    155     dumpd(self),
    156     inputs,
    157 )
    158 try:
    159     outputs = (
--> 160         self._call(inputs, run_manager=run_manager)
    161         if new_arg_supported
    162         else self._call(inputs)
    163     )
    164 except (KeyboardInterrupt, Exception) as e:
    165     run_manager.on_chain_error(e)

File ~\venv\py311openai\Lib\site-packages\langchain\chains\retrieval_qa\base.py:120, in BaseRetrievalQA._call(self, inputs, run_manager)
    117 question = inputs[self.input_key]
    119 docs = self._get_docs(question)
--> 120 answer = self.combine_documents_chain.run(
    121     input_documents=docs, question=question, callbacks=_run_manager.get_child()
    122 )
    124 if self.return_source_documents:
    125     return {self.output_key: answer, "source_documents": docs}

File ~\venv\py311openai\Lib\site-packages\langchain\chains\base.py:293, in Chain.run(self, callbacks, tags, *args, **kwargs)
    290     return self(args[0], callbacks=callbacks, tags=tags)[_output_key]
    292 if kwargs and not args:
--> 293     return self(kwargs, callbacks=callbacks, tags=tags)[_output_key]
    295 if not kwargs and not args:
    296     raise ValueError(
    297         "`run` supported with either positional arguments or keyword arguments,"
    298         " but none were provided."
    299     )

File ~\venv\py311openai\Lib\site-packages\langchain\chains\base.py:166, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, include_run_info)
    164 except (KeyboardInterrupt, Exception) as e:
    165     run_manager.on_chain_error(e)
--> 166     raise e
    167 run_manager.on_chain_end(outputs)
    168 final_outputs: Dict[str, Any] = self.prep_outputs(
    169     inputs, outputs, return_only_outputs
    170 )

File ~\venv\py311openai\Lib\site-packages\langchain\chains\base.py:160, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, include_run_info)
    154 run_manager = callback_manager.on_chain_start(
    155     dumpd(self),
    156     inputs,
    157 )
    158 try:
    159     outputs = (
--> 160         self._call(inputs, run_manager=run_manager)
    161         if new_arg_supported
    162         else self._call(inputs)
    163     )
    164 except (KeyboardInterrupt, Exception) as e:
    165     run_manager.on_chain_error(e)

File ~\venv\py311openai\Lib\site-packages\langchain\chains\combine_documents\base.py:84, in BaseCombineDocumentsChain._call(self, inputs, run_manager)
     82 # Other keys are assumed to be needed for LLM prediction
     83 other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
---> 84 output, extra_return_dict = self.combine_docs(
     85     docs, callbacks=_run_manager.get_child(), **other_keys
     86 )
     87 extra_return_dict[self.output_key] = output
     88 return extra_return_dict

File ~\venv\py311openai\Lib\site-packages\langchain\chains\combine_documents\stuff.py:87, in StuffDocumentsChain.combine_docs(self, docs, callbacks, **kwargs)
     85 inputs = self._get_inputs(docs, **kwargs)
     86 # Call predict on the LLM.
---> 87 return self.llm_chain.predict(callbacks=callbacks, **inputs), {}

File ~\venv\py311openai\Lib\site-packages\langchain\chains\llm.py:252, in LLMChain.predict(self, callbacks, **kwargs)
    237 def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
    238     """Format prompt with kwargs and pass to LLM.
    239 
    240     Args:
   (...)
    250             completion = llm.predict(adjective="funny")
    251     """
--> 252     return self(kwargs, callbacks=callbacks)[self.output_key]

File ~\venv\py311openai\Lib\site-packages\langchain\chains\base.py:166, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, include_run_info)
    164 except (KeyboardInterrupt, Exception) as e:
    165     run_manager.on_chain_error(e)
--> 166     raise e
    167 run_manager.on_chain_end(outputs)
    168 final_outputs: Dict[str, Any] = self.prep_outputs(
    169     inputs, outputs, return_only_outputs
    170 )

File ~\venv\py311openai\Lib\site-packages\langchain\chains\base.py:160, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, include_run_info)
    154 run_manager = callback_manager.on_chain_start(
    155     dumpd(self),
    156     inputs,
    157 )
    158 try:
    159     outputs = (
--> 160         self._call(inputs, run_manager=run_manager)
    161         if new_arg_supported
    162         else self._call(inputs)
    163     )
    164 except (KeyboardInterrupt, Exception) as e:
    165     run_manager.on_chain_error(e)

File ~\venv\py311openai\Lib\site-packages\langchain\chains\llm.py:92, in LLMChain._call(self, inputs, run_manager)
     87 def _call(
     88     self,
     89     inputs: Dict[str, Any],
     90     run_manager: Optional[CallbackManagerForChainRun] = None,
     91 ) -> Dict[str, str]:
---> 92     response = self.generate([inputs], run_manager=run_manager)
     93     return self.create_outputs(response)[0]

File ~\venv\py311openai\Lib\site-packages\langchain\chains\llm.py:102, in LLMChain.generate(self, input_list, run_manager)
    100 """Generate LLM result from inputs."""
    101 prompts, stop = self.prep_prompts(input_list, run_manager=run_manager)
--> 102 return self.llm.generate_prompt(
    103     prompts,
    104     stop,
    105     callbacks=run_manager.get_child() if run_manager else None,
    106     **self.llm_kwargs,
    107 )

File ~\venv\py311openai\Lib\site-packages\langchain\chat_models\base.py:167, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs)
    159 def generate_prompt(
    160     self,
    161     prompts: List[PromptValue],
   (...)
    164     **kwargs: Any,
    165 ) -> LLMResult:
    166     prompt_messages = [p.to_messages() for p in prompts]
--> 167     return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)

File ~\venv\py311openai\Lib\site-packages\langchain\chat_models\base.py:102, in BaseChatModel.generate(self, messages, stop, callbacks, tags, **kwargs)
    100 except (KeyboardInterrupt, Exception) as e:
    101     run_manager.on_llm_error(e)
--> 102     raise e
    103 llm_output = self._combine_llm_outputs([res.llm_output for res in results])
    104 generations = [res.generations for res in results]

File ~\venv\py311openai\Lib\site-packages\langchain\chat_models\base.py:94, in BaseChatModel.generate(self, messages, stop, callbacks, tags, **kwargs)
     90 new_arg_supported = inspect.signature(self._generate).parameters.get(
     91     "run_manager"
     92 )
     93 try:
---> 94     results = [
     95         self._generate(m, stop=stop, run_manager=run_manager, **kwargs)
     96         if new_arg_supported
     97         else self._generate(m, stop=stop)
     98         for m in messages
     99     ]
    100 except (KeyboardInterrupt, Exception) as e:
    101     run_manager.on_llm_error(e)

File ~\venv\py311openai\Lib\site-packages\langchain\chat_models\base.py:95, in <listcomp>(.0)
     90 new_arg_supported = inspect.signature(self._generate).parameters.get(
     91     "run_manager"
     92 )
     93 try:
     94     results = [
---> 95         self._generate(m, stop=stop, run_manager=run_manager, **kwargs)
     96         if new_arg_supported
     97         else self._generate(m, stop=stop)
     98         for m in messages
     99     ]
    100 except (KeyboardInterrupt, Exception) as e:
    101     run_manager.on_llm_error(e)

File ~\venv\py311openai\Lib\site-packages\langchain\chat_models\openai.py:359, in ChatOpenAI._generate(self, messages, stop, run_manager, **kwargs)
    351     message = _convert_dict_to_message(
    352         {
    353             "content": inner_completion,
   (...)
    356         }
    357     )
    358     return ChatResult(generations=[ChatGeneration(message=message)])
--> 359 response = self.completion_with_retry(messages=message_dicts, **params)
    360 return self._create_chat_result(response)

File ~\venv\py311openai\Lib\site-packages\langchain\chat_models\openai.py:307, in ChatOpenAI.completion_with_retry(self, **kwargs)
    303 @retry_decorator
    304 def _completion_with_retry(**kwargs: Any) -> Any:
    305     return self.client.create(**kwargs)
--> 307 return _completion_with_retry(**kwargs)

File ~\venv\py311openai\Lib\site-packages\tenacity\__init__.py:289, in BaseRetrying.wraps.<locals>.wrapped_f(*args, **kw)
    287 @functools.wraps(f)
    288 def wrapped_f(*args: t.Any, **kw: t.Any) -> t.Any:
--> 289     return self(f, *args, **kw)

File ~\venv\py311openai\Lib\site-packages\tenacity\__init__.py:379, in Retrying.__call__(self, fn, *args, **kwargs)
    377 retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
    378 while True:
--> 379     do = self.iter(retry_state=retry_state)
    380     if isinstance(do, DoAttempt):
    381         try:

File ~\venv\py311openai\Lib\site-packages\tenacity\__init__.py:314, in BaseRetrying.iter(self, retry_state)
    312 is_explicit_retry = fut.failed and isinstance(fut.exception(), TryAgain)
    313 if not (is_explicit_retry or self.retry(retry_state)):
--> 314     return fut.result()
    316 if self.after is not None:
    317     self.after(retry_state)

File ~\AppData\Local\Programs\Python\Python311\Lib\concurrent\futures\_base.py:449, in Future.result(self, timeout)
    447     raise CancelledError()
    448 elif self._state == FINISHED:
--> 449     return self.__get_result()
    451 self._condition.wait(timeout)
    453 if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:

File ~\AppData\Local\Programs\Python\Python311\Lib\concurrent\futures\_base.py:401, in Future.__get_result(self)
    399 if self._exception:
    400     try:
--> 401         raise self._exception
    402     finally:
    403         # Break a reference cycle with the exception in self._exception
    404         self = None

File ~\venv\py311openai\Lib\site-packages\tenacity\__init__.py:382, in Retrying.__call__(self, fn, *args, **kwargs)
    380 if isinstance(do, DoAttempt):
    381     try:
--> 382         result = fn(*args, **kwargs)
    383     except BaseException:  # noqa: B902
    384         retry_state.set_exception(sys.exc_info())  # type: ignore[arg-type]

File ~\venv\py311openai\Lib\site-packages\langchain\chat_models\openai.py:305, in ChatOpenAI.completion_with_retry.<locals>._completion_with_retry(**kwargs)
    303 @retry_decorator
    304 def _completion_with_retry(**kwargs: Any) -> Any:
--> 305     return self.client.create(**kwargs)

File ~\venv\py311openai\Lib\site-packages\openai\api_resources\chat_completion.py:25, in ChatCompletion.create(cls, *args, **kwargs)
     23 while True:
     24     try:
---> 25         return super().create(*args, **kwargs)
     26     except TryAgain as e:
     27         if timeout is not None and time.time() > start + timeout:

File ~\venv\py311openai\Lib\site-packages\openai\api_resources\abstract\engine_api_resource.py:153, in EngineAPIResource.create(cls, api_key, api_base, api_type, request_id, api_version, organization, **params)
    127 @classmethod
    128 def create(
    129     cls,
   (...)
    136     **params,
    137 ):
    138     (
    139         deployment_id,
    140         engine,
   (...)
    150         api_key, api_base, api_type, api_version, organization, **params
    151     )
--> 153     response, _, api_key = requestor.request(
    154         "post",
    155         url,
    156         params=params,
    157         headers=headers,
    158         stream=stream,
    159         request_id=request_id,
    160         request_timeout=request_timeout,
    161     )
    163     if stream:
    164         # must be an iterator
    165         assert not isinstance(response, OpenAIResponse)

File ~\venv\py311openai\Lib\site-packages\openai\api_requestor.py:298, in APIRequestor.request(self, method, url, params, headers, files, stream, request_id, request_timeout)
    277 def request(
    278     self,
    279     method,
   (...)
    286     request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
    287 ) -> Tuple[Union[OpenAIResponse, Iterator[OpenAIResponse]], bool, str]:
    288     result = self.request_raw(
    289         method.lower(),
    290         url,
   (...)
    296         request_timeout=request_timeout,
    297     )
--> 298     resp, got_stream = self._interpret_response(result, stream)
    299     return resp, got_stream, self.api_key

File ~\venv\py311openai\Lib\site-packages\openai\api_requestor.py:700, in APIRequestor._interpret_response(self, result, stream)
    692     return (
    693         self._interpret_response_line(
    694             line, result.status_code, result.headers, stream=True
    695         )
    696         for line in parse_stream(result.iter_lines())
    697     ), True
    698 else:
    699     return (
--> 700         self._interpret_response_line(
    701             result.content.decode("utf-8"),
    702             result.status_code,
    703             result.headers,
    704             stream=False,
    705         ),
    706         False,
    707     )

File ~\venv\py311openai\Lib\site-packages\openai\api_requestor.py:763, in APIRequestor._interpret_response_line(self, rbody, rcode, rheaders, stream)
    761 stream_error = stream and "error" in resp.data
    762 if stream_error or not 200 <= rcode < 300:
--> 763     raise self.handle_error_response(
    764         rbody, rcode, resp.data, rheaders, stream_error=stream_error
    765     )
    766 return resp

InvalidRequestError: Unrecognized request arguments supplied: function_call, functions
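
For context on where these arguments come from: they are not passed by the user code at all. create_qa_with_sources_chain builds an LLMChain whose llm_kwargs carry an OpenAI function definition (visible as **self.llm_kwargs in the LLMChain.generate frame above), so every request to the model includes the functions and function_call arguments. The endpoint, not the run() call, is rejecting them, which points at a deployment or API version without function-calling support. A minimal sketch that reproduces the failure directly with the legacy openai (<1.0) SDK, assuming an Azure endpoint pinned to a pre-function-calling API version such as "2023-03-15-preview"; the function schema below is illustrative only, not the one the chain actually sends:

import openai

# Placeholders for an Azure resource still on an older API version.
openai.api_type = "azure"
openai.api_base = "https://<your-resource>.openai.azure.com/"
openai.api_version = "2023-03-15-preview"  # predates function-calling support
openai.api_key = "<your-key>"

openai.ChatCompletion.create(
    engine="gpt-35-turbo",  # placeholder deployment name
    messages=[{"role": "user", "content": "What did the president say about russia"}],
    # The two arguments below are what Azure rejects on old API versions:
    # InvalidRequestError: Unrecognized request arguments supplied: function_call, functions
    functions=[{
        "name": "answer_with_sources",
        "description": "Answer the question and cite sources.",
        "parameters": {
            "type": "object",
            "properties": {
                "answer": {"type": "string"},
                "sources": {"type": "array", "items": {"type": "string"}},
            },
            "required": ["answer", "sources"],
        },
    }],
    function_call={"name": "answer_with_sources"},
)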

Idea or request for content:

No response

About this issue

  • State: open
  • Created a year ago
  • Reactions: 1
  • Comments: 20

Most upvoted comments

Answer generated by a 🤖

Answer

Hello,

The error message “InvalidRequestError: Unrecognized request arguments supplied: function_call, functions” suggests that the function retrieval_qa.run() is being called with incorrect arguments. In your case, you are passing the query string directly to the run() function.

However, based on the context shared from the test_retrieval_qa.py file and the new context from the base.py file, the run() function expects a dictionary with a specific key-value pair, not just any dictionary. The key should be “query” and the value should be the question string. In the example provided in the context, the chain is called with a dictionary:

res = indexqa({'query': 'This is my query'})

To fix the error, you should modify your function call to match this format. Here’s how you can do it:

query = "What did the president say about russia"
retrieval_qa.run({"query": query})

Please ensure that the query is a string and it is passed as a value to the “query” key in the dictionary.

If you continue to experience issues, please provide more information about the version of LangChain you are using and any other relevant details about your environment.

Best, Dosu


Use API version “2023-07-01-preview” and the 0613 version of the model. On Azure OpenAI, the functions and function_call request arguments are only accepted when both hold: the deployment runs a 0613 (or later) model and the request uses that API version or newer.
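
A minimal sketch of that setup with LangChain’s AzureChatOpenAI, assuming a chat deployment named "gpt-35-turbo-0613" (a placeholder; use your own deployment name from the Azure portal):

from langchain.chat_models import AzureChatOpenAI

# All endpoint values below are placeholders for your own Azure resource.
llm = AzureChatOpenAI(
    openai_api_base="https://<your-resource>.openai.azure.com/",
    openai_api_version="2023-07-01-preview",  # first API version that accepts functions/function_call
    deployment_name="gpt-35-turbo-0613",      # must be a 0613-or-later model deployment
    openai_api_key="<your-key>",
    temperature=0.4,
)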

It wasn’t a problem a while ago, but now the problem has reappeared:

openai.error.InvalidRequestError: Unrecognized request argument supplied: functions

Answer generated by a 🤖

Answer

Based on the context provided, it seems like you’re encountering the error because you’re passing the query directly to the run method. The run method in the Chain class, which BaseRetrievalQA extends, expects a dictionary as its argument, where the key is the input_key of the BaseRetrievalQA instance and the value is the query string. In the BaseRetrievalQA class, the input_key is set to “query”.

So, instead of calling retrieval_qa.run(query), you should call retrieval_qa.run({"query": query}). Here is the corrected code:

query = "What did the president say about russia"
result = retrieval_qa.run({"query": query})

This should resolve the “InvalidRequestError” you are encountering. If you continue to face issues, please provide more details about your code and the steps you’re following so I can assist you better.


🤖

Based on the error message you’re encountering, it seems like you’re passing arguments that are not recognized by the function you’re calling. The error “InvalidRequestError: Unrecognized request arguments supplied: function_call, functions” suggests that function_call and functions are not recognized arguments.

In the LangChain library, functions such as similarity_search_with_score_by_vector, asimilarity_search_by_vector, _asearch_with_score_by_vector, amax_marginal_relevance_search, max_marginal_relevance_search_with_score_by_vector and others accept a specific set of arguments. These include embedding, k, filter, search_params, offset, score_threshold, consistency, and **kwargs. The **kwargs argument is used to pass any additional named arguments to the QdrantClient.search() function. However, function_call and functions are not recognized by this function.

As for the RetrievalQA.run() method, it’s not explicitly defined in the provided context. However, it’s likely that this method is inherited from the Chain base class or another superclass not shown in the context. The context does show the _call() and _acall() methods, which are likely called by the run() method. These methods take inputs (a dictionary where the key is the input_key and the value is the question to be answered) and run_manager (an optional instance of CallbackManagerForChainRun or AsyncCallbackManagerForChainRun for the _call() and _acall() methods respectively). If not provided, a no-operation manager is used.

To resolve your issue, ensure that you are only passing recognized arguments to the function. If function_call and functions are not necessary for your query, you should remove them. If they are necessary, you should check the documentation for QdrantClient.search() to see how to correctly pass these arguments.


I am still having this same problem. Does anyone know how to fix this?

I updated the code to:

result = retrieval_qa.run({"query": query})
print(result)

Still got the same error:

---------------------------------------------------------------------------
InvalidRequestError                       Traceback (most recent call last)
Cell In[2], line 70
     64 retrieval_qa = RetrievalQA(
     65     retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain
     66 )
     68 query = "What did the president say about russia"
---> 70 result = retrieval_qa.run({"query": query})
     71 print(result)

[... the remaining frames are identical to the traceback above ...]

InvalidRequestError: Unrecognized request arguments supplied: function_call, functions

Has anyone found a solution for this issue yet? I’m having the same issue.

from langchain.chat_models import ChatOpenAI
from langchain.chains import create_extraction_chain_pydantic

llm = ChatOpenAI(
    model=config["MODEL_NAME"],
    engine=config["MODEL_NAME"],
    openai_api_key=config["OPENAI_API_KEY"],
    temperature=0,
    openai_api_base=config["OPENAI_API_BASE"],
)


chain = create_extraction_chain_pydantic(pydantic_schema=Interview, llm=llm)
result = chain.run(file_content)

result

Error:

InvalidRequestError: Unrecognized request argument supplied: functions
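
If this is hitting an Azure deployment (the engine argument suggests it is), the same root cause likely applies: create_extraction_chain_pydantic also relies on OpenAI function calling, so the deployment and API version must support it. A self-contained sketch, assuming a hypothetical Interview schema (the original isn’t shown) and AzureChatOpenAI pointed at a 0613-or-later deployment:

from pydantic import BaseModel

from langchain.chains import create_extraction_chain_pydantic
from langchain.chat_models import AzureChatOpenAI

class Interview(BaseModel):
    # Hypothetical stand-in for the original schema, which isn't shown.
    candidate_name: str
    position: str

llm = AzureChatOpenAI(
    deployment_name=config["MODEL_NAME"],       # must be a 0613-or-later deployment
    openai_api_base=config["OPENAI_API_BASE"],
    openai_api_key=config["OPENAI_API_KEY"],
    openai_api_version="2023-07-01-preview",    # API version that accepts functions
    temperature=0,
)

chain = create_extraction_chain_pydantic(pydantic_schema=Interview, llm=llm)
result = chain.run(file_content)  # file_content as in the snippet above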

Never mind - I resolved the issue by upgrading langchain to the later versions. (Currently sitting at langchain==0.0.309)
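
For anyone landing here later: the upgrade is something like pip install --upgrade "langchain>=0.0.309", and it is worth verifying that the environment from the traceback is actually importing the new version:

import langchain

# Sanity check after upgrading; the fixes landed across the 0.0.3xx releases.
print(langchain.__version__)  # expect 0.0.309 or newer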