From 976b45661900e44f28179327dc0c178d6d839191 Mon Sep 17 00:00:00 2001 From: Bagatur <22008038+baskaryan@users.noreply.github.com> Date: Thu, 20 Jun 2024 21:00:22 -0700 Subject: [PATCH 1/9] docs: BaseChatModel key methods table (#23238) If we're moving documenting inherited params think these kinds of tables become more important ![Screenshot 2024-06-20 at 3 59 12 PM](https://github.com/langchain-ai/langchain/assets/22008038/722266eb-2353-4e85-8fae-76b19bd333e0) --- .../create_chat_model_docstring_tables.py | 120 ++++++++++++++++++ .../language_models/chat_models.py | 100 +++++++++++---- 2 files changed, 194 insertions(+), 26 deletions(-) create mode 100644 docs/scripts/create_chat_model_docstring_tables.py diff --git a/docs/scripts/create_chat_model_docstring_tables.py b/docs/scripts/create_chat_model_docstring_tables.py new file mode 100644 index 0000000000000..bf911d0897b51 --- /dev/null +++ b/docs/scripts/create_chat_model_docstring_tables.py @@ -0,0 +1,120 @@ +imperative = [ + [ + "invoke", + "str | List[dict | tuple | BaseMessage] | PromptValue", + "BaseMessage", + "A single chat model call.", + ], + [ + "ainvoke", + "'''", + "BaseMessage", + "Defaults to running invoke in an async executor.", + ], + [ + "stream", + "'''", + "Iterator[BaseMessageChunk]", + "Defaults to yielding output of invoke.", + ], + [ + "astream", + "'''", + "AsyncIterator[BaseMessageChunk]", + "Defaults to yielding output of ainvoke.", + ], + [ + "astream_events", + "'''", + "AsyncIterator[StreamEvent]", + "Event types: 'on_chat_model_start', 'on_chat_model_stream', 'on_chat_model_end'.", + ], + [ + "batch", + "List[''']", + "List[BaseMessage]", + "Defaults to running invoke in concurrent threads.", + ], + [ + "abatch", + "List[''']", + "List[BaseMessage]", + "Defaults to running ainvoke in concurrent threads.", + ], + [ + "batch_as_completed", + "List[''']", + "Iterator[Tuple[int, Union[BaseMessage, Exception]]]", + "Defaults to running invoke in concurrent threads.", + ], + [ + "abatch_as_completed", + "List[''']", + "AsyncIterator[Tuple[int, Union[BaseMessage, Exception]]]", + "Defaults to running ainvoke in concurrent threads.", + ], +] +declarative = [ + [ + "bind_tools", + # "Tools, ...", + # "Runnable with same inputs/outputs as ChatModel", + "Create ChatModel that can call tools.", + ], + [ + "with_structured_output", + # "An output schema, ...", + # "Runnable that takes ChatModel inputs and returns a dict or Pydantic object", + "Create wrapper that structures model output using schema.", + ], + [ + "with_retry", + # "Max retries, exceptions to handle, ...", + # "Runnable with same inputs/outputs as ChatModel", + "Create wrapper that retries model calls on failure.", + ], + [ + "with_fallbacks", + # "List of models to fall back on", + # "Runnable with same inputs/outputs as ChatModel", + "Create wrapper that falls back to other models on failure.", + ], + [ + "configurable_fields", + # "*ConfigurableField", + # "Runnable with same inputs/outputs as ChatModel", + "Specify init args of the model that can be configured at runtime via the RunnableConfig.", + ], + [ + "configurable_alternatives", + # "ConfigurableField, ...", + # "Runnable with same inputs/outputs as ChatModel", + "Specify alternative models which can be swapped in at runtime via the RunnableConfig.", + ], +] + + +def create_table(to_build: list) -> str: + for x in to_build: + x[0] = "`" + x[0] + "`" + longest = [max(len(x[i]) for x in to_build) for i in range(len(to_build[0]))] + widths = [int(1.2 * col) for col in longest] + headers = ( + 
["Method", "Input", "Output", "Description"] + if len(widths) == 4 + else ["Method", "Description"] + ) + rows = [[h + " " * (w - len(h)) for w, h in zip(widths, headers)]] + for x in to_build: + rows.append([y + " " * (w - len(y)) for w, y in zip(widths, x)]) + + table = [" | ".join(([""] + x + [""])).strip() for x in rows] + lines = [ + "+".join(([""] + ["-" * (len(y) + 2) for y in x] + [""])).strip() for x in rows + ] + lines[1] = lines[1].replace("-", "=") + lines.append(lines[-1]) + rst = lines[0] + for r, li in zip(table, lines[1:]): + rst += "\n" + r + "\n" + li + return rst diff --git a/libs/core/langchain_core/language_models/chat_models.py b/libs/core/langchain_core/language_models/chat_models.py index 5b3bf9a48fd4b..dfc9e94e703c9 100644 --- a/libs/core/langchain_core/language_models/chat_models.py +++ b/libs/core/langchain_core/language_models/chat_models.py @@ -115,32 +115,80 @@ async def agenerate_from_stream( class BaseChatModel(BaseLanguageModel[BaseMessage], ABC): - """Base class for Chat models. - - Custom chat model implementations should inherit from this class. - - Follow the guide for more information on how to implement a - custom Chat Model: - [Guide](https://python.langchain.com/v0.2/docs/how_to/custom_chat_model/). - - Please reference the table below for information about which - methods and properties are required or optional for implementations. - - +----------------------------------+--------------------------------------------------------------------+-------------------+ - | Method/Property | Description | Required/Optional | - +==================================+====================================================================+===================+ - | `_generate` | Use to generate a chat result from a prompt | Required | - +----------------------------------+--------------------------------------------------------------------+-------------------+ - | `_llm_type` (property) | Used to uniquely identify the type of the model. Used for logging. | Required | - +----------------------------------+--------------------------------------------------------------------+-------------------+ - | `_identifying_params` (property) | Represent model parameterization for tracing purposes. | Optional | - +----------------------------------+--------------------------------------------------------------------+-------------------+ - | `_stream` | Use to implement streaming | Optional | - +----------------------------------+--------------------------------------------------------------------+-------------------+ - | `_agenerate` | Use to implement a native async method | Optional | - +----------------------------------+--------------------------------------------------------------------+-------------------+ - | `_astream` | Use to implement async version of `_stream` | Optional | - +----------------------------------+--------------------------------------------------------------------+-------------------+ + """Base class for chat models. + + Key imperative methods: + Methods that actually call the underlying model. 
+ + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | Method | Input | Output | Description | + +===========================+================================================================+=====================================================================+==================================================================================================+ + | `invoke` | str | List[dict | tuple | BaseMessage] | PromptValue | BaseMessage | A single chat model call. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `ainvoke` | ''' | BaseMessage | Defaults to running invoke in an async executor. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `stream` | ''' | Iterator[BaseMessageChunk] | Defaults to yielding output of invoke. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `astream` | ''' | AsyncIterator[BaseMessageChunk] | Defaults to yielding output of ainvoke. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `astream_events` | ''' | AsyncIterator[StreamEvent] | Event types: 'on_chat_model_start', 'on_chat_model_stream', 'on_chat_model_end'. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `batch` | List['''] | List[BaseMessage] | Defaults to running invoke in concurrent threads. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `abatch` | List['''] | List[BaseMessage] | Defaults to running ainvoke in concurrent threads. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `batch_as_completed` | List['''] | Iterator[Tuple[int, Union[BaseMessage, Exception]]] | Defaults to running invoke in concurrent threads. 
| + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `abatch_as_completed` | List['''] | AsyncIterator[Tuple[int, Union[BaseMessage, Exception]]] | Defaults to running ainvoke in concurrent threads. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + + This table provides a brief overview of the main imperative methods. Please see the base Runnable reference for full documentation. + + Key declarative methods: + Methods for creating another Runnable using the ChatModel. + + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + | Method | Description | + +==================================+===========================================================================================================+ + | `bind_tools` | Create ChatModel that can call tools. | + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + | `with_structured_output` | Create wrapper that structures model output using schema. | + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + | `with_retry` | Create wrapper that retries model calls on failure. | + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + | `with_fallbacks` | Create wrapper that falls back to other models on failure. | + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + | `configurable_fields` | Specify init args of the model that can be configured at runtime via the RunnableConfig. | + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + | `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the RunnableConfig. | + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + + This table provides a brief overview of the main declarative methods. Please see the reference for each method for full documentation. + + Creating custom chat model: + Custom chat model implementations should inherit from this class. + Please reference the table below for information about which + methods and properties are required or optional for implementations. 
+ + +----------------------------------+--------------------------------------------------------------------+-------------------+ + | Method/Property | Description | Required/Optional | + +==================================+====================================================================+===================+ + | `_generate` | Use to generate a chat result from a prompt | Required | + +----------------------------------+--------------------------------------------------------------------+-------------------+ + | `_llm_type` (property) | Used to uniquely identify the type of the model. Used for logging. | Required | + +----------------------------------+--------------------------------------------------------------------+-------------------+ + | `_identifying_params` (property) | Represent model parameterization for tracing purposes. | Optional | + +----------------------------------+--------------------------------------------------------------------+-------------------+ + | `_stream` | Use to implement streaming | Optional | + +----------------------------------+--------------------------------------------------------------------+-------------------+ + | `_agenerate` | Use to implement a native async method | Optional | + +----------------------------------+--------------------------------------------------------------------+-------------------+ + | `_astream` | Use to implement async version of `_stream` | Optional | + +----------------------------------+--------------------------------------------------------------------+-------------------+ + + Follow the guide for more information on how to implement a custom Chat Model: + [Guide](https://python.langchain.com/v0.2/docs/how_to/custom_chat_model/). + """ # noqa: E501 callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) From b108b4d01011782fe4de9d09a473027179474847 Mon Sep 17 00:00:00 2001 From: mackong Date: Fri, 21 Jun 2024 21:30:27 +0800 Subject: [PATCH 2/9] core[patch]: set schema format for AsyncRootListenersTracer (#23214) - **Description:** AsyncRootListenersTracer support on_chat_model_start, it's schema_format should be "original+chat". - **Issue:** N/A - **Dependencies:** --- libs/core/langchain_core/tracers/root_listeners.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/core/langchain_core/tracers/root_listeners.py b/libs/core/langchain_core/tracers/root_listeners.py index 541052e2fddd3..54ec1ba255a98 100644 --- a/libs/core/langchain_core/tracers/root_listeners.py +++ b/libs/core/langchain_core/tracers/root_listeners.py @@ -71,7 +71,7 @@ def __init__( on_end: Optional[AsyncListener], on_error: Optional[AsyncListener], ) -> None: - super().__init__() + super().__init__(_schema_format="original+chat") self.config = config self._arg_on_start = on_start From 401d469a92223d5b028ea86af09e12ada056d700 Mon Sep 17 00:00:00 2001 From: Mu Yang Date: Fri, 21 Jun 2024 22:05:38 +0800 Subject: [PATCH 3/9] langchain: fix systax warning in create_json_chat_agent (#23253) fix systax warning in `create_json_chat_agent` ``` .../langchain/agents/json_chat/base.py:22: SyntaxWarning: invalid escape sequence '\ ' """Create an agent that uses JSON to format its logic, build for Chat Models. 
``` --- libs/langchain/langchain/agents/json_chat/base.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libs/langchain/langchain/agents/json_chat/base.py b/libs/langchain/langchain/agents/json_chat/base.py index ecf4ce7f7ce00..803e2436bacf7 100644 --- a/libs/langchain/langchain/agents/json_chat/base.py +++ b/libs/langchain/langchain/agents/json_chat/base.py @@ -122,8 +122,8 @@ def create_json_chat_agent( ```json {{ - "action": string, \ The action to take. Must be one of {tool_names} - "action_input": string \ The input to the action + "action": string, \\ The action to take. Must be one of {tool_names} + "action_input": string \\ The input to the action }} ``` @@ -134,7 +134,7 @@ def create_json_chat_agent( ```json {{ "action": "Final Answer", - "action_input": string \ You should put what you want to return to use here + "action_input": string \\ You should put what you want to return to use here }} ``` From 1c2b9cc9abb4a4244654de0df16d552a0488eee2 Mon Sep 17 00:00:00 2001 From: Philippe PRADOS Date: Fri, 21 Jun 2024 16:37:09 +0200 Subject: [PATCH 4/9] core[minor]: Update pgvector translator for langchain_postgres (#23217) The SelfQuery PGVectorTranslator is not correct. The operator is "eq" and not "$eq". This patch uses a new version of PGVectorTranslator from langchain_postgres. It's necessary to release a new version of langchain_postgres (see [here](https://github.com/langchain-ai/langchain-postgres/pull/75)) before accepting this PR in langchain. --- libs/langchain/langchain/retrievers/self_query/base.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libs/langchain/langchain/retrievers/self_query/base.py b/libs/langchain/langchain/retrievers/self_query/base.py index f631f4e54788e..b3cb8fc4c3559 100644 --- a/libs/langchain/langchain/retrievers/self_query/base.py +++ b/libs/langchain/langchain/retrievers/self_query/base.py @@ -170,11 +170,12 @@ def _get_builtin_translator(vectorstore: VectorStore) -> Visitor: try: from langchain_postgres import PGVector + from langchain_postgres import PGVectorTranslator as NewPGVectorTranslator except ImportError: pass else: if isinstance(vectorstore, PGVector): - return PGVectorTranslator() + return NewPGVectorTranslator() raise ValueError( f"Self query retriever with Vector Store type {vectorstore.__class__}" From 360a70c8a884d10c8320592869c1b2ab29f7d233 Mon Sep 17 00:00:00 2001 From: mackong Date: Fri, 21 Jun 2024 22:39:47 +0800 Subject: [PATCH 5/9] core[patch]: fix no current event loop for sql history in async mode (#22933) - **Description:** When using RunnableWithMessageHistory/SQLChatMessageHistory in async mode, we'll get the following error: ``` Error in RootListenersTracer.on_chain_end callback: RuntimeError("There is no current event loop in thread 'asyncio_3'.") ``` which is thrown by https://github.com/langchain-ai/langchain/blob/ddfbca38dfa22954eaeda38614c6e1ec0cdecaa9/libs/community/langchain_community/chat_message_histories/sql.py#L259, and no message history will be added to the database. In this patch, a new _aexit_history function, which will be called in async mode, is added, and in turn aadd_messages will be called. In this patch, we use the `afunc` attribute of a Runnable to check if the end listener should be run in async mode or not.
- **Issue:** #22021, #22022 - **Dependencies:** N/A --- .../chat_message_histories/sql.py | 17 +- libs/core/langchain_core/runnables/history.py | 37 +- .../unit_tests/runnables/test_history.py | 326 ++++++++++++++++-- 3 files changed, 345 insertions(+), 35 deletions(-) diff --git a/libs/community/langchain_community/chat_message_histories/sql.py b/libs/community/langchain_community/chat_message_histories/sql.py index 9264dbeff0ed8..edfed45466fe4 100644 --- a/libs/community/langchain_community/chat_message_histories/sql.py +++ b/libs/community/langchain_community/chat_message_histories/sql.py @@ -1,4 +1,3 @@ -import asyncio import contextlib import json import logging @@ -252,17 +251,11 @@ async def aadd_message(self, message: BaseMessage) -> None: await session.commit() def add_messages(self, messages: Sequence[BaseMessage]) -> None: - # The method RunnableWithMessageHistory._exit_history() call - # add_message method by mistake and not aadd_message. - # See https://github.com/langchain-ai/langchain/issues/22021 - if self.async_mode: - loop = asyncio.get_event_loop() - loop.run_until_complete(self.aadd_messages(messages)) - else: - with self._make_sync_session() as session: - for message in messages: - session.add(self.converter.to_sql_model(message, self.session_id)) - session.commit() + # Add all messages in one transaction + with self._make_sync_session() as session: + for message in messages: + session.add(self.converter.to_sql_model(message, self.session_id)) + session.commit() async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None: # Add all messages in one transaction diff --git a/libs/core/langchain_core/runnables/history.py b/libs/core/langchain_core/runnables/history.py index 326f941263c81..05d52c38bf888 100644 --- a/libs/core/langchain_core/runnables/history.py +++ b/libs/core/langchain_core/runnables/history.py @@ -16,6 +16,7 @@ from langchain_core.chat_history import BaseChatMessageHistory from langchain_core.load.load import load from langchain_core.pydantic_v1 import BaseModel +from langchain_core.runnables import RunnableBranch from langchain_core.runnables.base import Runnable, RunnableBindingBase, RunnableLambda from langchain_core.runnables.passthrough import RunnablePassthrough from langchain_core.runnables.utils import ( @@ -306,8 +307,17 @@ def get_session_history( history_chain = RunnablePassthrough.assign( **{messages_key: history_chain} ).with_config(run_name="insert_history") - bound = ( - history_chain | runnable.with_listeners(on_end=self._exit_history) + bound: Runnable = ( + history_chain + | RunnableBranch( + ( + RunnableLambda( + self._is_not_async, afunc=self._is_async + ).with_config(run_name="RunnableWithMessageHistoryInAsyncMode"), + runnable.with_alisteners(on_end=self._aexit_history), + ), + runnable.with_listeners(on_end=self._exit_history), + ) ).with_config(run_name="RunnableWithMessageHistory") if history_factory_config: @@ -367,6 +377,12 @@ def get_input_schema( else: return super_schema + def _is_not_async(self, *args: Sequence[Any], **kwargs: Dict[str, Any]) -> bool: + return False + + async def _is_async(self, *args: Sequence[Any], **kwargs: Dict[str, Any]) -> bool: + return True + def _get_input_messages( self, input_val: Union[str, BaseMessage, Sequence[BaseMessage], dict] ) -> List[BaseMessage]: @@ -483,6 +499,23 @@ def _exit_history(self, run: Run, config: RunnableConfig) -> None: output_messages = self._get_output_messages(output_val) hist.add_messages(input_messages + output_messages) + async def _aexit_history(self, run: 
Run, config: RunnableConfig) -> None: + hist: BaseChatMessageHistory = config["configurable"]["message_history"] + + # Get the input messages + inputs = load(run.inputs) + input_messages = self._get_input_messages(inputs) + # If historic messages were prepended to the input messages, remove them to + # avoid adding duplicate messages to history. + if not self.history_messages_key: + historic_messages = config["configurable"]["message_history"].messages + input_messages = input_messages[len(historic_messages) :] + + # Get the output messages + output_val = load(run.outputs) + output_messages = self._get_output_messages(output_val) + await hist.aadd_messages(input_messages + output_messages) + def _merge_configs(self, *configs: Optional[RunnableConfig]) -> RunnableConfig: config = super()._merge_configs(*configs) expected_keys = [field_spec.id for field_spec in self.history_factory_config] diff --git a/libs/core/tests/unit_tests/runnables/test_history.py b/libs/core/tests/unit_tests/runnables/test_history.py index 3db18b9963f56..60883f9a324e8 100644 --- a/libs/core/tests/unit_tests/runnables/test_history.py +++ b/libs/core/tests/unit_tests/runnables/test_history.py @@ -62,6 +62,31 @@ def test_input_messages() -> None: } +async def test_input_messages_async() -> None: + runnable = RunnableLambda( + lambda messages: "you said: " + + "\n".join(str(m.content) for m in messages if isinstance(m, HumanMessage)) + ) + store: Dict = {} + get_session_history = _get_get_session_history(store=store) + with_history = RunnableWithMessageHistory(runnable, get_session_history) + config: RunnableConfig = {"configurable": {"session_id": "1_async"}} + output = await with_history.ainvoke([HumanMessage(content="hello")], config) + assert output == "you said: hello" + output = await with_history.ainvoke([HumanMessage(content="good bye")], config) + assert output == "you said: hello\ngood bye" + assert store == { + "1_async": ChatMessageHistory( + messages=[ + HumanMessage(content="hello"), + AIMessage(content="you said: hello"), + HumanMessage(content="good bye"), + AIMessage(content="you said: hello\ngood bye"), + ] + ) + } + + def test_input_dict() -> None: runnable = RunnableLambda( lambda input: "you said: " @@ -82,6 +107,28 @@ def test_input_dict() -> None: assert output == "you said: hello\ngood bye" +async def test_input_dict_async() -> None: + runnable = RunnableLambda( + lambda input: "you said: " + + "\n".join( + str(m.content) for m in input["messages"] if isinstance(m, HumanMessage) + ) + ) + get_session_history = _get_get_session_history() + with_history = RunnableWithMessageHistory( + runnable, get_session_history, input_messages_key="messages" + ) + config: RunnableConfig = {"configurable": {"session_id": "2_async"}} + output = await with_history.ainvoke( + {"messages": [HumanMessage(content="hello")]}, config + ) + assert output == "you said: hello" + output = await with_history.ainvoke( + {"messages": [HumanMessage(content="good bye")]}, config + ) + assert output == "you said: hello\ngood bye" + + def test_input_dict_with_history_key() -> None: runnable = RunnableLambda( lambda input: "you said: " @@ -104,6 +151,28 @@ def test_input_dict_with_history_key() -> None: assert output == "you said: hello\ngood bye" +async def test_input_dict_with_history_key_async() -> None: + runnable = RunnableLambda( + lambda input: "you said: " + + "\n".join( + [str(m.content) for m in input["history"] if isinstance(m, HumanMessage)] + + [input["input"]] + ) + ) + get_session_history = _get_get_session_history() + 
with_history = RunnableWithMessageHistory( + runnable, + get_session_history, + input_messages_key="input", + history_messages_key="history", + ) + config: RunnableConfig = {"configurable": {"session_id": "3_async"}} + output = await with_history.ainvoke({"input": "hello"}, config) + assert output == "you said: hello" + output = await with_history.ainvoke({"input": "good bye"}, config) + assert output == "you said: hello\ngood bye" + + def test_output_message() -> None: runnable = RunnableLambda( lambda input: AIMessage( @@ -132,41 +201,82 @@ def test_output_message() -> None: assert output == AIMessage(content="you said: hello\ngood bye") -def test_input_messages_output_message() -> None: - class LengthChatModel(BaseChatModel): - """A fake chat model that returns the length of the messages passed in.""" - - def _generate( - self, - messages: List[BaseMessage], - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> ChatResult: - """Top Level call""" - return ChatResult( - generations=[ - ChatGeneration(message=AIMessage(content=str(len(messages)))) +async def test_output_message_async() -> None: + runnable = RunnableLambda( + lambda input: AIMessage( + content="you said: " + + "\n".join( + [ + str(m.content) + for m in input["history"] + if isinstance(m, HumanMessage) ] + + [input["input"]] ) + ) + ) + get_session_history = _get_get_session_history() + with_history = RunnableWithMessageHistory( + runnable, + get_session_history, + input_messages_key="input", + history_messages_key="history", + ) + config: RunnableConfig = {"configurable": {"session_id": "4_async"}} + output = await with_history.ainvoke({"input": "hello"}, config) + assert output == AIMessage(content="you said: hello") + output = await with_history.ainvoke({"input": "good bye"}, config) + assert output == AIMessage(content="you said: hello\ngood bye") + + +class LengthChatModel(BaseChatModel): + """A fake chat model that returns the length of the messages passed in.""" + + def _generate( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + """Top Level call""" + return ChatResult( + generations=[ChatGeneration(message=AIMessage(content=str(len(messages))))] + ) + + @property + def _llm_type(self) -> str: + return "length-fake-chat-model" - @property - def _llm_type(self) -> str: - return "length-fake-chat-model" +def test_input_messages_output_message() -> None: runnable = LengthChatModel() get_session_history = _get_get_session_history() with_history = RunnableWithMessageHistory( runnable, get_session_history, ) - config: RunnableConfig = {"configurable": {"session_id": "4"}} + config: RunnableConfig = {"configurable": {"session_id": "5"}} output = with_history.invoke([HumanMessage(content="hi")], config) assert output.content == "1" output = with_history.invoke([HumanMessage(content="hi")], config) assert output.content == "3" +async def test_input_messages_output_message_async() -> None: + runnable = LengthChatModel() + get_session_history = _get_get_session_history() + with_history = RunnableWithMessageHistory( + runnable, + get_session_history, + ) + config: RunnableConfig = {"configurable": {"session_id": "5_async"}} + output = await with_history.ainvoke([HumanMessage(content="hi")], config) + assert output.content == "1" + output = await with_history.ainvoke([HumanMessage(content="hi")], config) + assert output.content == "3" + + def 
test_output_messages() -> None: runnable = RunnableLambda( lambda input: [ @@ -190,13 +300,43 @@ def test_output_messages() -> None: input_messages_key="input", history_messages_key="history", ) - config: RunnableConfig = {"configurable": {"session_id": "5"}} + config: RunnableConfig = {"configurable": {"session_id": "6"}} output = with_history.invoke({"input": "hello"}, config) assert output == [AIMessage(content="you said: hello")] output = with_history.invoke({"input": "good bye"}, config) assert output == [AIMessage(content="you said: hello\ngood bye")] +async def test_output_messages_async() -> None: + runnable = RunnableLambda( + lambda input: [ + AIMessage( + content="you said: " + + "\n".join( + [ + str(m.content) + for m in input["history"] + if isinstance(m, HumanMessage) + ] + + [input["input"]] + ) + ) + ] + ) + get_session_history = _get_get_session_history() + with_history = RunnableWithMessageHistory( + runnable, # type: ignore + get_session_history, + input_messages_key="input", + history_messages_key="history", + ) + config: RunnableConfig = {"configurable": {"session_id": "6_async"}} + output = await with_history.ainvoke({"input": "hello"}, config) + assert output == [AIMessage(content="you said: hello")] + output = await with_history.ainvoke({"input": "good bye"}, config) + assert output == [AIMessage(content="you said: hello\ngood bye")] + + def test_output_dict() -> None: runnable = RunnableLambda( lambda input: { @@ -223,13 +363,46 @@ def test_output_dict() -> None: history_messages_key="history", output_messages_key="output", ) - config: RunnableConfig = {"configurable": {"session_id": "6"}} + config: RunnableConfig = {"configurable": {"session_id": "7"}} output = with_history.invoke({"input": "hello"}, config) assert output == {"output": [AIMessage(content="you said: hello")]} output = with_history.invoke({"input": "good bye"}, config) assert output == {"output": [AIMessage(content="you said: hello\ngood bye")]} +async def test_output_dict_async() -> None: + runnable = RunnableLambda( + lambda input: { + "output": [ + AIMessage( + content="you said: " + + "\n".join( + [ + str(m.content) + for m in input["history"] + if isinstance(m, HumanMessage) + ] + + [input["input"]] + ) + ) + ] + } + ) + get_session_history = _get_get_session_history() + with_history = RunnableWithMessageHistory( + runnable, + get_session_history, + input_messages_key="input", + history_messages_key="history", + output_messages_key="output", + ) + config: RunnableConfig = {"configurable": {"session_id": "7_async"}} + output = await with_history.ainvoke({"input": "hello"}, config) + assert output == {"output": [AIMessage(content="you said: hello")]} + output = await with_history.ainvoke({"input": "good bye"}, config) + assert output == {"output": [AIMessage(content="you said: hello\ngood bye")]} + + def test_get_input_schema_input_dict() -> None: class RunnableWithChatHistoryInput(BaseModel): input: Union[str, BaseMessage, Sequence[BaseMessage]] @@ -404,3 +577,114 @@ def get_session_history(user_id: str, conversation_id: str) -> ChatMessageHistor ] ), } + + +async def test_using_custom_config_specs_async() -> None: + """Test that we can configure which keys should be passed to the session factory.""" + + def _fake_llm(input: Dict[str, Any]) -> List[BaseMessage]: + messages = input["messages"] + return [ + AIMessage( + content="you said: " + + "\n".join( + str(m.content) for m in messages if isinstance(m, HumanMessage) + ) + ) + ] + + runnable = RunnableLambda(_fake_llm) + store = {} + + def 
get_session_history(user_id: str, conversation_id: str) -> ChatMessageHistory: + if (user_id, conversation_id) not in store: + store[(user_id, conversation_id)] = ChatMessageHistory() + return store[(user_id, conversation_id)] + + with_message_history = RunnableWithMessageHistory( + runnable, # type: ignore + get_session_history=get_session_history, + input_messages_key="messages", + history_messages_key="history", + history_factory_config=[ + ConfigurableFieldSpec( + id="user_id", + annotation=str, + name="User ID", + description="Unique identifier for the user.", + default="", + is_shared=True, + ), + ConfigurableFieldSpec( + id="conversation_id", + annotation=str, + name="Conversation ID", + description="Unique identifier for the conversation.", + default=None, + is_shared=True, + ), + ], + ) + result = await with_message_history.ainvoke( + { + "messages": [HumanMessage(content="hello")], + }, + {"configurable": {"user_id": "user1_async", "conversation_id": "1_async"}}, + ) + assert result == [ + AIMessage(content="you said: hello"), + ] + assert store == { + ("user1_async", "1_async"): ChatMessageHistory( + messages=[ + HumanMessage(content="hello"), + AIMessage(content="you said: hello"), + ] + ) + } + + result = await with_message_history.ainvoke( + { + "messages": [HumanMessage(content="goodbye")], + }, + {"configurable": {"user_id": "user1_async", "conversation_id": "1_async"}}, + ) + assert result == [ + AIMessage(content="you said: goodbye"), + ] + assert store == { + ("user1_async", "1_async"): ChatMessageHistory( + messages=[ + HumanMessage(content="hello"), + AIMessage(content="you said: hello"), + HumanMessage(content="goodbye"), + AIMessage(content="you said: goodbye"), + ] + ) + } + + result = await with_message_history.ainvoke( + { + "messages": [HumanMessage(content="meow")], + }, + {"configurable": {"user_id": "user2_async", "conversation_id": "1_async"}}, + ) + assert result == [ + AIMessage(content="you said: meow"), + ] + assert store == { + ("user1_async", "1_async"): ChatMessageHistory( + messages=[ + HumanMessage(content="hello"), + AIMessage(content="you said: hello"), + HumanMessage(content="goodbye"), + AIMessage(content="you said: goodbye"), + ] + ), + ("user2_async", "1_async"): ChatMessageHistory( + messages=[ + HumanMessage(content="meow"), + AIMessage(content="you said: meow"), + ] + ), + } From abe7566d7d484dd4a4e4ce64775d0fb018ad32f1 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Fri, 21 Jun 2024 08:14:03 -0700 Subject: [PATCH 6/9] core[minor]: BaseChatModel with_structured_output implementation (#22859) --- .../language_models/chat_models.py | 138 ++++++++++++++++++ .../unit_tests/runnables/test_fallbacks.py | 5 +- .../llms/ollama_functions.py | 27 +--- .../llms/test_ollama_functions.py | 1 + 4 files changed, 142 insertions(+), 29 deletions(-) diff --git a/libs/core/langchain_core/language_models/chat_models.py b/libs/core/langchain_core/language_models/chat_models.py index dfc9e94e703c9..329e43a4bc7af 100644 --- a/libs/core/langchain_core/language_models/chat_models.py +++ b/libs/core/langchain_core/language_models/chat_models.py @@ -5,6 +5,7 @@ import uuid import warnings from abc import ABC, abstractmethod +from operator import itemgetter from typing import ( TYPE_CHECKING, Any, @@ -54,10 +55,13 @@ ) from langchain_core.prompt_values import ChatPromptValue, PromptValue, StringPromptValue from langchain_core.pydantic_v1 import Field, root_validator +from langchain_core.runnables import RunnableMap, RunnablePassthrough from 
langchain_core.runnables.config import ensure_config, run_in_executor from langchain_core.tracers._streaming import _StreamingCallbackHandler +from langchain_core.utils.function_calling import convert_to_openai_tool if TYPE_CHECKING: + from langchain_core.output_parsers.base import OutputParserLike from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import Runnable, RunnableConfig from langchain_core.tools import BaseTool @@ -1024,6 +1028,140 @@ def bind_tools( ) -> Runnable[LanguageModelInput, BaseMessage]: raise NotImplementedError() + def with_structured_output( + self, + schema: Union[Dict, Type[BaseModel]], + *, + include_raw: bool = False, + **kwargs: Any, + ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]: + """Model wrapper that returns outputs formatted to match the given schema. + + Args: + schema: The output schema as a dict or a Pydantic class. If a Pydantic class + then the model output will be an object of that class. If a dict then + the model output will be a dict. With a Pydantic class the returned + attributes will be validated, whereas with a dict they will not be. If + `method` is "function_calling" and `schema` is a dict, then the dict + must match the OpenAI function-calling spec. + include_raw: If False then only the parsed structured output is returned. If + an error occurs during model output parsing it will be raised. If True + then both the raw model response (a BaseMessage) and the parsed model + response will be returned. If an error occurs during output parsing it + will be caught and returned as well. The final output is always a dict + with keys "raw", "parsed", and "parsing_error". + + Returns: + A Runnable that takes any ChatModel input and returns as output: + + If include_raw is True then a dict with keys: + raw: BaseMessage + parsed: Optional[_DictOrPydantic] + parsing_error: Optional[BaseException] + + If include_raw is False then just _DictOrPydantic is returned, + where _DictOrPydantic depends on the schema: + + If schema is a Pydantic class then _DictOrPydantic is the Pydantic + class. + + If schema is a dict then _DictOrPydantic is a dict. + + Example: Function-calling, Pydantic schema (method="function_calling", include_raw=False): + .. code-block:: python + + from langchain_core.pydantic_v1 import BaseModel + + class AnswerWithJustification(BaseModel): + '''An answer to the user question along with justification for the answer.''' + answer: str + justification: str + + llm = ChatModel(model="model-name", temperature=0) + structured_llm = llm.with_structured_output(AnswerWithJustification) + + structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + + # -> AnswerWithJustification( + # answer='They weigh the same', + # justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.' + # ) + + Example: Function-calling, Pydantic schema (method="function_calling", include_raw=True): + .. 
code-block:: python + + from langchain_core.pydantic_v1 import BaseModel + + class AnswerWithJustification(BaseModel): + '''An answer to the user question along with justification for the answer.''' + answer: str + justification: str + + llm = ChatModel(model="model-name", temperature=0) + structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True) + + structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + # -> { + # 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}), + # 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'), + # 'parsing_error': None + # } + + Example: Function-calling, dict schema (method="function_calling", include_raw=False): + .. code-block:: python + + from langchain_core.pydantic_v1 import BaseModel + from langchain_core.utils.function_calling import convert_to_openai_tool + + class AnswerWithJustification(BaseModel): + '''An answer to the user question along with justification for the answer.''' + answer: str + justification: str + + dict_schema = convert_to_openai_tool(AnswerWithJustification) + llm = ChatModel(model="model-name", temperature=0) + structured_llm = llm.with_structured_output(dict_schema) + + structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + # -> { + # 'answer': 'They weigh the same', + # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.' + # } + """ # noqa: E501 + if kwargs: + raise ValueError(f"Received unsupported arguments {kwargs}") + + from langchain_core.output_parsers.openai_tools import ( + JsonOutputKeyToolsParser, + PydanticToolsParser, + ) + + if self.bind_tools is BaseChatModel.bind_tools: + raise NotImplementedError( + "with_structured_output is not implemented for this model." + ) + llm = self.bind_tools([schema], tool_choice="any") + if isinstance(schema, type) and issubclass(schema, BaseModel): + output_parser: OutputParserLike = PydanticToolsParser( + tools=[schema], first_tool_only=True + ) + else: + key_name = convert_to_openai_tool(schema)["function"]["name"] + output_parser = JsonOutputKeyToolsParser( + key_name=key_name, first_tool_only=True + ) + if include_raw: + parser_assign = RunnablePassthrough.assign( + parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None + ) + parser_none = RunnablePassthrough.assign(parsed=lambda _: None) + parser_with_fallback = parser_assign.with_fallbacks( + [parser_none], exception_key="parsing_error" + ) + return RunnableMap(raw=llm) | parser_with_fallback + else: + return llm | output_parser + class SimpleChatModel(BaseChatModel): """Simplified implementation for a chat model to inherit from. 
diff --git a/libs/core/tests/unit_tests/runnables/test_fallbacks.py b/libs/core/tests/unit_tests/runnables/test_fallbacks.py index ba9091a190219..69c28e8d46788 100644 --- a/libs/core/tests/unit_tests/runnables/test_fallbacks.py +++ b/libs/core/tests/unit_tests/runnables/test_fallbacks.py @@ -334,7 +334,7 @@ def bind_tools( def with_structured_output( self, schema: Union[Dict, Type[BaseModel]], **kwargs: Any ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]: - return self | (lambda x: {"foo": self.foo}) + return RunnableLambda(lambda x: {"foo": self.foo}) @property def _llm_type(self) -> str: @@ -388,6 +388,3 @@ def test_fallbacks_getattr_runnable_output() -> None: for fallback in llm_with_fallbacks_with_tools.fallbacks ) assert llm_with_fallbacks_with_tools.runnable.kwargs["tools"] == [] - - with pytest.raises(NotImplementedError): - llm_with_fallbacks.with_structured_output({}) diff --git a/libs/experimental/langchain_experimental/llms/ollama_functions.py b/libs/experimental/langchain_experimental/llms/ollama_functions.py index 0329ddeecda45..93b770f79ff89 100644 --- a/libs/experimental/langchain_experimental/llms/ollama_functions.py +++ b/libs/experimental/langchain_experimental/llms/ollama_functions.py @@ -6,7 +6,6 @@ Callable, Dict, List, - Literal, Optional, Sequence, Type, @@ -14,7 +13,6 @@ TypeVar, Union, cast, - overload, ) from langchain_community.chat_models.ollama import ChatOllama @@ -72,7 +70,6 @@ } _BM = TypeVar("_BM", bound=BaseModel) -_DictOrPydanticClass = Union[Dict[str, Any], Type[_BM]] _DictOrPydantic = Union[Dict, _BM] @@ -151,33 +148,13 @@ def bind_tools( ) -> Runnable[LanguageModelInput, BaseMessage]: return self.bind(functions=tools, **kwargs) - @overload def with_structured_output( self, - schema: Optional[_DictOrPydanticClass] = None, - *, - include_raw: Literal[True] = True, - **kwargs: Any, - ) -> Runnable[LanguageModelInput, _AllReturnType]: - ... - - @overload - def with_structured_output( - self, - schema: Optional[_DictOrPydanticClass] = None, - *, - include_raw: Literal[False] = False, - **kwargs: Any, - ) -> Runnable[LanguageModelInput, _DictOrPydantic]: - ... - - def with_structured_output( - self, - schema: Optional[_DictOrPydanticClass] = None, + schema: Union[Dict, Type[BaseModel]], *, include_raw: bool = False, **kwargs: Any, - ) -> Runnable[LanguageModelInput, _DictOrPydantic]: + ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]: """Model wrapper that returns outputs formatted to match the given schema. 
Args: diff --git a/libs/experimental/tests/integration_tests/llms/test_ollama_functions.py b/libs/experimental/tests/integration_tests/llms/test_ollama_functions.py index fd7a065135ded..b359233aaa933 100644 --- a/libs/experimental/tests/integration_tests/llms/test_ollama_functions.py +++ b/libs/experimental/tests/integration_tests/llms/test_ollama_functions.py @@ -135,6 +135,7 @@ def test_ollama_structured_output_raw(self) -> None: structured_llm = model.with_structured_output(Joke, include_raw=True) res = structured_llm.invoke("Tell me a joke about cars") + assert isinstance(res, dict) assert "raw" in res assert "parsed" in res assert isinstance(res["raw"], AIMessage) From 75c7c3a1a7bda61cf78d8c371fb5c46157417973 Mon Sep 17 00:00:00 2001 From: ccurme Date: Fri, 21 Jun 2024 11:15:29 -0400 Subject: [PATCH 7/9] openai: release 0.1.9 (#23263) --- libs/partners/openai/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/partners/openai/pyproject.toml b/libs/partners/openai/pyproject.toml index 527f7da96dcd8..046df4edfa025 100644 --- a/libs/partners/openai/pyproject.toml +++ b/libs/partners/openai/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain-openai" -version = "0.1.8" +version = "0.1.9" description = "An integration package connecting OpenAI and LangChain" authors = [] readme = "README.md" From 0deb98ac0c907dfbbb3eb18c08ad9195ec5a3b7d Mon Sep 17 00:00:00 2001 From: Vwake04 Date: Fri, 21 Jun 2024 21:16:01 +0530 Subject: [PATCH 8/9] pinecone: Fix multiprocessing issue in PineconeVectorStore (#22571) **Description:** Currently, the `langchain_pinecone` library forces the `async_req` (asynchronous required) argument to Pinecone to `True`. This design choice causes problems when deploying to environments that do not support multiprocessing, such as AWS Lambda. In such environments, this restriction can prevent users from successfully using `langchain_pinecone`. This PR introduces a change that allows users to specify whether they want to use asynchronous requests by passing the `async_req` parameter through `**kwargs`. By doing so, users can set `async_req=False` to utilize synchronous processing, making the library compatible with AWS Lambda and other environments that do not support multithreading. **Issue:** This PR does not address a specific issue number but aims to resolve compatibility issues with AWS Lambda by allowing synchronous processing. **Dependencies:** None, that I'm aware of. --------- Co-authored-by: Erick Friis --- .../langchain_pinecone/vectorstores.py | 25 ++++++++---- .../integration_tests/test_vectorstores.py | 39 +++++++++++++++++++ 2 files changed, 56 insertions(+), 8 deletions(-) diff --git a/libs/partners/pinecone/langchain_pinecone/vectorstores.py b/libs/partners/pinecone/langchain_pinecone/vectorstores.py index 34b3f8521131d..1c21179f644e0 100644 --- a/libs/partners/pinecone/langchain_pinecone/vectorstores.py +++ b/libs/partners/pinecone/langchain_pinecone/vectorstores.py @@ -161,19 +161,26 @@ def add_texts( chunk_ids = ids[i : i + embedding_chunk_size] chunk_metadatas = metadatas[i : i + embedding_chunk_size] embeddings = self._embedding.embed_documents(chunk_texts) - async_res = [ + vector_tuples = zip(chunk_ids, embeddings, chunk_metadatas) + if async_req: + # Runs the pinecone upsert asynchronously. 
+ async_res = [ + self._index.upsert( + vectors=batch_vector_tuples, + namespace=namespace, + async_req=async_req, + **kwargs, + ) + for batch_vector_tuples in batch_iterate(batch_size, vector_tuples) + ] + [res.get() for res in async_res] + else: self._index.upsert( - vectors=batch, + vectors=vector_tuples, namespace=namespace, async_req=async_req, **kwargs, ) - for batch in batch_iterate( - batch_size, zip(chunk_ids, embeddings, chunk_metadatas) - ) - ] - if async_req: - [res.get() for res in async_res] return ids @@ -412,6 +419,7 @@ def from_texts( upsert_kwargs: Optional[dict] = None, pool_threads: int = 4, embeddings_chunk_size: int = 1000, + async_req: bool = True, *, id_prefix: Optional[str] = None, **kwargs: Any, @@ -453,6 +461,7 @@ def from_texts( namespace=namespace, batch_size=batch_size, embedding_chunk_size=embeddings_chunk_size, + async_req=async_req, id_prefix=id_prefix, **(upsert_kwargs or {}), ) diff --git a/libs/partners/pinecone/tests/integration_tests/test_vectorstores.py b/libs/partners/pinecone/tests/integration_tests/test_vectorstores.py index 3d64cec29dfea..292d29b63cfe5 100644 --- a/libs/partners/pinecone/tests/integration_tests/test_vectorstores.py +++ b/libs/partners/pinecone/tests/integration_tests/test_vectorstores.py @@ -9,6 +9,7 @@ from langchain_core.documents import Document from langchain_openai import OpenAIEmbeddings from pinecone import PodSpec +from pytest_mock import MockerFixture from langchain_pinecone import PineconeVectorStore @@ -290,3 +291,41 @@ def test_from_texts_with_metadatas_benchmark( query = "What did the president say about Ketanji Brown Jackson" _ = docsearch.similarity_search(query, k=1, namespace=NAMESPACE_NAME) + + @pytest.fixture + def mock_pool_not_supported(self, mocker: MockerFixture) -> None: + """ + This is the error thrown when multiprocessing is not supported. + See https://github.com/langchain-ai/langchain/issues/11168 + """ + mocker.patch( + "multiprocessing.synchronize.SemLock.__init__", + side_effect=OSError( + "FileNotFoundError: [Errno 2] No such file or directory" + ), + ) + + @pytest.mark.usefixtures("mock_pool_not_supported") + def test_that_async_freq_uses_multiprocessing( + self, texts: List[str], embedding_openai: OpenAIEmbeddings + ) -> None: + with pytest.raises(OSError): + PineconeVectorStore.from_texts( + texts=texts, + embedding=embedding_openai, + index_name=INDEX_NAME, + namespace=NAMESPACE_NAME, + async_req=True, + ) + + @pytest.mark.usefixtures("mock_pool_not_supported") + def test_that_async_freq_false_enabled_singlethreading( + self, texts: List[str], embedding_openai: OpenAIEmbeddings + ) -> None: + PineconeVectorStore.from_texts( + texts=texts, + embedding=embedding_openai, + index_name=INDEX_NAME, + namespace=NAMESPACE_NAME, + async_req=False, + ) From 4c97a9ee5347ea8695f37539b5aaa96810f50e78 Mon Sep 17 00:00:00 2001 From: Bagatur <22008038+baskaryan@users.noreply.github.com> Date: Fri, 21 Jun 2024 09:10:03 -0700 Subject: [PATCH 9/9] docs: fix message transformer docstrings (#23264) --- libs/core/langchain_core/messages/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/libs/core/langchain_core/messages/utils.py b/libs/core/langchain_core/messages/utils.py index 777f83cf00de3..263bc05fb7114 100644 --- a/libs/core/langchain_core/messages/utils.py +++ b/libs/core/langchain_core/messages/utils.py @@ -296,6 +296,7 @@ def wrapped( partial(func, **kwargs), name=getattr(func, "__name__") ) + wrapped.__doc__ = func.__doc__ return wrapped