diff --git a/README.md b/README.md index ee4f1018ed27f..f3e64b60cdb7f 100644 --- a/README.md +++ b/README.md @@ -129,7 +129,7 @@ Please see [here](https://python.langchain.com) for full documentation, which in - [πŸ¦œπŸ› οΈ LangSmith](https://docs.smith.langchain.com/): Trace and evaluate your language model applications and intelligent agents to help you move from prototype to production. - [πŸ¦œπŸ•ΈοΈ LangGraph](https://langchain-ai.github.io/langgraph/): Create stateful, multi-actor applications with LLMs. Integrates smoothly with LangChain, but can be used without it. -- [πŸ¦œπŸ“ LangServe](https://python.langchain.com/docs/langserve): Deploy LangChain runnables and chains as REST APIs. +- [πŸ¦œπŸ•ΈοΈ LangGraph Platform](https://langchain-ai.github.io/langgraph/concepts/#langgraph-platform): Deploy LLM applications built with LangGraph into production. ## πŸ’ Contributing diff --git a/docs/docs/how_to/self_query.ipynb b/docs/docs/how_to/self_query.ipynb index 06151f12d3d05..b85f4c9596449 100644 --- a/docs/docs/how_to/self_query.ipynb +++ b/docs/docs/how_to/self_query.ipynb @@ -95,7 +95,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chains.query_constructor.base import AttributeInfo\n", + "from langchain.chains.query_constructor.schema import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", "from langchain_openai import ChatOpenAI\n", "\n", diff --git a/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb index c5830e7db688f..59a4c2fa518c6 100644 --- a/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb @@ -136,7 +136,7 @@ }, "outputs": [], "source": [ - "from langchain.chains.query_constructor.base import AttributeInfo\n", + "from langchain.chains.query_constructor.schema import 
AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", "from langchain_openai import OpenAI\n", "\n", diff --git a/docs/docs/introduction.mdx b/docs/docs/introduction.mdx index 68d7aaa41868d..b3edcfbd15dff 100644 --- a/docs/docs/introduction.mdx +++ b/docs/docs/introduction.mdx @@ -33,7 +33,7 @@ Concretely, the framework consists of the following open-source libraries: - **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture. - **`langchain-community`**: Third-party integrations that are community maintained. - **[LangGraph](https://langchain-ai.github.io/langgraph)**: Build robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. Integrates smoothly with LangChain, but can be used without it. -- **[LangServe](/docs/langserve)**: Deploy LangChain chains as REST APIs. +- **[LangGraph Platform](https://langchain-ai.github.io/langgraph/concepts/#langgraph-platform)**: Deploy LLM applications built with LangGraph to production. - **[LangSmith](https://docs.smith.langchain.com)**: A developer platform that lets you debug, test, evaluate, and monitor LLM applications. 
diff --git a/docs/docs/tutorials/summarization.ipynb b/docs/docs/tutorials/summarization.ipynb index c63f4c6479140..1669f5f071dce 100644 --- a/docs/docs/tutorials/summarization.ipynb +++ b/docs/docs/tutorials/summarization.ipynb @@ -156,7 +156,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet tiktoken langchain langgraph beautifulsoup4\n", + "%pip install --upgrade --quiet tiktoken langchain langgraph beautifulsoup4 langchain-community\n", "\n", "# Set env var OPENAI_API_KEY or load from a .env file\n", "# import dotenv\n", diff --git a/docs/docs/versions/v0_3/index.mdx b/docs/docs/versions/v0_3/index.mdx index f8cba890d7420..f553ab1986616 100644 --- a/docs/docs/versions/v0_3/index.mdx +++ b/docs/docs/versions/v0_3/index.mdx @@ -132,7 +132,7 @@ should ensure that they are passing Pydantic 2 objects to these APIs rather than Pydantic 1 objects (created via the `pydantic.v1` namespace of pydantic 2). :::caution -While `v1` objets may be accepted by some of these APIs, users are advised to +While `v1` objects may be accepted by some of these APIs, users are advised to use Pydantic 2 objects to avoid future issues. 
::: diff --git a/libs/community/langchain_community/vectorstores/opensearch_vector_search.py b/libs/community/langchain_community/vectorstores/opensearch_vector_search.py index 3e5bb280035a4..f08620eef6383 100644 --- a/libs/community/langchain_community/vectorstores/opensearch_vector_search.py +++ b/libs/community/langchain_community/vectorstores/opensearch_vector_search.py @@ -401,7 +401,7 @@ def __init__( self.is_aoss = _is_aoss_enabled(http_auth=http_auth) self.client = _get_opensearch_client(opensearch_url, **kwargs) self.async_client = _get_async_opensearch_client(opensearch_url, **kwargs) - self.engine = kwargs.get("engine") + self.engine = kwargs.get("engine", "nmslib") @property def embeddings(self) -> Embeddings: @@ -420,7 +420,7 @@ def __add( index_name = kwargs.get("index_name", self.index_name) text_field = kwargs.get("text_field", "text") dim = len(embeddings[0]) - engine = kwargs.get("engine", "nmslib") + engine = kwargs.get("engine", self.engine) space_type = kwargs.get("space_type", "l2") ef_search = kwargs.get("ef_search", 512) ef_construction = kwargs.get("ef_construction", 512) @@ -461,7 +461,7 @@ async def __aadd( index_name = kwargs.get("index_name", self.index_name) text_field = kwargs.get("text_field", "text") dim = len(embeddings[0]) - engine = kwargs.get("engine", "nmslib") + engine = kwargs.get("engine", self.engine) space_type = kwargs.get("space_type", "l2") ef_search = kwargs.get("ef_search", 512) ef_construction = kwargs.get("ef_construction", 512) @@ -530,7 +530,7 @@ def create_index( ) if is_appx_search: - engine = kwargs.get("engine", "nmslib") + engine = kwargs.get("engine", self.engine) space_type = kwargs.get("space_type", "l2") ef_search = kwargs.get("ef_search", 512) ef_construction = kwargs.get("ef_construction", 512)