diff --git a/.github/scripts/get_min_versions.py b/.github/scripts/get_min_versions.py index 0838a6e7e3957f..8df28865ae9965 100644 --- a/.github/scripts/get_min_versions.py +++ b/.github/scripts/get_min_versions.py @@ -23,6 +23,7 @@ "langchain-community", "langchain", "langchain-text-splitters", + "numpy", "SQLAlchemy", ] diff --git a/.github/workflows/_release.yml b/.github/workflows/_release.yml index bf0c1e0454f20c..6dcbfcda11043c 100644 --- a/.github/workflows/_release.yml +++ b/.github/workflows/_release.yml @@ -219,7 +219,11 @@ jobs: # Replace all dashes in the package name with underscores, # since that's how Python imports packages with dashes in the name. - IMPORT_NAME="$(echo "$PKG_NAME" | sed s/-/_/g)" + if [ "$PKG_NAME" == "langchain-tests" ]; then + IMPORT_NAME="langchain_standard_tests" + else + IMPORT_NAME="$(echo "$PKG_NAME" | sed s/-/_/g)" + fi poetry run python -c "import $IMPORT_NAME; print(dir($IMPORT_NAME))" diff --git a/README.md b/README.md index ee4f1018ed27f0..f3e64b60cdb7fa 100644 --- a/README.md +++ b/README.md @@ -129,7 +129,7 @@ Please see [here](https://python.langchain.com) for full documentation, which in - [πŸ¦œπŸ› οΈ LangSmith](https://docs.smith.langchain.com/): Trace and evaluate your language model applications and intelligent agents to help you move from prototype to production. - [πŸ¦œπŸ•ΈοΈ LangGraph](https://langchain-ai.github.io/langgraph/): Create stateful, multi-actor applications with LLMs. Integrates smoothly with LangChain, but can be used without it. -- [πŸ¦œπŸ“ LangServe](https://python.langchain.com/docs/langserve): Deploy LangChain runnables and chains as REST APIs. +- [πŸ¦œπŸ•ΈοΈ LangGraph Platform](https://langchain-ai.github.io/langgraph/concepts/#langgraph-platform): Deploy LLM applications built with LangGraph into production. 
## πŸ’ Contributing diff --git a/docs/cassettes/llm_chain_1b2481f0.msgpack.zlib b/docs/cassettes/llm_chain_1b2481f0.msgpack.zlib index d8941b0e1a6d87..854400a94ec8ae 100644 --- a/docs/cassettes/llm_chain_1b2481f0.msgpack.zlib +++ b/docs/cassettes/llm_chain_1b2481f0.msgpack.zlib @@ -1 +1 @@ -eNqdVWtsFFUULrb4TvxjpPERx0aDms7szL63a4Nlu7SFttvultKl0XX2zt3daWfmTufe2e4WQUTkD4KODzQhosh2V2opkOIDFIwxRGLUaDRqMT4SH2DUKLHGIAre2W6lDfxyk93ZO+ec755zvu/cu6GUhQaWkbZgXNYINERA6AJbG0oGHDIhJhuLKiQZJBW6IrGeXaYhT92ZIUTHDQ6HqMsc0qEmyhxAqiMrOEBGJA76X1dgGaaQRFJ+6uiaOhViLKYhrmtg+tfUAUS30ghd1PUYooYVkUCGZCCTQoqChmUtzaQMpDJhLa3IOMPQzBDTRkRFFrW6eqbOQAq0g3EeE6jWra1n5mFm5FvmepkYGnVr76FvVCRBxX6V1gnrtn00uhLoExMDiipdpEQFQ/qC4uq0G8Q0bAye860tZaAo0V49WsggTKyJ+dXvFQGAFBVqAEm0AGtPekTW6xkJpuzqxmh6Giz31hobhFBnaTFZWJyJsvaJuq7IQLTtjgGMtPFKOSzJ6/BC85hdE0sbqhHrQIQm0dTm6MpTmjRG4Nx+jt+XYzERZU2hfWcVkeZT1Mv21+cadBEMUhC2IgGrOBM8MdcHYWu0QwSR2DxI0QAZa1Q0VK97cu57w9SIrEKrFOq6cLuK8fx2Lk4QON/+ecA4rwFrtEzDq/OCITHyLEAUw9rJT8z2R4FammSsXULA96IBsU5FBx8q0jBi4g0FygV871ipor4XIitmSfyq6rpCM+XFOrzMkOsZp4+JQZ1x8k43I3gaPIEGt4dp6egZD1W26bkoDfvL6k1RKsKztJdAxtQGoTQWuijhh23CaTV2+lSfLMzpCEO2kpU13sdGZ8aObWuenFEXi4y0qMkj5W2tw2Xmh0dywxIwJSmTHVb5wIjbJSehCVIHKiG6gextaEKsiq1dHsE1UbHM9n6M1sqzAs/ywkEqfhlQqdnF6MggLIaADjrJW1P1qpizddboEjwuL8/zQTqMQDElGDOTzUil7OAgoxtQQaJ0KMfSiYGKrMqUmPJv5RDBVsFDg1+70IGgQUiPm1E/NfNH5toNaKPbJZwHcQcCgTcu7jQL5AsEvIFD830wnJuJ4FTxaxc6VAAKTpeKx3Oz/qwsWVO30kVCTPHJJO93wYAERJfkSfmdHp/g9fqB4BS9EtwbWsaGRJCBbKysPqvUHO9s6mgLjcUoegihQRk+fnxBdSIBUomk2hhP5Nyc2aeoruYYkHFPZ9ynu4QeLrBU7423rFTTaLi1vSvHmRLlyef0uT1On9/DChzPCZzAJtoVXVo2EHP2upSRZr8ZjSbC0e62RC9Cq92mSgY6w5isgJ2puBBvXcUZuWTK2x02V/uGMvnWeIuYBss5X743G44sHUo2OaNxdbBVb6JciiTT6AgyVJkybUxjZT5YOh+sPR3eBufsdAQZqayARm7+WRhkWukNEdGUfJCOFZUSpE9RhTGZwMZOpMGpJ2kPzKwsNQ4o7X7BuTI+1J7Melr5UDfn9iXwgAupnTmva6CbI95m4guhgLdtThOEgJ/lK33w8u6ydvjzqf/PrF7pY+eOOxvRZ67CkoawJqdSxRg06PhYY0BBpkSPdQMWKefRprh1wA8CIOmSUv6A3+VPAR+7lB6Ys2j/HQ4F+04o34kPFu2h09JHF4Rv3nx5VflTTb/nzpHHhMjb/LUb//z7yt9/uO7lDub2/v6aen4fs7HruwbH7rdOTSqr2LG/vra+dF9RvWLTmfufXjPcXlsdb3rrbuXgoj3dH3/fsPcoeOX4m9yqyLsnT//w/bpz63/84uzpf6qe+Ob59xdd+cDQPVcNbFnqaHtu/75jjZs+yR85Nrk9vO7p8IfRh+/8/PpvS1MkG4wE39lWuPuyhWdaa8HPtW/u3D7xW9XC7R3c8s0fnLp058mbhUu39XStDy5+9sTxt6X1Gw603PTTxG2ll4TJvl9vvObZ6c7dnUv+WBL7pHbFDZ+dunfv6Datf/V3W8nl2R1XN48srz3zSMsvZ6drwG7PkenpLQ013KHt9z2D1svv3JWfWLZjekkkvvNE9OBHnD6x2HTVvPfX1lOt+pIPQP+nUV9my1NDRf/RO04vtHtVXbW8pufemy6pqvoXOupqkg== \ No newline at end of file 
+eNqdVX1sE+cZT0B0qKgd3daxqWw73KqbSu5ydz5/xfM2xw6J58QOcWgI3Rq9vnvtu/ju3uM+/BGCplKmUVGJXkcrbR10LcZOoizAkjEGI+oWLZoKq9YKFaWldGu3BtRSVU2r/VEV9p7jjETw1+6Pu3vf9/n8/Z7nefdU81A3JKQ2jkuqCXXAm3hh2HuqOtxpQcPcW1GgKSKh3J1M9R6xdGnuIdE0NaOluRloEoU0qAKJ4pHSnGeaeRGYzfhfk2HNTDmNhNLc7C6XAg0DZKHhaiEe2eXiEXalmnjh6tWBasjAhIQpQiKDZBkVJDVLZHSkEG1qVpYMkcCRISJmAlkCqquJcOlIho6yUTJMqLh2NxErbIrSpuVSlgF11+4f4x0FCVB2trKaSXLIEVLxksFfw9QhUPAiA2QD4g1sWMNwmJbuGKEp3+6qCIGAwTpQFpFh2hMr0z8GeB5is1DlkYAzsH+THZK0JkKAGSe9MRyfCmvg2mM5CDUSZ5OHlUUt+zjQNFnigXPePGggdbyeD2mWNHjr8ZiTFIkRVU17KomDCMeau0uYJ5VgKA9LsceLpGECSZUx8KQMcDwVrXZ+ZvmBBvgcNkLWa8CuLCpPLJdBhn20C/DJ1AqTQOdF+yjQFS83uXxft1RTUqBdjXTf6q5+eNOdm2IYyndihWGjpPL20RoNv1+hDE29RPII27BfoCeW8JGhmjVF+wgT8I/o0NBw1cHHK1jNtIw9ZcwFPP/Xar38XkzGl0i83LChHMW82Gf7oNBEMG4igfIES7McfrUwXAvrJdq7escjdTe9t6XhRK18M5iKtiXaq7xoqTkojEVuS/hZh3CcjRM+LlASFjVkQLIelT2+nexZ7DsyFp1crC4S6VmgSkM1t/bZGvOFoWJB4C1BEPMFhQ4McW4pDS0+M1VX0XTkuMEBkYphlzl3wD1RP1oCfwwnS5MMTdLM6SKJKx3KkiJhQGvvevdjXQ9N06duFTBRDuI5UfHTzjO9XECHCibN8X3TChcIBP54e6ElSz4s4g2cXilkwOWxMKxinLpVoG7hCD4dLy6Jk5Jgzz2AFwOsL+BxwwAHPZ6MD3qghwdpkM54PCz0s5Dx/QE3v8RjMw6ZGtJN0oA8nnRmyZ5rUkDR6bOQm/G4vTjTIJ5GvGwJMGWlo8jJwQgSmg5lBIRjkS1kBPAiJFO1+rOr0f5EuCsWGUvhKCMI5ST49BuNqwcG+MxAWglFslsUQWkTOQADrVsf3trf2Z2Ss5FSoC2W2JFPx8F2X7idy5S6k2GS8eEIWL/f6yUZiqYYiiG5AQpFfJ5kh8aFt7WlVSHf0b1jW/uOIcrYQgl+fyfnicbZbYXWjnQ417MzkFZLBa53J62gMOqRmbQvzirdPX28mUAdRkEd9LVTHi6LswGmGGoOErg2JQxwqN4hJO4QcrE/uKX+CBJCDYMQtXIaBokOfEkkVbkUJFIOmBB/gQJTkglDCaTCuYMYAysvCaFYbzIeB34GJeStkUJKTLhzecjusOJ0f7vpLj5ctAa9lBXlEqhtGQgM5yPpOg5emluswpuh/59RndxOLm94Mqkt3oZVFRmqlMlUUlDH/WOP8TKyBDzYdVjBnPeE++0pP2QZr+ALsALr4/gMJFvxyFyy9r/xUHZuhSqQcY3leXtSdIdcGE23K0goIOT3cjRduzMfqzg1qWb/0jj8rf1rG2rP6ief3hqfob/y07c/25w+3Hf1dODi2bu+cWX/vi/fG/3hs5dO/kJSO1jt5etnyu+/fve9w1dnPv8wEJo/OXDPpt47W+4/2Hd+4Y2R4vVTHZ9+3BYiN/z8kz89uqHn0lvDly+j5y8uPHn3nlV9uX9c/3XL/Pb71jz64mVefay0dmLm3KsfuQ4lvj1zx4Xv/GBNvzT83Vl/12s7f9X5rvDanZsPbtx4yL64b33r9C+z+0bWDP/967E7fha765UTX7rywano2t+N/HkvO19e6NzVcoVovxb2jR57YGbye5Pd8sZ1r36yZTQ/HZzuBFLTxI/OHVhoHbw6dmmVMfL4iQVRjBTu2YS6Rv8WSww2vT47/823w1+YmboWPbM5r11gDxwePfwe/X3m3Jrig8+8/9ZefXbz+kO/DRJvrn0itP7Th26c//i5/0yfvPDvn3wQfCnwtS+6fF/d+6/q7jOedf/c8FnuWvGdB2nrkfkXXlk3Gj8++/LnfVTn4Ubhqa7KS+LU/unGhoYbN1Y3rFv1/HMfrmpo+C/xZpFc \ No newline at end of file diff --git a/docs/cassettes/llm_chain_3e45595a.msgpack.zlib b/docs/cassettes/llm_chain_3e45595a.msgpack.zlib index df51bda179f41d..481dc32e51e9c3 100644 --- a/docs/cassettes/llm_chain_3e45595a.msgpack.zlib +++ b/docs/cassettes/llm_chain_3e45595a.msgpack.zlib @@ -1 +1 @@ 
-eNptVQ1sE2UY3iCKI/GHIEQiQlkkKtnd7vp37caQrRvb2Fi3dowV0Hq9+9rednffcd93WzsCxkFUQiRcECIhUXFda+bcRkD+BBOVP6NkigacGogGIYZEwWggRsDvuk62jEv6833v+z7v3/O+153pADqSoJrfL6kY6LyAyQGZ3RkdrDMAwpvTCsBxKKYa/cHmHkOXRhbFMdZQSXExr0k01IDKS7QAleIOtliI87iY/NdkkIVJRaCYHDm8vlABCPExgApLbGvWFwqQuFIxORQ267yKZB4DG44DWxTKMuyU1JiNBANtEuZliVdLCotshTqUgWWAkggDpXBDkW0CTlwar2QgoBdueIHcKFAEsnUV0zDltHRUcmLJL8I64BVyiPIyAuSCwGqkANjQLQyG5jZk4oAXSXku5j2WikOEzYGJKQ/yggAILlAFKJKozQ9jXZJWZBNB1Eqpj8SngmxBzb52ADSKpNMB0qNW5hCvabIk8Ja8uA1BtT+XD4WTGpgs7rOyokgVVWwe8JMgymuLG5OkN6qNpZ0emhlKUAjzkiqTYlMyT+JJa1n5x+MFGi+0ExAq13czPWo8MF4HIrN3BS/4gxMgeV2Im728rrid+8ff64aKJQWYGV/jZHc54T13DpplaW7fBGCUVAWzN9uIQxOMAdaTlAAJhrmXSQsQtkvAHPkzHBai4YhSFgonnLTRKiuOyqAgoeaGEKc52GbaW6G1hKpXKjHYWVPfmKANkaFYzs45XXbO46JYmqFZmqXC9bImLmsL2lscclelxwgEwlWBptpwC4SrnYaC2xqqEK4DDdEQG6pZReuJSNTdVGWs5tbFkzWhaj4mLKe5ZEtHlb9iXaTcHggp7TVaeamNRGd0SGJZm1zvYe0rQ+vqIx2uGsbXRDu5MGpzQKUh4Xa0NdHYXYk5H/S6a8eFx3o9FJOL0M04PYz1DIxxQwZqDMfNHtbjfF8HSCNTBjalScmwgbpThIfgqzOZ3Li956+7R+FZqUrCSfP4Ml0qstk5WxBoNjtjd9pYV4nLW+LkbNUrmvt9OTfN96Xgvuy4RgkNq8YonxHihtoOxD7ffcl+3CI76aQVPplOCiQ0iACVi8rsb6UCo3uGqq3cPzpZFNRjvCp1Zd2ax7Os7+xKdIqCIYrxjk6F8XY5HVIEGEL0QM5E06HlhgREKcjscbDugZxkjHd9JFdCAoZi2CNk9CWBjJmVjAZ1TCEgkM2Gk+ZIkcInrBkrc7Auh5sUvpSsIkE2RBA0IpVQIcxEpTZNBzLkxaMJiuwLIEuKRBqT/c5tTWSmXMT48GQFDNsB2a+92bZ+Ml6uAwvdSuEeiNPr9R67v9IYEOf1cvajE3UQGB8Ja1fQ4ckKOYCUnVVQf2JMn5JEc+RpcgjbgV2Ielgv7+IjEYHxsC7ew7HAzXgZ4Ha77YO+ZZSPF+KACmbZZ2YqQw3lK2p9B1up8TSi/NroOyWjQqRK0Wg6CHTSFrNPkKEhklWpgzTBCpSHzAMewStEHEAUIizriQocVUGW0Bja/6RLWXs2+3J5JW01U42dzK+Yv/WhvOwzlXzu3sXbWfgZ8/jmm/9Ov3a2ceFzC599si3zSK/vteUrZ+2+vPCwZ+PA7e1LX7/7oreg4KnWxT9dLX3nj7kF+X3da5ce2bH2YMvwheHQG1d+5ZacWx+5uefXH46d+u3HK8ifefStB6YpLw9d/uDa3tT5NrEqfbrX98UTgR1FZ2f7pYOrO97tnW4MuXZd3Zo8Vj9v2ultzVvm6TdmFSw4caTUxwV/nqHPGB5YvvXilw8umF9ckL8NNy7tGlxc9zmz56XaV7/5y9iZWnN5aElizqLdm85v2jjn0rnvWqvXPH+9aXBn08ZVnjffrj/zd/3skq4tm2/9fgffWjWzv+e8/umB6plTdqycP+3brpn1006dCFw6c6PM37Sgrvf22aKvLwTTu6f8sk+4WXv9meHvZ39kf7jkWvOhAHdy4T9TrUJNzbtTtWzX3Cl5ef8BjH8o1g== \ No newline at end of file 
+eNptVX1sE2UYH5AsKGSggkii8WxQkOy6u/b6tTml7cY2C+3YxsZmZLy9e9vednfvcR/tuoHIlxGB6AExGJc42WhhjjHkQ4RhNAZiBBQlIAODIwYSjGGIwUjE4HtdJ1vg/rj2fZ/n/T2/53l/z3Nr0nGoqDySxvXwkgYVwGp4oRpr0gpcrkNVW5cSoRZDXFdlqLqmU1f4gbkxTZPVwoICIPNWJEMJ8FYWiQVxuoCNAa0A/5cFmIHpCiMuOXCizSJCVQVRqFoKidfaLCzCoSQNLyw1CpBUAWiQ0GKQiCBBQAleihIRBYlEqRQVeDVGYGaIqNCAwAPJkk9YFCRA87CaVDUoWlbmE2MwY/xzo710FSqWla/jHRFxUDC3orJGMsh0kvCSxr+qpkAg4kUECCrEGxhYxuXQdMUEoayulekYBBwu1uWcqV0xpGpG79gC7AUsCzEwlFjE4RyMPdFWXs4nOBgxE+zGDCWYKa/R3QyhTOJ84jA1fMroA7Is8Cww7QVNKpJ6shmRWlKGD5q7zbRIXFNJMw6EMAlvRUFlEt+URNBWh81q62shVQ3wkoBLTwoA80nJGfvR0QYZsM0YhMyqwEgNH+4d7YNUY+dCwIaqx0AChY0ZO4EiOpn9o/cVXdJ4ERppf+WD4bLG++HsVpq2uvaNAVaTEmvszFzEZ2MOQ01JkizCGMbHVIpFqJmHxsCtxkY20hgWi/3R+SInlsYYAD2+RbWL6hdUVgtRf9JTWhFsiIcDYInLW8ZEkpUhL0m77LTD5nY7nSRtpay0lSaZRivyuxyhcpnxLi4NS1y8vLJhcVlDq1Wdb+Xc7gWMoyRgW5zwlYe9zVXLPWEpmWBqllMi8qIqgQ67AjaxsqqO1YKoXE1ITa4yq4OJFhGYnR7nueKKmlAgANw0CgqL/InqWNDeHIe2Bj1A1Zdp9pbaFr3JadVLmCAqHUWPZlwklWXopBg3ZT69I9oQoBTVYkYn7XHvUqAq456Da1O4ZJqurunCOoSnvklnm29HKHBfwk92lWBNGsfqIJdP0HYiiOKEjbIx+FVIM4U2J1G2sKbHnw1T81AJ7ss0bwTLsHRE8mk2pkvNkOv2P1Tsx0yx45s06eP2JGGLjFRIZlkZPUvIquGpQ1aU7B/uLBIpUSDxrZmwxrGM6hOtLQmO1TkuFk+IlKeVsfNhqLORA9kjsoLMMJgQKapGp81J92YtI7rrxrlSJE2RFH2khcRtDgVe5HE9M+/s6FONLgcu9uEHHTTUDPGQTA3fxhejHRQoYr2aoe+jMB6Pp//hTiNILuzi9BwZ66TC0Vxom6geftAhi9CJrT0tI+4kzxkDs/CikXW6AaAYB+WgKQ9nd1F2t9PF2AHkIu5IxBX+HE8+nsUw5l3KSNFIFbJ4zGtJYyBfBC3miCnGYrQ7caZFeBSzgs7Baj1cgswc1CJCVqCAALfXP5/0AzYGyeqM/Ix0SX3Qu7DCf2gJOVpHZEge/sSkJaRKfCSSqoYKvhejmxWQzuFZqcAUxqry1hsH3NBGOzmPkwIszbARSPrwFBpB+191XeagTQMBc4+zxv6YvdhSyDB2SxEhgmK3k6GozIdodcrMVYoeH5d4duPEnMwzYdMWb2jtvKnrh+6+o39/vLLhscMfnFi7ofWHD2nu6OS++MDA79bdm6+vcgVePLNrw4y8P0+39btuTp/qK61Qfcmvn0YHJw8tHWL/6W9Lvv3X9JduTAkpx67k3M47VO+bsXT1pSmDN3qOXLvQ9EiwoxBIb3pu9p68dO7WC8Gzs657Nz5fXvDJwUk/PXViUX9eJ7Mpt/Fyx+AyPnh5vG/ind8+ennH3Ctbr81qaJ9NLjnZ1HHncMlEx5x/19ma1+sLPEe2E2VX593e7Vpz5Vz7zF2FJ1cw5yo8p9799eSs8xcHz+etaK/zPXJpWu720qub9/gXtrVOnpgcCugzPLudF5rq3D+vvvXWH2c2BOvaB5uW5T2z1X46+Qao3Ta4/b14/IlvZ6KO9m1nx9XXPdq+elntnej7v/SHDq7vvse+Mu47b9GcSWLHtLufDh2IfHkvvGrP5tk3JlS39eVd3PF4d27nZvfKq6/OmDkvt37L+M6v6L9zc3Lu3ZuQ82N9ue/G+Jyc/wBrj1IS \ No newline at end of file diff --git a/docs/cassettes/llm_chain_3e82f933.msgpack.zlib b/docs/cassettes/llm_chain_3e82f933.msgpack.zlib deleted file mode 100644 index f2968c0213a7b7..00000000000000 --- a/docs/cassettes/llm_chain_3e82f933.msgpack.zlib +++ /dev/null @@ -1 +0,0 @@ 
-eNptVWtsFFUULpSgEh8VRGOMYboaf5jOdGZfs1Mkpt2Wtva9Wyqr0XX2zt3daWfmTufeaXdbqxEJCVHUkYjvH6XLLja18mgEQYxRVDQIGo2mKBpf8YGvoMSoIXhnu5U2ZZJ93HvO+c7rO2c2FAaghVVkLJpQDQItGRB6wM6GggX7bYjJxrwOSRopuc6OaPeYbanTN6cJMXFNdbVsqhwyoSGrHEB69YBQDdIyqab/TQ0WYXIJpGSn3xr26BBjOQWxp4a5c9gDEHVlEHrwdFuygTWZQIakIZNEmoYGVSPFJC2kMw1GSlNxmqGRIaaZyJoqG54qxmMhDbrGOIsJ1D0jVcw8zLRaOVfLxtDyjNxFb3SkQM29SpmE9bs6Bj0J9BcTC8o6PSRlDUN6QXFNWg1iWy4Gz4kjhTSUFVqrL8oqcmmEiTM5P/+XZAAgxYUGQApNwXkxNaSaVYwCk25+4zRAAxar64z3QWiyNJ0BmJ+xcnbJpqmpQHbl1b0YGROlhFiSNeFC8bibFUtLahBnqoMGUdtc3ZmljTIYgfOHOH5XhsVEVg2NVp7VZBpP3izKD84VmDLooyBsiQROfsZ4cq4Ows6ONhl0ROdByhZIOztkSw/69869t2yDqDp0CuHOhe5KwvPufJwgcOLuecA4awBnR7ER++YZQ2JlWYAohjPK5wFCfSp0pk/H4yAZT+hrYvGMn7PXa7qvPgpU3N0eE02f0M1JdWZPrHGdnkKDTa2dGc5WeFYQvaI/4BVDAVbgeE7gBDbeqpnK2t6ot8enDdWH7Egk3hDpao73IHSH39ZJb3sDJi2wPRkTYk23c1YmkQx2Ndh3iP3pbFOsUU6B2zgx2zPQ0FHXn6j1RmJ6X5NZu5qh0dkDqrKmV2sNCd51sf7WxECgiQ93cX4xjnt9SG/PBH29XRwJ1hMxjKRg85zwBCnE8qUIg7w/xLvP5Cw3NGikSNoZEyRxpwWxSUcOPpinJSM23pCjPIRHjxRKs7e9o+U8hVfm6iknnUNrLbWK8YpMFJqMl/f6GSFQE5Bq/EGmsa17Ilxy031BCu4uzm6S0rBhlvIFkLaNPqiMhy9I9kMu2Wkn3fDpdLIwYyIM2VJUzsR6NjKzdNjm+r0zk8UiKyUb6lDRrXOoyPrBocygAmxFSQ8M6rw05PepCWiD5FTJxLSQ64YGxOrYGQt6xcmSZJZ34zRXSgKe5YVX6OirgI6Zm4yJLMJiCOiaI1lnukqXM+6MrfEJAV+QFn41XUVAsxUYtRP1SKfMxKsZ04IakpUDGZbuC6ipukobU/wurVDs5ALUeP9CBYL6IF22O4ptfW2u3IIuupvCeRC/JEmvXlhpFkiUpKB0YL4OhnMjEbw63r9QoQSQ8/p0PJGZ1WdVxZm+kR7iCb8EkkmJktIrepWkKCdlPiRBRQ4lFN4bkF4Kr2XDMkhDNlpkn1Ooj7XXtjWHX17PzqUR22HOvGAKBsKGmkzmo9CibXHGgYZsha5KC+YpVqQ25kyFgAQSPhiSAAiGkkBk6+gSmkX7n3Q5d88W3zQP5N1mGqm3FoVXPXRxWfEpp59z58hj77e/yVds/C27bO/3V595dOl3myqWL2dClUvqnFPa8ug7fT9vWTL+z69H1GemLuKko98/ffQYd+VisujpsnUtPVt8Hz372dThr0dPPP7VFPvTwcKnT54+u/tjdPzLv/jNl5+4d/TrsVORrm8u3XPT8OvvjSkPv/AHfTtMXv3jmQPll61Mb3/RuuWuttSpv/cMfpxbufTI4UbPxoqDv153cHrx/cyKve92qfySWKXy5g2Vr2/dvOxIBblHvfamG7qVa38ZndxG+t+7ZZv+2O6t+pYfz5z93Rd+4o+pS9as+jNy5/U/fT52qQLMe6wPPhzxHE4Ndg9/suK5wtv9x6Yariu/5mwbf0x8Ax5/av0zK46fvrXjbqZFf+NdztxZ0ANLj2565KRTcV852PrkiZ4fnv/0ZOvJq/799gq3UuVly2L7Oq5fXFb2H5UFMyI= \ No newline at end of file diff --git a/docs/cassettes/llm_chain_6bacb837.msgpack.zlib b/docs/cassettes/llm_chain_6bacb837.msgpack.zlib deleted file mode 100644 index a8acc9ea3bb704..00000000000000 --- a/docs/cassettes/llm_chain_6bacb837.msgpack.zlib +++ /dev/null @@ -1 +0,0 @@ 
-eNptVWtsFFUULi/TABoSrYlBZNhIgqYzndnZ3dktElK2hdbSd6ksKMvszN3d6c7Mnc69s91tg0aeKkaZf4gBHyy7ZikFpEFEihKor6AQHz8agpqgJCoYwRJAMXhnu5U2MMk+7j3nfOf1nTPrc0lgIgXqk/oUHQNTlDA5IHt9zgRdFkB4Y1YDOA7lTHNTW/tuy1SGn4xjbKDKigrRUBhoAF1UGAlqFUmuQoqLuIL8N1RQgMlEoJweHup1aQAhMQaQq5Ja3euSIHGlY3JwtZuijlQRAwrHARWFqgq7FT1GRU2oUTV6TFVQnCKRQaoOi6oi6q5yymVCFTjGKI0w0FzryqkJmHFl3ngtCwHTte45cqNBGajOVczAtMfR0cmJI78Im0DUyCEqqgiQC4JrkGpgy3QwWEZYl4sDUSa1+qFkViYOEbb7J+a/X5QkQHCBLkGZpGDvi/UoRjklg6iTX54EqINCde18AgCDJukkQXbUyj4gGoaqSKIjr+hEUO8rJkTjtAHuFuedrGhSUh3bA00kiKq6iuY0aZROcYzHz7AHUjTCoqKrpPK0KpJ4skZB/tF4gSFKCQJCF0lgZ0eN+8frQGTvaRClprYJkKIpxe09oqn5PIfG35uWjhUN2Llg893uisI77niG4xjh4ARglNYle0+hER9MMAbYTNMSJBj2O2xWgjChAHv4ajgsRcMRbVEonPIw1kpV46vbJAW1N4YEg+famcASoyO0bIUWg921y5tTjCWzNCe4BY/XLfi9NMewDMdwdHi5ashLO9vcHbzaU+23WlvDNa0tdeEOCFd5LA13NtYgXA8aoyEuVPsMY6YiUV9LjbVK6Iqna0PLxJj0NCOkO5I1TUu6IlXu1pCWqDWqFlIkOiupyIs61eV+zr0i1LU8kvTWssEWxiOEUScPtcaUj+9sYbCvGgtBGPDVjQuPC/hpthihj/X4WefpH+OGCvQYjtu7uYDwngmQQUYObMiSkmELrc8QHoLTn+eKs/duU/0dCpdlqgkn7cGlplJOuQWqDRiUm3V7KM5b6Q1UerzUsob2vmDRTfs9KXiwMLtRQsOaMcrnpLilJ4CcD96T7IMO2UknnfDJdNIgZUAE6GJUdt9KunV06dB11YdGJ4uGZkzUlZ6CW3uwwPrunlS3LFmyHE92a2ygx8MrEWBJ0YGiiWFCxw0JiNaQvdsbYPuLkjHe5UmuhAQszXIfktFXJDJmTjIGNDGNgETWHE7bw+WamHJmbBHPeXkfKfxCsook1ZJBmxWphhphJlpIGSZQoSgfTdFkXwBV0RTSmMJ3cYUiO+MlxkfuVsAwAciy3VNo6/HxchM46E4Kd0A8gUDg2L2VxoCEQMAXODpRB4HxkXBuDR25W6EIkHHzGupLjenTimwPP04OYRDgOFkWWB/r41k37+b8gsT6vSDikyOAFGd/cCkdFKU4oNsK7LNz1aHGqoa64OGV9Hga0U3G6Asmp0OkK9Fotg2YpC12XlKhJZNVaYIswWqtCtkDfikgRXjAS1FW9EclgV5CltAY2v+kyzh7tvCmeTHrNFOPDU0Kzt1aWlJ4ppDP7dt4G9d0kp216fqt6Qht3TEwf8Hs/KnSmdR9G10Xnm3dcGHrpRl0/u8f7fOeJ6bUb75+c/BY75bq0rNlD03+ruP9zoX8x+d+tr5/YfHxi//0Xrt69qlroS9/5Y/uDF1e+/CjI1defuVMf6JGi23zDj/Y8cB8qbbhr/LwvkO3knOq6st2rE1U7tp/ceubOOA/nCnrWXBqtWvjrC037j+vbupdcCqS7Fg1ssZXOnSjttSQ10/+pvdyaNXm4alloaWzf9cHMnvrLx74Y3DmThWefnvuZ2890lm5bO7zP+1ek9j772G8YNHwrEtvVL40sr30ymOv/3Z7ZOPXLee6vhjZ5JsaPdnAfiWc+OWT7St3zDhzdXFTaF69dmuIMfI5jZ9W/ulrV2r/XLzrIJ3QZuOd33Zl/UPczWlOpaaUnJj+6mtzJpeU/AdOizBy \ No newline at end of file diff --git a/docs/docs/concepts/index.mdx b/docs/docs/concepts/index.mdx index cd7339daa6367a..c53cc21ce21f1a 100644 --- a/docs/docs/concepts/index.mdx +++ b/docs/docs/concepts/index.mdx @@ -68,6 +68,7 @@ The conceptual guide does not cover step-by-step instructions or specific implem - **[langchain](/docs/concepts/architecture#langchain)**: A package for higher level components (e.g., some pre-built chains). - **[langgraph](/docs/concepts/architecture#langgraph)**: Powerful orchestration layer for LangChain. Use to build complex pipelines and workflows. - **[langserve](/docs/concepts/architecture#langserve)**: Use to deploy LangChain Runnables as REST endpoints. Uses FastAPI. Works primarily for LangChain Runnables, does not currently integrate with LangGraph. +- **[LLMs (legacy)](/docs/concepts/text_llms)**: Older language models that take a string as input and return a string as output. - **[Managing chat history](/docs/concepts/chat_history#managing-chat-history)**: Techniques to maintain and manage the chat history. - **[OpenAI format](/docs/concepts/messages#openai-format)**: OpenAI's message format for chat models. - **[Propagation of RunnableConfig](/docs/concepts/runnables/#propagation-of-runnableconfig)**: Propagating configuration through Runnables. Read if working with python 3.9, 3.10 and async. 
diff --git a/docs/docs/contributing/how_to/code/guidelines.mdx b/docs/docs/contributing/how_to/code/guidelines.mdx index 175290a6428c2a..54bf05238d1cb9 100644 --- a/docs/docs/contributing/how_to/code/guidelines.mdx +++ b/docs/docs/contributing/how_to/code/guidelines.mdx @@ -29,7 +29,7 @@ or new agents/chains from outside contributors without an existing GitHub discus - New features must come with docs, unit tests, and (if appropriate) integration tests. - New integrations must come with docs, unit tests, and (if appropriate) integration tests. - - See [this page](../integrations.mdx) for more details on contributing new integrations. + - See [this page](../integrations/index.mdx) for more details on contributing new integrations. - New functionality should not inherit from or use deprecated methods or classes. - We will reject features that are likely to lead to security vulnerabilities or reports. - Do not add any hard dependencies. Integrations may add optional dependencies. diff --git a/docs/docs/contributing/how_to/index.mdx b/docs/docs/contributing/how_to/index.mdx index 820c20c97a5e6e..e4eda7ddbc1d8a 100644 --- a/docs/docs/contributing/how_to/index.mdx +++ b/docs/docs/contributing/how_to/index.mdx @@ -2,4 +2,4 @@ - [**Documentation**](documentation/index.mdx): Help improve our docs, including this one! - [**Code**](code/index.mdx): Help us write code, fix bugs, or improve our infrastructure. -- [**Integrations**](integrations.mdx): Help us integrate with your favorite vendors and tools. \ No newline at end of file +- [**Integrations**](integrations/index.mdx): Help us integrate with your favorite vendors and tools. \ No newline at end of file diff --git a/docs/docs/contributing/how_to/integrations.mdx b/docs/docs/contributing/how_to/integrations.mdx deleted file mode 100644 index ed2cc9ec449c67..00000000000000 --- a/docs/docs/contributing/how_to/integrations.mdx +++ /dev/null @@ -1,203 +0,0 @@ ---- -sidebar_position: 5 ---- - -# Contribute Integrations - -To begin, make sure you have all the dependencies outlined in guide on [Contributing Code](code/index.mdx). - -There are a few different places you can contribute integrations for LangChain: - -- **Community**: For lighter-weight integrations that are primarily maintained by LangChain and the Open Source Community. -- **Partner Packages**: For independent packages that are co-maintained by LangChain and a partner. - -For the most part, **new integrations should be added to the Community package**. Partner packages require more maintenance as separate packages, so please confirm with the LangChain team before creating a new partner package. - -In the following sections, we'll walk through how to contribute to each of these packages from a fake company, `Parrot Link AI`. - -## Community package - -The `langchain-community` package is in `libs/community` and contains most integrations. - -It can be installed with `pip install langchain-community`, and exported members can be imported with code like - -```python -from langchain_community.chat_models import ChatParrotLink -from langchain_community.llms import ParrotLinkLLM -from langchain_community.vectorstores import ParrotLinkVectorStore -``` - -The `community` package relies on manually-installed dependent packages, so you will see errors -if you try to import a package that is not installed. In our fake example, if you tried to import `ParrotLinkLLM` without installing `parrot-link-sdk`, you will see an `ImportError` telling you to install it when trying to use it. 
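The optional-dependency behaviour described in that paragraph is typically implemented with a guarded import. A minimal sketch, in which the `parrot_link_sdk` module name is purely hypothetical:

```python
# Sketch of the guarded-import pattern behind the ImportError described above.
# The module `parrot_link_sdk` is hypothetical; the SDK is imported lazily and a
# helpful message is raised only when the integration is actually used.
from typing import Any


def _import_parrot_link_sdk() -> Any:
    try:
        import parrot_link_sdk  # hypothetical optional dependency
    except ImportError as e:
        raise ImportError(
            "Could not import parrot-link-sdk. "
            "Please install it with `pip install parrot-link-sdk`."
        ) from e
    return parrot_link_sdk
```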
- -Let's say we wanted to implement a chat model for Parrot Link AI. We would create a new file in `libs/community/langchain_community/chat_models/parrot_link.py` with the following code: - -```python -from langchain_core.language_models.chat_models import BaseChatModel - -class ChatParrotLink(BaseChatModel): - """ChatParrotLink chat model. - - Example: - .. code-block:: python - - from langchain_community.chat_models import ChatParrotLink - - model = ChatParrotLink() - """ - - ... -``` - -And we would write tests in: - -- Unit tests: `libs/community/tests/unit_tests/chat_models/test_parrot_link.py` -- Integration tests: `libs/community/tests/integration_tests/chat_models/test_parrot_link.py` - -And add documentation to: - -- `docs/docs/integrations/chat/parrot_link.ipynb` - -## Partner package in LangChain repo - -:::caution -Before starting a **partner** package, please confirm your intent with the LangChain team. Partner packages require more maintenance as separate packages, so we will close PRs that add new partner packages without prior discussion. See the above section for how to add a community integration. -::: - -Partner packages can be hosted in the `LangChain` monorepo or in an external repo. - -Partner package in the `LangChain` repo is placed in `libs/partners/{partner}` -and the package source code is in `libs/partners/{partner}/langchain_{partner}`. - -A package is -installed by users with `pip install langchain-{partner}`, and the package members -can be imported with code like: - -```python -from langchain_{partner} import X -``` - -### Set up a new package - -To set up a new partner package, use the latest version of the LangChain CLI. You can install or update it with: - -```bash -pip install -U langchain-cli -``` - -Let's say you want to create a new partner package working for a company called Parrot Link AI. - -Then, run the following command to create a new partner package: - -```bash -cd libs/partners -langchain-cli integration new -> Name: parrot-link -> Name of integration in PascalCase [ParrotLink]: ParrotLink -``` - -This will create a new package in `libs/partners/parrot-link` with the following structure: - -``` -libs/partners/parrot-link/ - langchain_parrot_link/ # folder containing your package - ... - tests/ - ... - docs/ # bootstrapped docs notebooks, must be moved to /docs in monorepo root - ... - scripts/ # scripts for CI - ... - LICENSE - README.md # fill out with information about your package - Makefile # default commands for CI - pyproject.toml # package metadata, mostly managed by Poetry - poetry.lock # package lockfile, managed by Poetry - .gitignore -``` - -### Implement your package - -First, add any dependencies your package needs, such as your company's SDK: - -```bash -poetry add parrot-link-sdk -``` - -If you need separate dependencies for type checking, you can add them to the `typing` group with: - -```bash -poetry add --group typing types-parrot-link-sdk -``` - -Then, implement your package in `libs/partners/parrot-link/langchain_parrot_link`. - -By default, this will include stubs for a Chat Model, an LLM, and/or a Vector Store. You should delete any of the files you won't use and remove them from `__init__.py`. - -### Write Unit and Integration Tests - -Some basic tests are presented in the `tests/` directory. You should add more tests to cover your package's functionality. - -For information on running and implementing tests, see the [Testing guide](testing.mdx). 
- -### Write documentation - -Documentation is generated from Jupyter notebooks in the `docs/` directory. You should place the notebooks with examples -to the relevant `docs/docs/integrations` directory in the monorepo root. - -### (If Necessary) Deprecate community integration - -Note: this is only necessary if you're migrating an existing community integration into -a partner package. If the component you're integrating is net-new to LangChain (i.e. -not already in the `community` package), you can skip this step. - -Let's pretend we migrated our `ChatParrotLink` chat model from the community package to -the partner package. We would need to deprecate the old model in the community package. - -We would do that by adding a `@deprecated` decorator to the old model as follows, in -`libs/community/langchain_community/chat_models/parrot_link.py`. - -Before our change, our chat model might look like this: - -```python -class ChatParrotLink(BaseChatModel): - ... -``` - -After our change, it would look like this: - -```python -from langchain_core._api.deprecation import deprecated - -@deprecated( - since="0.0.", - removal="0.2.0", - alternative_import="langchain_parrot_link.ChatParrotLink" -) -class ChatParrotLink(BaseChatModel): - ... -``` - -You should do this for *each* component that you're migrating to the partner package. - -### Additional steps - -Contributor steps: - -- [ ] Add secret names to manual integrations workflow in `.github/workflows/_integration_test.yml` -- [ ] Add secrets to release workflow (for pre-release testing) in `.github/workflows/_release.yml` - -Maintainer steps (Contributors should **not** do these): - -- [ ] set up pypi and test pypi projects -- [ ] add credential secrets to Github Actions -- [ ] add package to conda-forge - -## Partner package in external repo - -Partner packages in external repos must be coordinated between the LangChain team and -the partner organization to ensure that they are maintained and updated. - -If you're interested in creating a partner package in an external repo, please start -with one in the LangChain repo, and then reach out to the LangChain team to discuss -how to move it to an external repo. diff --git a/docs/docs/contributing/how_to/integrations/community.mdx b/docs/docs/contributing/how_to/integrations/community.mdx new file mode 100644 index 00000000000000..0b965b0e4c9971 --- /dev/null +++ b/docs/docs/contributing/how_to/integrations/community.mdx @@ -0,0 +1,50 @@ +## How to add a community integration (deprecated) + +:::danger + +We are no longer accepting new community integrations. Please see the +[main integration guide](./index.mdx) for more information on contributing new +integrations. + +::: + +The `langchain-community` package is in `libs/community`. + +It can be installed with `pip install langchain-community`, and exported members can be imported with code like + +```python +from langchain_community.chat_models import ChatParrotLink +from langchain_community.llms import ParrotLinkLLM +from langchain_community.vectorstores import ParrotLinkVectorStore +``` + +The `community` package relies on manually-installed dependent packages, so you will see errors +if you try to import a package that is not installed. In our fake example, if you tried to import `ParrotLinkLLM` without installing `parrot-link-sdk`, you will see an `ImportError` telling you to install it when trying to use it. + +Let's say we wanted to implement a chat model for Parrot Link AI. 
We would create a new file in `libs/community/langchain_community/chat_models/parrot_link.py` with the following code: + +```python +from langchain_core.language_models.chat_models import BaseChatModel + +class ChatParrotLink(BaseChatModel): + """ChatParrotLink chat model. + + Example: + .. code-block:: python + + from langchain_community.chat_models import ChatParrotLink + + model = ChatParrotLink() + """ + + ... +``` + +And we would write tests in: + +- Unit tests: `libs/community/tests/unit_tests/chat_models/test_parrot_link.py` +- Integration tests: `libs/community/tests/integration_tests/chat_models/test_parrot_link.py` + +And add documentation to: + +- `docs/docs/integrations/chat/parrot_link.ipynb` diff --git a/docs/docs/contributing/how_to/integrations/from_template.mdx b/docs/docs/contributing/how_to/integrations/from_template.mdx new file mode 100644 index 00000000000000..e2edc80eae06ac --- /dev/null +++ b/docs/docs/contributing/how_to/integrations/from_template.mdx @@ -0,0 +1,132 @@ +# How to publish an integration package from a template + +:::danger +This guide is a work-in-progress. +::: + +First, duplicate this template repository: https://github.com/langchain-ai/integration-repo-template + +In this guide, we will create a `libs/langchain-parrot-link` folder, simulating the creation +of a partner package for a fake company, "Parrot Link AI". + +A package is +installed by users with `pip install langchain-{partner}`, and the package members +can be imported with code like: + +```python +from langchain_{partner} import X +``` + +## Set up a new package + +To set up a new partner package, use the latest version of the LangChain CLI. You can install or update it with: + +```bash +pip install -U langchain-cli +``` + +Let's say you want to create a new partner package working for a company called Parrot Link AI. + +Then, run the following command to create a new partner package: + +```bash +mkdir libs +cd libs/ +langchain-cli integration new +> Name: parrot-link +> Name of integration in PascalCase [ParrotLink]: ParrotLink +``` + +This will create a new package in `libs/parrot-link` with the following structure: + +``` +libs/parrot-link/ + langchain_parrot_link/ # folder containing your package + ... + tests/ + ... + docs/ # bootstrapped docs notebooks, must be moved to /docs in monorepo root + ... + scripts/ # scripts for CI + ... + LICENSE + README.md # fill out with information about your package + Makefile # default commands for CI + pyproject.toml # package metadata, mostly managed by Poetry + poetry.lock # package lockfile, managed by Poetry + .gitignore +``` + +## Implement your package + +First, add any dependencies your package needs, such as your company's SDK: + +```bash +poetry add parrot-link-sdk +``` + +If you need separate dependencies for type checking, you can add them to the `typing` group with: + +```bash +poetry add --group typing types-parrot-link-sdk +``` + +Then, implement your package in `libs/partners/parrot-link/langchain_parrot_link`. + +By default, this will include stubs for a Chat Model, an LLM, and/or a Vector Store. You should delete any of the files you won't use and remove them from `__init__.py`. + +## Write Unit and Integration Tests + +Some basic tests are presented in the `tests/` directory. You should add more tests to cover your package's functionality. + +For information on running and implementing tests, see the [Testing guide](../testing.mdx). 
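Beyond the generated test stubs, you can also add ordinary pytest tests of your own. A minimal sketch, assuming the package exposes the `ChatParrotLink` chat model used as the running example in this guide (the file name and parameters are illustrative, mirroring the standard-test examples later in this PR):

```python
# tests/unit_tests/test_chat_models_init.py -- illustrative extra unit test.
# Assumes the package exposes ChatParrotLink, the running example in this guide.
from langchain_parrot_link.chat_models import ChatParrotLink


def test_initialization() -> None:
    # Parameters mirror the chat_model_params used in the standard tests below.
    model = ChatParrotLink(model="bird-brain-001", temperature=0)
    assert model is not None
```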
+ +## Write documentation + +Documentation is generated from Jupyter notebooks in the `docs/` directory. You should place the notebooks with examples +to the relevant `docs/docs/integrations` directory in the monorepo root. + +## (If Necessary) Deprecate community integration + +Note: this is only necessary if you're migrating an existing community integration into +a partner package. If the component you're integrating is net-new to LangChain (i.e. +not already in the `community` package), you can skip this step. + +Let's pretend we migrated our `ChatParrotLink` chat model from the community package to +the partner package. We would need to deprecate the old model in the community package. + +We would do that by adding a `@deprecated` decorator to the old model as follows, in +`libs/community/langchain_community/chat_models/parrot_link.py`. + +Before our change, our chat model might look like this: + +```python +class ChatParrotLink(BaseChatModel): + ... +``` + +After our change, it would look like this: + +```python +from langchain_core._api.deprecation import deprecated + +@deprecated( + since="0.0.", + removal="0.2.0", + alternative_import="langchain_parrot_link.ChatParrotLink" +) +class ChatParrotLink(BaseChatModel): + ... +``` + +You should do this for *each* component that you're migrating to the partner package. + +## Additional steps + +Contributor steps: + +- [ ] Add secret names to manual integrations workflow in `.github/workflows/_integration_test.yml` +- [ ] Add secrets to release workflow (for pre-release testing) in `.github/workflows/_release.yml` +- [ ] set up pypi and test pypi projects +- [ ] add credential secrets to Github Actions +- [ ] add package to conda-forge diff --git a/docs/docs/contributing/how_to/integrations/index.mdx b/docs/docs/contributing/how_to/integrations/index.mdx new file mode 100644 index 00000000000000..5f08dce43606e2 --- /dev/null +++ b/docs/docs/contributing/how_to/integrations/index.mdx @@ -0,0 +1,79 @@ +--- +sidebar_position: 5 +--- + +# Contribute Integrations + +LangChain integrations are packages that provide access to language models, vector stores, and other components that can be used in LangChain. + +This guide will walk you through how to contribute new integrations to LangChain, by +publishing an integration package to PyPi, and adding documentation for it +to the LangChain Monorepo. + +These instructions will evolve over the next few months as we improve our integration +processes. + +## Components to Integrate + +:::info + +See the [Conceptual Guide](../../../concepts/index.mdx) for an overview of all components +supported in LangChain + +::: + +While any component can be integrated into LangChain, at this time we are only accepting +new integrations in the docs of the following kinds: + + + + + + + + + + +
+<table>
+  <tr>
+    <th>Integrate these βœ…</th>
+    <th>Not these ❌</th>
+  </tr>
+  <tr>
+    <td>
+      <ul>
+        <li>Chat Models</li>
+        <li>Tools/Toolkits</li>
+        <li>Retrievers</li>
+        <li>Document Loaders</li>
+        <li>Vector Stores</li>
+        <li>Embedding Models</li>
+      </ul>
+    </td>
+    <td>
+      <ul>
+        <li>LLMs (Text-Completion Models)</li>
+        <li>Key-Value Stores</li>
+        <li>Document Transformers</li>
+        <li>Model Caches</li>
+        <li>Graphs</li>
+        <li>Message Histories</li>
+        <li>Callbacks</li>
+        <li>Chat Loaders</li>
+        <li>Adapters</li>
+        <li>Other abstractions</li>
+      </ul>
+    </td>
+  </tr>
+</table>
+ +## How to contribute an integration + +The only step necessary to "be" a LangChain integration is to add documentation +that will render on this site (https://python.langchain.com/). + +As a prerequisite to adding your integration to our documentation, you must: + +1. Confirm that your integration is in the list of components we are currently accepting. +2. Ensure that your integration is in a separate package that can be installed with `pip install `. +3. Implement the standard tests for your integration and successfully run them. +3. Write documentation for your integration in the `docs/docs/integrations` directory of the LangChain monorepo. +4. Add a provider page for your integration in the `docs/docs/integrations/providers` directory of the LangChain monorepo. + +Once you have completed these steps, you can submit a PR to the LangChain monorepo to add your integration to the documentation. + +## Further Reading + +If you're starting from scratch, you can follow the [Integration Template Guide](./from_template.mdx) to create and publish a new integration package +to the above spec. diff --git a/docs/docs/contributing/how_to/integrations/standard_tests.ipynb b/docs/docs/contributing/how_to/integrations/standard_tests.ipynb new file mode 100644 index 00000000000000..393bf6961c5789 --- /dev/null +++ b/docs/docs/contributing/how_to/integrations/standard_tests.ipynb @@ -0,0 +1,325 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# How to add standard tests to an integration\n", + "\n", + "Implementing standard tests \n", + "\n", + "When creating either a custom class for yourself or a new tool to publish in a LangChain integration, it is important to add standard tests to ensure it works as expected. This guide will show you how to add standard tests to a tool, and the templates for implementing each different kind of integration are linked [at the bottom](#standard-test-templates-per-component).\n", + "\n", + "## Setup\n", + "\n", + "First, let's install 2 dependencies:\n", + "\n", + "- `langchain-core` will define the interfaces we want to import to define our custom tool.\n", + "- `langchain-tests==0.3.0` will provide the standard tests we want to use.\n", + "\n", + ":::note\n", + "\n", + "The `langchain-tests` package contains the module `langchain_standard_tests`. This name\n", + "mistmatch is due to this package historically being called `langchain_standard_tests` and\n", + "the name not being available on PyPi. 
This will either be reconciled by our \n", + "[PEP 541 request](https://github.com/pypi/support/issues/5062) (we welcome upvotes!), \n", + "or in a new release of `langchain-tests`.\n", + "\n", + "Because added tests in new versions of `langchain-tests` will always break your CI/CD pipelines, we recommend pinning the \n", + "version of `langchain-tests==0.3.0` to avoid unexpected changes.\n", + "\n", + ":::" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -U langchain-core langchain-tests==0.3.0 pytest pytest-socket" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's say we're publishing a package, `langchain_parrot_link`, that exposes a\n", + "tool called `ParrotMultiplyTool`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# title=\"langchain_parrot_link/tools.py\"\n", + "from langchain_core.tools import BaseTool\n", + "\n", + "\n", + "class ParrotMultiplyTool(BaseTool):\n", + " name: str = \"ParrotMultiplyTool\"\n", + " description: str = (\n", + " \"Multiply two numbers like a parrot. Parrots always add \"\n", + " \"eighty for their matey.\"\n", + " )\n", + "\n", + " def _run(self, a: int, b: int) -> int:\n", + " return a * b + 80" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And we'll assume you've structured your package the same way as the main LangChain\n", + "packages:\n", + "\n", + "```\n", + "/\n", + "β”œβ”€β”€ langchain_parrot_link/\n", + "β”‚ └── tools.py\n", + "└── tests/\n", + " β”œβ”€β”€ unit_tests/\n", + " β”‚ └── test_tools.py\n", + " └── integration_tests/\n", + " └── test_tools.py\n", + "```\n", + "\n", + "## Add and configure standard tests\n", + "\n", + "There are 2 namespaces in the `langchain-tests` package: \n", + "\n", + "- unit tests (`langchain_standard_tests.unit_tests`): designed to be used to test the tool in isolation and without access to external services\n", + "- integration tests (`langchain_standard_tests.integration_tests`): designed to be used to test the tool with access to external services (in particular, the external service that the tool is designed to interact with).\n", + "\n", + ":::note\n", + "\n", + "Integration tests can also be run without access to external services, **if** they are properly mocked.\n", + "\n", + ":::\n", + "\n", + "Both types of tests are implemented as [`pytest` class-based test suites](https://docs.pytest.org/en/7.1.x/getting-started.html#group-multiple-tests-in-a-class).\n", + "\n", + "By subclassing the base classes for each type of standard test (see below), you get all of the standard tests for that type, and you\n", + "can override the properties that the test suite uses to configure the tests.\n", + "\n", + "### Standard tools tests\n", + "\n", + "Here's how you would configure the standard unit tests for the custom tool, e.g. 
in `tests/test_tools.py`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "title": "tests/test_custom_tool.py" + }, + "outputs": [], + "source": [ + "# title=\"tests/unit_tests/test_tools.py\"\n", + "from typing import Type\n", + "\n", + "from langchain_parrot_link.tools import ParrotMultiplyTool\n", + "from langchain_standard_tests.unit_tests import ToolsUnitTests\n", + "\n", + "\n", + "class ParrotMultiplyToolUnitTests(ToolsUnitTests):\n", + " @property\n", + " def tool_constructor(self) -> Type[ParrotMultiplyTool]:\n", + " return ParrotMultiplyTool\n", + "\n", + " def tool_constructor_params(self) -> dict:\n", + " # if your tool constructor instead required initialization arguments like\n", + " # `def __init__(self, some_arg: int):`, you would return those here\n", + " # as a dictionary, e.g.: `return {'some_arg': 42}`\n", + " return {}\n", + "\n", + " def tool_invoke_params_example(self) -> dict:\n", + " \"\"\"\n", + " Returns a dictionary representing the \"args\" of an example tool call.\n", + "\n", + " This should NOT be a ToolCall dict - i.e. it should not\n", + " have {\"name\", \"id\", \"args\"} keys.\n", + " \"\"\"\n", + " return {\"a\": 2, \"b\": 3}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# title=\"tests/integration_tests/test_tools.py\"\n", + "from typing import Type\n", + "\n", + "from langchain_parrot_link.tools import ParrotMultiplyTool\n", + "from langchain_standard_tests.integration_tests import ToolsIntegrationTests\n", + "\n", + "\n", + "class ParrotMultiplyToolIntegrationTests(ToolsIntegrationTests):\n", + " @property\n", + " def tool_constructor(self) -> Type[ParrotMultiplyTool]:\n", + " return ParrotMultiplyTool\n", + "\n", + " def tool_constructor_params(self) -> dict:\n", + " # if your tool constructor instead required initialization arguments like\n", + " # `def __init__(self, some_arg: int):`, you would return those here\n", + " # as a dictionary, e.g.: `return {'some_arg': 42}`\n", + " return {}\n", + "\n", + " def tool_invoke_params_example(self) -> dict:\n", + " \"\"\"\n", + " Returns a dictionary representing the \"args\" of an example tool call.\n", + "\n", + " This should NOT be a ToolCall dict - i.e. it should not\n", + " have {\"name\", \"id\", \"args\"} keys.\n", + " \"\"\"\n", + " return {\"a\": 2, \"b\": 3}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "and you would run these with the following commands from your project root\n", + "\n", + "```bash\n", + "# run unit tests without network access\n", + "pytest --disable-socket --enable-unix-socket tests/unit_tests\n", + "\n", + "# run integration tests\n", + "pytest tests/integration_tests\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Standard test templates per component:\n", + "\n", + "Above, we implement the **unit** and **integration** standard tests for a tool. Below are the templates for implementing the standard tests for each component:\n", + "\n", + "
\n", + " Chat Models" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# title=\"tests/unit_tests/test_chat_models.py\"\n", + "from typing import Tuple, Type\n", + "\n", + "from langchain_parrot_link.chat_models import ChatParrotLink\n", + "from langchain_standard_tests.unit_tests import ChatModelUnitTests\n", + "\n", + "\n", + "class ChatParrotLinkUnitTests(ChatModelUnitTests):\n", + " @property\n", + " def chat_model_class(self) -> Type[ChatParrotLink]:\n", + " return ChatParrotLink" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# title=\"tests/integration_tests/test_chat_models.py\"\n", + "from typing import Type\n", + "\n", + "from langchain_parrot_link.chat_models import ChatParrotLink\n", + "from langchain_standard_tests.integration_tests import ChatModelIntegrationTests\n", + "\n", + "\n", + "class TestMistralStandard(ChatModelIntegrationTests):\n", + " @property\n", + " def chat_model_class(self) -> Type[ChatParrotLink]:\n", + " return ChatParrotLink\n", + "\n", + " @property\n", + " def chat_model_params(self) -> dict:\n", + " return {\"model\": \"bird-brain-001\", \"temperature\": 0}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "\n", + "
\n", + "Work in progress:\n", + "
\n", + " Tools/Toolkits\n", + " TODO" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "
\n", + " Retrievers\n", + " TODO" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "
\n", + " Vector Stores\n", + " TODO" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "
\n", + " Embedding Models\n", + " TODO" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "
" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/docs/contributing/index.mdx b/docs/docs/contributing/index.mdx index e567ee75a71f71..67930710585dc5 100644 --- a/docs/docs/contributing/index.mdx +++ b/docs/docs/contributing/index.mdx @@ -16,7 +16,7 @@ More coming soon! We are working on tutorials to help you make your first contri - [**Documentation**](how_to/documentation/index.mdx): Help improve our docs, including this one! - [**Code**](how_to/code/index.mdx): Help us write code, fix bugs, or improve our infrastructure. -- [**Integrations**](how_to/integrations.mdx): Help us integrate with your favorite vendors and tools. +- [**Integrations**](how_to/integrations/index.mdx): Help us integrate with your favorite vendors and tools. ## Reference diff --git a/docs/docs/contributing/reference/repo_structure.mdx b/docs/docs/contributing/reference/repo_structure.mdx index f7df2f11423abf..8838fdfb9357b8 100644 --- a/docs/docs/contributing/reference/repo_structure.mdx +++ b/docs/docs/contributing/reference/repo_structure.mdx @@ -61,5 +61,5 @@ The `/libs` directory contains the code for the LangChain packages. To learn more about how to contribute code see the following guidelines: - [Code](../how_to/code/index.mdx): Learn how to develop in the LangChain codebase. -- [Integrations](../how_to/integrations.mdx): Learn how to contribute to third-party integrations to `langchain-community` or to start a new partner package. +- [Integrations](../how_to/integrations/index.mdx): Learn how to contribute to third-party integrations to `langchain-community` or to start a new partner package. - [Testing](../how_to/testing.mdx): Guidelines to learn how to write tests for the packages. diff --git a/docs/docs/how_to/HTML_header_metadata_splitter.ipynb b/docs/docs/how_to/HTML_header_metadata_splitter.ipynb index 2b336ed8844022..266f5036679273 100644 --- a/docs/docs/how_to/HTML_header_metadata_splitter.ipynb +++ b/docs/docs/how_to/HTML_header_metadata_splitter.ipynb @@ -13,7 +13,7 @@ "# How to split by HTML header \n", "## Description and motivation\n", "\n", - "[HTMLHeaderTextSplitter](https://python.langchain.com/api_reference/text_splitters/html/langchain_text_splitters.html.HTMLHeaderTextSplitter.html) is a \"structure-aware\" chunker that splits text at the HTML element level and adds metadata for each header \"relevant\" to any given chunk. It can return chunks element by element or combine elements with the same metadata, with the objectives of (a) keeping related text grouped (more or less) semantically and (b) preserving context-rich information encoded in document structures. It can be used with other text splitters as part of a chunking pipeline.\n", + "[HTMLHeaderTextSplitter](https://python.langchain.com/api_reference/text_splitters/html/langchain_text_splitters.html.HTMLHeaderTextSplitter.html) is a \"structure-aware\" [text splitter](/docs/concepts/text_splitters/) that splits text at the HTML element level and adds metadata for each header \"relevant\" to any given chunk. 
It can return chunks element by element or combine elements with the same metadata, with the objectives of (a) keeping related text grouped (more or less) semantically and (b) preserving context-rich information encoded in document structures. It can be used with other text splitters as part of a chunking pipeline.\n", "\n", "It is analogous to the [MarkdownHeaderTextSplitter](/docs/how_to/markdown_header_metadata_splitter) for markdown files.\n", "\n", diff --git a/docs/docs/how_to/HTML_section_aware_splitter.ipynb b/docs/docs/how_to/HTML_section_aware_splitter.ipynb index be4da3f2156a78..368a7a2bcc4316 100644 --- a/docs/docs/how_to/HTML_section_aware_splitter.ipynb +++ b/docs/docs/how_to/HTML_section_aware_splitter.ipynb @@ -12,7 +12,7 @@ "source": [ "# How to split by HTML sections\n", "## Description and motivation\n", - "Similar in concept to the [HTMLHeaderTextSplitter](/docs/how_to/HTML_header_metadata_splitter), the `HTMLSectionSplitter` is a \"structure-aware\" chunker that splits text at the element level and adds metadata for each header \"relevant\" to any given chunk.\n", + "Similar in concept to the [HTMLHeaderTextSplitter](/docs/how_to/HTML_header_metadata_splitter), the `HTMLSectionSplitter` is a \"structure-aware\" [text splitter](/docs/concepts/text_splitters/) that splits text at the element level and adds metadata for each header \"relevant\" to any given chunk.\n", "\n", "It can return chunks element by element or combine elements with the same metadata, with the objectives of (a) keeping related text grouped (more or less) semantically and (b) preserving context-rich information encoded in document structures.\n", "\n", diff --git a/docs/docs/how_to/MultiQueryRetriever.ipynb b/docs/docs/how_to/MultiQueryRetriever.ipynb index d27cca7eaa2a65..06d2c0fdd57f50 100644 --- a/docs/docs/how_to/MultiQueryRetriever.ipynb +++ b/docs/docs/how_to/MultiQueryRetriever.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to use the MultiQueryRetriever\n", "\n", - "Distance-based vector database retrieval embeds (represents) queries in high-dimensional space and finds similar embedded documents based on a distance metric. But, retrieval may produce different results with subtle changes in query wording, or if the embeddings do not capture the semantics of the data well. Prompt engineering / tuning is sometimes done to manually address these problems, but can be tedious.\n", + "Distance-based [vector database](/docs/concepts/vectorstores/) retrieval [embeds](/docs/concepts/embedding_models/) (represents) queries in high-dimensional space and finds similar embedded documents based on a distance metric. But, retrieval may produce different results with subtle changes in query wording, or if the embeddings do not capture the semantics of the data well. Prompt engineering / tuning is sometimes done to manually address these problems, but can be tedious.\n", "\n", "The [MultiQueryRetriever](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.multi_query.MultiQueryRetriever.html) automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query. For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents. 
By generating multiple perspectives on the same question, the `MultiQueryRetriever` can mitigate some of the limitations of the distance-based retrieval and get a richer set of results.\n", "\n", @@ -151,7 +151,7 @@ "id": "7e170263-facd-4065-bb68-d11fb9123a45", "metadata": {}, "source": [ - "Note that the underlying queries generated by the retriever are logged at the `INFO` level." + "Note that the underlying queries generated by the [retriever](/docs/concepts/retrievers/) are logged at the `INFO` level." ] }, { diff --git a/docs/docs/how_to/add_scores_retriever.ipynb b/docs/docs/how_to/add_scores_retriever.ipynb index 65d56cbcf83a36..3bdc8b805526da 100644 --- a/docs/docs/how_to/add_scores_retriever.ipynb +++ b/docs/docs/how_to/add_scores_retriever.ipynb @@ -7,11 +7,11 @@ "source": [ "# How to add scores to retriever results\n", "\n", - "Retrievers will return sequences of [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) objects, which by default include no information about the process that retrieved them (e.g., a similarity score against a query). Here we demonstrate how to add retrieval scores to the `.metadata` of documents:\n", + "[Retrievers](/docs/concepts/retrievers/) will return sequences of [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) objects, which by default include no information about the process that retrieved them (e.g., a similarity score against a query). Here we demonstrate how to add retrieval scores to the `.metadata` of documents:\n", "1. From [vectorstore retrievers](/docs/how_to/vectorstore_retriever);\n", "2. From higher-order LangChain retrievers, such as [SelfQueryRetriever](/docs/how_to/self_query) or [MultiVectorRetriever](/docs/how_to/multi_vector).\n", "\n", - "For (1), we will implement a short wrapper function around the corresponding vector store. For (2), we will update a method of the corresponding class.\n", + "For (1), we will implement a short wrapper function around the corresponding [vector store](/docs/concepts/vectorstores/). 
For (2), we will update a method of the corresponding class.\n", "\n", "## Create vector store\n", "\n", diff --git a/docs/docs/how_to/agent_executor.ipynb b/docs/docs/how_to/agent_executor.ipynb index 1c357632630cd0..c52c126a066c33 100644 --- a/docs/docs/how_to/agent_executor.ipynb +++ b/docs/docs/how_to/agent_executor.ipynb @@ -22,7 +22,7 @@ ":::\n", "\n", "By themselves, language models can't take actions - they just output text.\n", - "A big use case for LangChain is creating **agents**.\n", + "A big use case for LangChain is creating **[agents](/docs/concepts/agents/)**.\n", "Agents are systems that use an LLM as a reasoning engine to determine which actions to take and what the inputs to those actions should be.\n", "The results of those actions can then be fed back into the agent and it determines whether more actions are needed, or whether it is okay to finish.\n", "\n", diff --git a/docs/docs/how_to/caching_embeddings.ipynb b/docs/docs/how_to/caching_embeddings.ipynb index 71868b812f296f..01187ef9ba9b48 100644 --- a/docs/docs/how_to/caching_embeddings.ipynb +++ b/docs/docs/how_to/caching_embeddings.ipynb @@ -7,7 +7,7 @@ "source": [ "# Caching\n", "\n", - "Embeddings can be stored or temporarily cached to avoid needing to recompute them.\n", + "[Embeddings](/docs/concepts/embedding_models/) can be stored or temporarily cached to avoid needing to recompute them.\n", "\n", "Caching embeddings can be done using a `CacheBackedEmbeddings`. The cache backed embedder is a wrapper around an embedder that caches\n", "embeddings in a key-value store. The text is hashed and the hash is used as the key in the cache.\n", diff --git a/docs/docs/how_to/character_text_splitter.ipynb b/docs/docs/how_to/character_text_splitter.ipynb index ab82464c48fe6f..4de1ca3bfcaacd 100644 --- a/docs/docs/how_to/character_text_splitter.ipynb +++ b/docs/docs/how_to/character_text_splitter.ipynb @@ -21,7 +21,7 @@ "source": [ "# How to split by character\n", "\n", - "This is the simplest method. This splits based on a given character sequence, which defaults to `\"\\n\\n\"`. Chunk length is measured by number of characters.\n", + "This is the simplest method. This [splits](/docs/concepts/text_splitters/) based on a given character sequence, which defaults to `\"\\n\\n\"`. Chunk length is measured by number of characters.\n", "\n", "1. How the text is split: by single character separator.\n", "2. How the chunk size is measured: by number of characters.\n", diff --git a/docs/docs/how_to/chat_model_caching.ipynb b/docs/docs/how_to/chat_model_caching.ipynb index d9a7f38445816b..02a0b9c314ce4f 100644 --- a/docs/docs/how_to/chat_model_caching.ipynb +++ b/docs/docs/how_to/chat_model_caching.ipynb @@ -15,7 +15,7 @@ "\n", ":::\n", "\n", - "LangChain provides an optional caching layer for chat models. This is useful for two main reasons:\n", + "LangChain provides an optional caching layer for [chat models](/docs/concepts/chat_models). This is useful for two main reasons:\n", "\n", "- It can save you money by reducing the number of API calls you make to the LLM provider, if you're often requesting the same completion multiple times. 
This is especially useful during app development.\n", "- It can speed up your application by reducing the number of API calls you make to the LLM provider.\n", diff --git a/docs/docs/how_to/chat_models_universal_init.ipynb b/docs/docs/how_to/chat_models_universal_init.ipynb index 0b14538b85ae7c..19b8822fe5783f 100644 --- a/docs/docs/how_to/chat_models_universal_init.ipynb +++ b/docs/docs/how_to/chat_models_universal_init.ipynb @@ -7,13 +7,13 @@ "source": [ "# How to init any model in one line\n", "\n", - "Many LLM applications let end users specify what model provider and model they want the application to be powered by. This requires writing some logic to initialize different ChatModels based on some user configuration. The `init_chat_model()` helper method makes it easy to initialize a number of different model integrations without having to worry about import paths and class names.\n", + "Many LLM applications let end users specify what model provider and model they want the application to be powered by. This requires writing some logic to initialize different [chat models](/docs/concepts/chat_models/) based on some user configuration. The `init_chat_model()` helper method makes it easy to initialize a number of different model integrations without having to worry about import paths and class names.\n", "\n", ":::tip Supported models\n", "\n", "See the [init_chat_model()](https://python.langchain.com/api_reference/langchain/chat_models/langchain.chat_models.base.init_chat_model.html) API reference for a full list of supported integrations.\n", "\n", - "Make sure you have the integration packages installed for any model providers you want to support. E.g. you should have `langchain-openai` installed to init an OpenAI model.\n", + "Make sure you have the [integration packages](/docs/integrations/chat/) installed for any model providers you want to support. E.g. you should have `langchain-openai` installed to init an OpenAI model.\n", "\n", ":::" ] diff --git a/docs/docs/how_to/chat_token_usage_tracking.ipynb b/docs/docs/how_to/chat_token_usage_tracking.ipynb index cdcf943e843654..a01ee9f01cb237 100644 --- a/docs/docs/how_to/chat_token_usage_tracking.ipynb +++ b/docs/docs/how_to/chat_token_usage_tracking.ipynb @@ -14,7 +14,7 @@ "\n", ":::\n", "\n", - "Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls.\n", + "Tracking [token](/docs/concepts/tokens/) usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls.\n", "\n", "This guide requires `langchain-anthropic` and `langchain-openai >= 0.1.9`." ] diff --git a/docs/docs/how_to/chatbots_retrieval.ipynb b/docs/docs/how_to/chatbots_retrieval.ipynb index 015e0d34456795..d3874d1776740d 100644 --- a/docs/docs/how_to/chatbots_retrieval.ipynb +++ b/docs/docs/how_to/chatbots_retrieval.ipynb @@ -15,7 +15,7 @@ "source": [ "# How to add retrieval to chatbots\n", "\n", - "Retrieval is a common technique chatbots use to augment their responses with data outside a chat model's training data. 
This section will cover how to implement retrieval in the context of chatbots, but it's worth noting that retrieval is a very subtle and deep topic - we encourage you to explore [other parts of the documentation](/docs/how_to#qa-with-rag) that go into greater depth!\n", + "[Retrieval](/docs/concepts/retrieval/) is a common technique chatbots use to augment their responses with data outside a chat model's training data. This section will cover how to implement retrieval in the context of chatbots, but it's worth noting that retrieval is a very subtle and deep topic - we encourage you to explore [other parts of the documentation](/docs/how_to#qa-with-rag) that go into greater depth!\n", "\n", "## Setup\n", "\n", @@ -80,7 +80,7 @@ "source": [ "## Creating a retriever\n", "\n", - "We'll use [the LangSmith documentation](https://docs.smith.langchain.com/overview) as source material and store the content in a vectorstore for later retrieval. Note that this example will gloss over some of the specifics around parsing and storing a data source - you can see more [in-depth documentation on creating retrieval systems here](/docs/how_to#qa-with-rag).\n", + "We'll use [the LangSmith documentation](https://docs.smith.langchain.com/overview) as source material and store the content in a [vector store](/docs/concepts/vectorstores/) for later retrieval. Note that this example will gloss over some of the specifics around parsing and storing a data source - you can see more [in-depth documentation on creating retrieval systems here](/docs/how_to#qa-with-rag).\n", "\n", "Let's use a document loader to pull text from the docs:" ] diff --git a/docs/docs/how_to/chatbots_tools.ipynb b/docs/docs/how_to/chatbots_tools.ipynb index 0c9bbf5259e4d1..f5f639d58d7619 100644 --- a/docs/docs/how_to/chatbots_tools.ipynb +++ b/docs/docs/how_to/chatbots_tools.ipynb @@ -42,7 +42,7 @@ "metadata": {}, "outputs": [ { - "name": "stdin", + "name": "stdout", "output_type": "stream", "text": [ "OpenAI API Key: Β·Β·Β·Β·Β·Β·Β·Β·\n", @@ -78,7 +78,7 @@ "\n", "Our end goal is to create an agent that can respond conversationally to user questions while looking up information as needed.\n", "\n", - "First, let's initialize Tavily and an OpenAI chat model capable of tool calling:" + "First, let's initialize Tavily and an OpenAI [chat model](/docs/concepts/chat_models/) capable of tool calling:" ] }, { diff --git a/docs/docs/how_to/code_splitter.ipynb b/docs/docs/how_to/code_splitter.ipynb index 74755ebeeb0acc..6e7f471028828a 100644 --- a/docs/docs/how_to/code_splitter.ipynb +++ b/docs/docs/how_to/code_splitter.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to split code\n", "\n", - "[RecursiveCharacterTextSplitter](https://python.langchain.com/api_reference/text_splitters/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html) includes pre-built lists of separators that are useful for splitting text in a specific programming language.\n", + "[RecursiveCharacterTextSplitter](https://python.langchain.com/api_reference/text_splitters/character/langchain_text_splitters.character.RecursiveCharacterTextSplitter.html) includes pre-built lists of separators that are useful for [splitting text](/docs/concepts/text_splitters/) in a specific programming language.\n", "\n", "Supported languages are stored in the `langchain_text_splitters.Language` enum. 
They include:\n", "\n", diff --git a/docs/docs/how_to/contextual_compression.ipynb b/docs/docs/how_to/contextual_compression.ipynb index 5def4035eeec89..1009e9eb8b714e 100644 --- a/docs/docs/how_to/contextual_compression.ipynb +++ b/docs/docs/how_to/contextual_compression.ipynb @@ -7,13 +7,13 @@ "source": [ "# How to do retrieval with contextual compression\n", "\n", - "One challenge with retrieval is that usually you don't know the specific queries your document storage system will face when you ingest data into the system. This means that the information most relevant to a query may be buried in a document with a lot of irrelevant text. Passing that full document through your application can lead to more expensive LLM calls and poorer responses.\n", + "One challenge with [retrieval](/docs/concepts/retrieval/) is that usually you don't know the specific queries your document storage system will face when you ingest data into the system. This means that the information most relevant to a query may be buried in a document with a lot of irrelevant text. Passing that full document through your application can lead to more expensive LLM calls and poorer responses.\n", "\n", "Contextual compression is meant to fix this. The idea is simple: instead of immediately returning retrieved documents as-is, you can compress them using the context of the given query, so that only the relevant information is returned. “Compressing” here refers to both compressing the contents of an individual document and filtering out documents wholesale.\n", "\n", "To use the Contextual Compression Retriever, you'll need:\n", "\n", - "- a base retriever\n", + "- a base [retriever](/docs/concepts/retrievers/)\n", "- a Document Compressor\n", "\n", "The Contextual Compression Retriever passes queries to the base retriever, takes the initial documents and passes them through the Document Compressor. 
The Document Compressor takes a list of documents and shortens it by reducing the contents of documents or dropping documents altogether.\n", diff --git a/docs/docs/how_to/custom_chat_model.ipynb b/docs/docs/how_to/custom_chat_model.ipynb index 4fc502ca171c84..6f8e68e01a3546 100644 --- a/docs/docs/how_to/custom_chat_model.ipynb +++ b/docs/docs/how_to/custom_chat_model.ipynb @@ -14,15 +14,15 @@ "\n", ":::\n", "\n", - "In this guide, we'll learn how to create a custom chat model using LangChain abstractions.\n", + "In this guide, we'll learn how to create a custom [chat model](/docs/concepts/chat_models/) using LangChain abstractions.\n", "\n", "Wrapping your LLM with the standard [`BaseChatModel`](https://python.langchain.com/api_reference/core/language_models/langchain_core.language_models.chat_models.BaseChatModel.html) interface allows you to use your LLM in existing LangChain programs with minimal code modifications!\n", "\n", - "As an bonus, your LLM will automatically become a LangChain `Runnable` and will benefit from some optimizations out of the box (e.g., batch via a threadpool), async support, the `astream_events` API, etc.\n", + "As a bonus, your LLM will automatically become a LangChain [Runnable](/docs/concepts/runnables/) and will benefit from some optimizations out of the box (e.g., batch via a threadpool), async support, the `astream_events` API, etc.\n", "\n", "## Inputs and outputs\n", "\n", - "First, we need to talk about **messages**, which are the inputs and outputs of chat models.\n", + "First, we need to talk about **[messages](/docs/concepts/messages/)**, which are the inputs and outputs of chat models.\n", "\n", "### Messages\n", "\n", diff --git a/docs/docs/how_to/custom_retriever.ipynb b/docs/docs/how_to/custom_retriever.ipynb index 31600dcf73fce4..31b6fb90a1c140 100644 --- a/docs/docs/how_to/custom_retriever.ipynb +++ b/docs/docs/how_to/custom_retriever.ipynb @@ -19,9 +19,9 @@ "\n", "## Overview\n", "\n", - "Many LLM applications involve retrieving information from external data sources using a `Retriever`. \n", + "Many LLM applications involve retrieving information from external data sources using a [Retriever](/docs/concepts/retrievers/). \n", "\n", - "A retriever is responsible for retrieving a list of relevant `Documents` to a given user `query`.\n", + "A retriever is responsible for retrieving a list of relevant [Documents](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) to a given user `query`.\n", "\n", "The retrieved documents are often formatted into prompts that are fed into an LLM, allowing the LLM to use the information in them to generate an appropriate response (e.g., answering a user question based on a knowledge base).\n", "\n", diff --git a/docs/docs/how_to/custom_tools.ipynb b/docs/docs/how_to/custom_tools.ipynb index d73604f445c8b9..8046b7b00e4a4b 100644 --- a/docs/docs/how_to/custom_tools.ipynb +++ b/docs/docs/how_to/custom_tools.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to create tools\n", "\n", - "When constructing an agent, you will need to provide it with a list of `Tool`s that it can use. Besides the actual function that is called, the Tool consists of several components:\n", + "When constructing an [agent](/docs/concepts/agents/), you will need to provide it with a list of [Tools](/docs/concepts/tools/) that it can use. 
Besides the actual function that is called, the Tool consists of several components:\n", "\n", "| Attribute | Type | Description |\n", "|---------------|---------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n", diff --git a/docs/docs/how_to/document_loader_custom.ipynb b/docs/docs/how_to/document_loader_custom.ipynb index a14b5270405f78..8ebeae8fb63a34 100644 --- a/docs/docs/how_to/document_loader_custom.ipynb +++ b/docs/docs/how_to/document_loader_custom.ipynb @@ -26,7 +26,7 @@ "`Document` objects are often formatted into prompts that are fed into an LLM, allowing the LLM to use the information in the `Document` to generate a desired response (e.g., summarizing the document).\n", "`Documents` can be either used immediately or indexed into a vectorstore for future retrieval and use.\n", "\n", - "The main abstractions for Document Loading are:\n", + "The main abstractions for [Document Loading](/docs/concepts/document_loaders/) are:\n", "\n", "\n", "| Component | Description |\n", diff --git a/docs/docs/how_to/document_loader_pdf.ipynb b/docs/docs/how_to/document_loader_pdf.ipynb index f13edbc99db43f..766560c1db643b 100644 --- a/docs/docs/how_to/document_loader_pdf.ipynb +++ b/docs/docs/how_to/document_loader_pdf.ipynb @@ -9,7 +9,7 @@ "\n", "[Portable Document Format (PDF)](https://en.wikipedia.org/wiki/PDF), standardized as ISO 32000, is a file format developed by Adobe in 1992 to present documents, including text formatting and images, in a manner independent of application software, hardware, and operating systems.\n", "\n", - "This guide covers how to load `PDF` documents into the LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) format that we use downstream.\n", + "This guide covers how to [load](/docs/concepts/document_loaders/) `PDF` documents into the LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) format that we use downstream.\n", "\n", "Text in PDFs is typically represented via text boxes. They may also contain images. A PDF parser might do some combination of the following:\n", "\n", @@ -250,7 +250,7 @@ "metadata": {}, "outputs": [ { - "name": "stdin", + "name": "stdout", "output_type": "stream", "text": [ "Unstructured API Key: Β·Β·Β·Β·Β·Β·Β·Β·\n" diff --git a/docs/docs/how_to/document_loader_web.ipynb b/docs/docs/how_to/document_loader_web.ipynb index 04c4a3b7c68fb2..9dc424babb2ae2 100644 --- a/docs/docs/how_to/document_loader_web.ipynb +++ b/docs/docs/how_to/document_loader_web.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to load web pages\n", "\n", - "This guide covers how to load web pages into the LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) format that we use downstream. Web pages contain text, images, and other multimedia elements, and are typically represented with HTML. They may include links to other pages or resources.\n", + "This guide covers how to [load](/docs/concepts/document_loaders/) web pages into the LangChain [Document](https://python.langchain.com/api_reference/core/documents/langchain_core.documents.base.Document.html) format that we use downstream. Web pages contain text, images, and other multimedia elements, and are typically represented with HTML. 
They may include links to other pages or resources.\n", "\n", "LangChain integrates with a host of parsers that are appropriate for web pages. The right parser will depend on your needs. Below we demonstrate two possibilities:\n", "\n", diff --git a/docs/docs/how_to/ensemble_retriever.ipynb b/docs/docs/how_to/ensemble_retriever.ipynb index 99098554f5ef28..ce518edfb0be1a 100644 --- a/docs/docs/how_to/ensemble_retriever.ipynb +++ b/docs/docs/how_to/ensemble_retriever.ipynb @@ -6,7 +6,7 @@ "source": [ "# How to combine results from multiple retrievers\n", "\n", - "The [EnsembleRetriever](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.ensemble.EnsembleRetriever.html) supports ensembling of results from multiple retrievers. It is initialized with a list of [BaseRetriever](https://python.langchain.com/api_reference/core/retrievers/langchain_core.retrievers.BaseRetriever.html) objects. EnsembleRetrievers rerank the results of the constituent retrievers based on the [Reciprocal Rank Fusion](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf) algorithm.\n", + "The [EnsembleRetriever](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.ensemble.EnsembleRetriever.html) supports ensembling of results from multiple [retrievers](/docs/concepts/retrievers/). It is initialized with a list of [BaseRetriever](https://python.langchain.com/api_reference/core/retrievers/langchain_core.retrievers.BaseRetriever.html) objects. EnsembleRetrievers rerank the results of the constituent retrievers based on the [Reciprocal Rank Fusion](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf) algorithm.\n", "\n", "By leveraging the strengths of different algorithms, the `EnsembleRetriever` can achieve better performance than any single algorithm. \n", "\n", diff --git a/docs/docs/how_to/example_selectors.ipynb b/docs/docs/how_to/example_selectors.ipynb index b6594fbdb0ae1b..39f0bcadf14578 100644 --- a/docs/docs/how_to/example_selectors.ipynb +++ b/docs/docs/how_to/example_selectors.ipynb @@ -17,7 +17,7 @@ "source": [ "# How to use example selectors\n", "\n", - "If you have a large number of examples, you may need to select which ones to include in the prompt. The Example Selector is the class responsible for doing so.\n", + "If you have a large number of examples, you may need to select which ones to include in the prompt. The [Example Selector](/docs/concepts/example_selectors/) is the class responsible for doing so.\n", "\n", "The base interface is defined as below:\n", "\n", @@ -36,7 +36,7 @@ "\n", "The only method it needs to define is a ``select_examples`` method. This takes in the input variables and then returns a list of examples. It is up to each specific implementation as to how those examples are selected.\n", "\n", - "LangChain has a few different types of example selectors. For an overview of all these types, see the below table.\n", + "LangChain has a few different types of example selectors. For an overview of all these types, see the [below table](#example-selector-types).\n", "\n", "In this guide, we will walk through creating a custom example selector." 
] diff --git a/docs/docs/how_to/example_selectors_langsmith.ipynb b/docs/docs/how_to/example_selectors_langsmith.ipynb index efc9e2db46d3ba..c6a032948bc678 100644 --- a/docs/docs/how_to/example_selectors_langsmith.ipynb +++ b/docs/docs/how_to/example_selectors_langsmith.ipynb @@ -23,7 +23,7 @@ "]} />\n", "\n", "\n", - "LangSmith datasets have built-in support for similarity search, making them a great tool for building and querying few-shot examples.\n", + "[LangSmith](https://docs.smith.langchain.com/) datasets have built-in support for similarity search, making them a great tool for building and querying few-shot examples.\n", "\n", "In this guide we'll see how to use an indexed LangSmith dataset as a few-shot example selector.\n", "\n", diff --git a/docs/docs/how_to/example_selectors_length_based.ipynb b/docs/docs/how_to/example_selectors_length_based.ipynb index 1074b3148e4d7b..dcd897895b7959 100644 --- a/docs/docs/how_to/example_selectors_length_based.ipynb +++ b/docs/docs/how_to/example_selectors_length_based.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to select examples by length\n", "\n", - "This example selector selects which examples to use based on length. This is useful when you are worried about constructing a prompt that will go over the length of the context window. For longer inputs, it will select fewer examples to include, while for shorter inputs it will select more." + "This [example selector](/docs/concepts/example_selectors/) selects which examples to use based on length. This is useful when you are worried about constructing a prompt that will go over the length of the context window. For longer inputs, it will select fewer examples to include, while for shorter inputs it will select more." ] }, { diff --git a/docs/docs/how_to/example_selectors_mmr.ipynb b/docs/docs/how_to/example_selectors_mmr.ipynb index b965a7dec778aa..9b0f96f181aa23 100644 --- a/docs/docs/how_to/example_selectors_mmr.ipynb +++ b/docs/docs/how_to/example_selectors_mmr.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to select examples by maximal marginal relevance (MMR)\n", "\n", - "The `MaxMarginalRelevanceExampleSelector` selects examples based on a combination of which examples are most similar to the inputs, while also optimizing for diversity. It does this by finding the examples with the embeddings that have the greatest cosine similarity with the inputs, and then iteratively adding them while penalizing them for closeness to already selected examples.\n" + "The `MaxMarginalRelevanceExampleSelector` selects [examples](/docs/concepts/example_selectors/) based on a combination of which examples are most similar to the inputs, while also optimizing for diversity. It does this by finding the examples with the embeddings that have the greatest cosine similarity with the inputs, and then iteratively adding them while penalizing them for closeness to already selected examples.\n" ] }, { diff --git a/docs/docs/how_to/example_selectors_ngram.ipynb b/docs/docs/how_to/example_selectors_ngram.ipynb index fb464ef8e306a4..80578100cafee9 100644 --- a/docs/docs/how_to/example_selectors_ngram.ipynb +++ b/docs/docs/how_to/example_selectors_ngram.ipynb @@ -9,7 +9,7 @@ "\n", "The `NGramOverlapExampleSelector` selects and orders examples based on which examples are most similar to the input, according to an ngram overlap score. The ngram overlap score is a float between 0.0 and 1.0, inclusive. \n", "\n", - "The selector allows for a threshold score to be set. 
Examples with an ngram overlap score less than or equal to the threshold are excluded. The threshold is set to -1.0, by default, so will not exclude any examples, only reorder them. Setting the threshold to 0.0 will exclude examples that have no ngram overlaps with the input.\n" + "The [selector](/docs/concepts/example_selectors/) allows for a threshold score to be set. Examples with an ngram overlap score less than or equal to the threshold are excluded. The threshold is set to -1.0 by default, so it will not exclude any examples, only reorder them. Setting the threshold to 0.0 will exclude examples that have no ngram overlaps with the input.\n" ] }, { diff --git a/docs/docs/how_to/example_selectors_similarity.ipynb b/docs/docs/how_to/example_selectors_similarity.ipynb index d6e692cfac2db9..6657cd543300b0 100644 --- a/docs/docs/how_to/example_selectors_similarity.ipynb +++ b/docs/docs/how_to/example_selectors_similarity.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to select examples by similarity\n", "\n", - "This object selects examples based on similarity to the inputs. It does this by finding the examples with the embeddings that have the greatest cosine similarity with the inputs.\n" + "This object selects [examples](/docs/concepts/example_selectors/) based on similarity to the inputs. It does this by finding the examples with the embeddings that have the greatest cosine similarity with the inputs.\n" ] }, { diff --git a/docs/docs/how_to/extraction_examples.ipynb b/docs/docs/how_to/extraction_examples.ipynb index 3de5417d5fbd4a..ff565fb35a2a35 100644 --- a/docs/docs/how_to/extraction_examples.ipynb +++ b/docs/docs/how_to/extraction_examples.ipynb @@ -9,7 +9,7 @@ "\n", "The quality of extractions can often be improved by providing reference examples to the LLM.\n", "\n", - "Data extraction attempts to generate structured representations of information found in text and other unstructured or semi-structured formats. [Tool-calling](/docs/concepts/tool_calling) LLM features are often used in this context. This guide demonstrates how to build few-shot examples of tool calls to help steer the behavior of extraction and similar applications.\n", + "Data extraction attempts to generate [structured representations](/docs/concepts/structured_outputs/) of information found in text and other unstructured or semi-structured formats. [Tool-calling](/docs/concepts/tool_calling) LLM features are often used in this context. This guide demonstrates how to build few-shot examples of tool calls to help steer the behavior of extraction and similar applications.\n", "\n", ":::tip\n", "While this guide focuses on how to use examples with a tool calling model, this technique is generally applicable, and will work\n", diff --git a/docs/docs/how_to/extraction_parse.ipynb b/docs/docs/how_to/extraction_parse.ipynb index 8d6c3a1a041730..5ec9c86348d99d 100644 --- a/docs/docs/how_to/extraction_parse.ipynb +++ b/docs/docs/how_to/extraction_parse.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to use prompting alone (no tool calling) to do extraction\n", "\n", - "Tool calling features are not required for generating structured output from LLMs. LLMs that are able to follow prompt instructions well can be tasked with outputting information in a given format.\n", + "[Tool calling](/docs/concepts/tool_calling/) features are not required for generating structured output from LLMs. 
LLMs that are able to follow prompt instructions well can be tasked with outputting information in a given format.\n", "\n", "This approach relies on designing good prompts and then parsing the output of the LLMs to make them extract information well.\n", "\n", diff --git a/docs/docs/how_to/few_shot_examples.ipynb b/docs/docs/how_to/few_shot_examples.ipynb index 6c8d0926f03e8e..6fab31f8ff15ac 100644 --- a/docs/docs/how_to/few_shot_examples.ipynb +++ b/docs/docs/how_to/few_shot_examples.ipynb @@ -27,7 +27,7 @@ "\n", ":::\n", "\n", - "In this guide, we'll learn how to create a simple prompt template that provides the model with example inputs and outputs when generating. Providing the LLM with a few such examples is called few-shotting, and is a simple yet powerful way to guide generation and in some cases drastically improve model performance.\n", + "In this guide, we'll learn how to create a simple prompt template that provides the model with example inputs and outputs when generating. Providing the LLM with a few such examples is called [few-shotting](/docs/concepts/few_shot_prompting/), and is a simple yet powerful way to guide generation and in some cases drastically improve model performance.\n", "\n", "A few-shot prompt template can be constructed from either a set of examples, or from an [Example Selector](https://python.langchain.com/api_reference/core/example_selectors/langchain_core.example_selectors.base.BaseExampleSelector.html) class responsible for choosing a subset of examples from the defined set.\n", "\n", diff --git a/docs/docs/how_to/few_shot_examples_chat.ipynb b/docs/docs/how_to/few_shot_examples_chat.ipynb index 51e41f65e409e0..3be13f6cbf5d52 100644 --- a/docs/docs/how_to/few_shot_examples_chat.ipynb +++ b/docs/docs/how_to/few_shot_examples_chat.ipynb @@ -27,7 +27,7 @@ "\n", ":::\n", "\n", - "This guide covers how to prompt a chat model with example inputs and outputs. Providing the model with a few such examples is called few-shotting, and is a simple yet powerful way to guide generation and in some cases drastically improve model performance.\n", + "This guide covers how to prompt a chat model with example inputs and outputs. Providing the model with a few such examples is called [few-shotting](/docs/concepts/few_shot_prompting/), and is a simple yet powerful way to guide generation and in some cases drastically improve model performance.\n", "\n", "There does not appear to be solid consensus on how best to do few-shot prompting, and the optimal prompt compilation will likely vary by model. Because of this, we provide few-shot prompt templates like the [FewShotChatMessagePromptTemplate](https://python.langchain.com/api_reference/core/prompts/langchain_core.prompts.few_shot.FewShotChatMessagePromptTemplate.html?highlight=fewshot#langchain_core.prompts.few_shot.FewShotChatMessagePromptTemplate) as a flexible starting point, and you can modify or replace them as you see fit.\n", "\n", diff --git a/docs/docs/how_to/filter_messages.ipynb b/docs/docs/how_to/filter_messages.ipynb index 794ef630326155..108ee908645bc7 100644 --- a/docs/docs/how_to/filter_messages.ipynb +++ b/docs/docs/how_to/filter_messages.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to filter messages\n", "\n", - "In more complex chains and agents we might track state with a list of messages. 
This list can start to accumulate messages from multiple different models, speakers, sub-chains, etc., and we may only want to pass subsets of this full list of messages to each model call in the chain/agent.\n", + "In more complex chains and agents we might track state with a list of [messages](/docs/concepts/messages/). This list can start to accumulate messages from multiple different models, speakers, sub-chains, etc., and we may only want to pass subsets of this full list of messages to each model call in the chain/agent.\n", "\n", "The `filter_messages` utility makes it easy to filter messages by type, id, or name.\n", "\n", diff --git a/docs/docs/how_to/graph_constructing.ipynb b/docs/docs/how_to/graph_constructing.ipynb index 79b9e1463f4a2f..5ca45d736453fc 100644 --- a/docs/docs/how_to/graph_constructing.ipynb +++ b/docs/docs/how_to/graph_constructing.ipynb @@ -15,7 +15,7 @@ "source": [ "# How to construct knowledge graphs\n", "\n", - "In this guide we'll go over the basic ways of constructing a knowledge graph based on unstructured text. The constructured graph can then be used as knowledge base in a RAG application.\n", + "In this guide we'll go over the basic ways of constructing a knowledge graph based on unstructured text. The constructed graph can then be used as a knowledge base in a [RAG](/docs/concepts/rag/) application.\n", "\n", "## ⚠️ Security note ⚠️\n", "\n", @@ -68,7 +68,7 @@ "metadata": {}, "outputs": [ { - "name": "stdin", + "name": "stdout", "output_type": "stream", "text": [ " ········\n" diff --git a/docs/docs/how_to/hybrid.ipynb b/docs/docs/how_to/hybrid.ipynb index 5f45061f66e911..55b13579cef4ba 100644 --- a/docs/docs/how_to/hybrid.ipynb +++ b/docs/docs/how_to/hybrid.ipynb @@ -9,7 +9,7 @@ "source": [ "# Hybrid Search\n", "\n", - "The standard search in LangChain is done by vector similarity. However, a number of vectorstores implementations (Astra DB, ElasticSearch, Neo4J, AzureSearch, Qdrant...) also support more advanced search combining vector similarity search and other search techniques (full-text, BM25, and so on). This is generally referred to as \"Hybrid\" search.\n", + "The standard search in LangChain is done by vector similarity. However, a number of [vector store](/docs/integrations/vectorstores/) implementations (Astra DB, ElasticSearch, Neo4J, AzureSearch, Qdrant...) also support more advanced search combining vector similarity search and other search techniques (full-text, BM25, and so on). This is generally referred to as \"Hybrid\" search.\n", "\n", "**Step 1: Make sure the vectorstore you are using supports hybrid search**\n", "\n", diff --git a/docs/docs/how_to/indexing.ipynb b/docs/docs/how_to/indexing.ipynb index 904424a1ac03b8..e3e6ec8aef6d7b 100644 --- a/docs/docs/how_to/indexing.ipynb +++ b/docs/docs/how_to/indexing.ipynb @@ -9,7 +9,7 @@ "\n", "Here, we will look at a basic indexing workflow using the LangChain indexing API. \n", "\n", - "The indexing API lets you load and keep in sync documents from any source into a vector store. Specifically, it helps:\n", + "The indexing API lets you load and keep in sync documents from any source into a [vector store](/docs/concepts/vectorstores/). 
Specifically, it helps:\n", "\n", "* Avoid writing duplicated content into the vector store\n", "* Avoid re-writing unchanged content\n", diff --git a/docs/docs/how_to/lcel_cheatsheet.ipynb b/docs/docs/how_to/lcel_cheatsheet.ipynb index fb67e0cd7cf9f6..20e825fe27b79b 100644 --- a/docs/docs/how_to/lcel_cheatsheet.ipynb +++ b/docs/docs/how_to/lcel_cheatsheet.ipynb @@ -7,7 +7,7 @@ "source": [ "# LangChain Expression Language Cheatsheet\n", "\n", - "This is a quick reference for all the most important LCEL primitives. For more advanced usage see the [LCEL how-to guides](/docs/how_to/#langchain-expression-language-lcel) and the [full API reference](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html).\n", + "This is a quick reference for all the most important [LCEL](/docs/concepts/lcel/) primitives. For more advanced usage see the [LCEL how-to guides](/docs/how_to/#langchain-expression-language-lcel) and the [full API reference](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html).\n", "\n", "### Invoke a runnable\n", "#### [Runnable.invoke()](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.invoke) / [Runnable.ainvoke()](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.ainvoke)" diff --git a/docs/docs/how_to/llm_caching.ipynb b/docs/docs/how_to/llm_caching.ipynb index 1ed564b393e06b..6fc2136922178c 100644 --- a/docs/docs/how_to/llm_caching.ipynb +++ b/docs/docs/how_to/llm_caching.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to cache LLM responses\n", "\n", - "LangChain provides an optional caching layer for LLMs. This is useful for two reasons:\n", + "LangChain provides an optional [caching](/docs/concepts/chat_models/#caching) layer for LLMs. This is useful for two reasons:\n", "\n", "It can save you money by reducing the number of API calls you make to the LLM provider, if you're often requesting the same completion multiple times.\n", "It can speed up your application by reducing the number of API calls you make to the LLM provider.\n" diff --git a/docs/docs/how_to/llm_token_usage_tracking.ipynb b/docs/docs/how_to/llm_token_usage_tracking.ipynb index 2f1a8a92c1e130..a4eb596b764c4e 100644 --- a/docs/docs/how_to/llm_token_usage_tracking.ipynb +++ b/docs/docs/how_to/llm_token_usage_tracking.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to track token usage for LLMs\n", "\n", - "Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls.\n", + "Tracking [token](/docs/concepts/tokens/) usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls.\n", "\n", ":::info Prerequisites\n", "\n", diff --git a/docs/docs/how_to/logprobs.ipynb b/docs/docs/how_to/logprobs.ipynb index 6033fd97823bfb..47bfa013a7e178 100644 --- a/docs/docs/how_to/logprobs.ipynb +++ b/docs/docs/how_to/logprobs.ipynb @@ -11,10 +11,11 @@ "\n", "This guide assumes familiarity with the following concepts:\n", "- [Chat models](/docs/concepts/chat_models)\n", + "- [Tokens](/docs/concepts/tokens)\n", "\n", ":::\n", "\n", - "Certain chat models can be configured to return token-level log probabilities representing the likelihood of a given token. 
This guide walks through how to get this information in LangChain." + "Certain [chat models](/docs/concepts/chat_models/) can be configured to return token-level log probabilities representing the likelihood of a given token. This guide walks through how to get this information in LangChain." ] }, { diff --git a/docs/docs/how_to/merge_message_runs.ipynb b/docs/docs/how_to/merge_message_runs.ipynb index e115eef2954ff3..ff9aaee593fa1b 100644 --- a/docs/docs/how_to/merge_message_runs.ipynb +++ b/docs/docs/how_to/merge_message_runs.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to merge consecutive messages of the same type\n", "\n", - "Certain models do not support passing in consecutive messages of the same type (a.k.a. \"runs\" of the same message type).\n", + "Certain models do not support passing in consecutive [messages](/docs/concepts/messages/) of the same type (a.k.a. \"runs\" of the same message type).\n", "\n", "The `merge_message_runs` utility makes it easy to merge consecutive messages of the same type.\n", "\n", diff --git a/docs/docs/how_to/multi_vector.ipynb b/docs/docs/how_to/multi_vector.ipynb index 69b4b0df0e0466..a68086b14fa2d2 100644 --- a/docs/docs/how_to/multi_vector.ipynb +++ b/docs/docs/how_to/multi_vector.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to retrieve using multiple vectors per document\n", "\n", - "It can often be useful to store multiple vectors per document. There are multiple use cases where this is beneficial. For example, we can embed multiple chunks of a document and associate those embeddings with the parent document, allowing retriever hits on the chunks to return the larger document.\n", + "It can often be useful to store multiple [vectors](/docs/concepts/vectorstores/) per document. There are multiple use cases where this is beneficial. For example, we can [embed](/docs/concepts/embedding_models/) multiple chunks of a document and associate those embeddings with the parent document, allowing [retriever](/docs/concepts/retrievers/) hits on the chunks to return the larger document.\n", "\n", "LangChain implements a base [MultiVectorRetriever](https://python.langchain.com/api_reference/langchain/retrievers/langchain.retrievers.multi_vector.MultiVectorRetriever.html), which simplifies this process. Much of the complexity lies in how to create the multiple vectors per document. This notebook covers some of the common ways to create those vectors and use the `MultiVectorRetriever`.\n", "\n", diff --git a/docs/docs/how_to/multimodal_inputs.ipynb b/docs/docs/how_to/multimodal_inputs.ipynb index 6d0b0b736a4715..f1eff275f60fab 100644 --- a/docs/docs/how_to/multimodal_inputs.ipynb +++ b/docs/docs/how_to/multimodal_inputs.ipynb @@ -7,11 +7,11 @@ "source": [ "# How to pass multimodal data directly to models\n", "\n", - "Here we demonstrate how to pass multimodal input directly to models. \n", + "Here we demonstrate how to pass [multimodal](/docs/concepts/multimodality/) input directly to models. \n", "We currently expect all input to be passed in the same format as [OpenAI expects](https://platform.openai.com/docs/guides/vision).\n", "For other model providers that support multimodal input, we have added logic inside the class to convert to the expected format.\n", "\n", - "In this example we will ask a model to describe an image." + "In this example we will ask a [model](/docs/concepts/chat_models/#multimodality) to describe an image." 
] }, { diff --git a/docs/docs/how_to/multimodal_prompts.ipynb b/docs/docs/how_to/multimodal_prompts.ipynb index a9cfc618a4bfe5..321e6efa8b0cc5 100644 --- a/docs/docs/how_to/multimodal_prompts.ipynb +++ b/docs/docs/how_to/multimodal_prompts.ipynb @@ -7,9 +7,9 @@ "source": [ "# How to use multimodal prompts\n", "\n", - "Here we demonstrate how to use prompt templates to format multimodal inputs to models. \n", + "Here we demonstrate how to use prompt templates to format [multimodal](/docs/concepts/multimodality/) inputs to models. \n", "\n", - "In this example we will ask a model to describe an image." + "In this example we will ask a [model](/docs/concepts/chat_models/#multimodality) to describe an image." ] }, { diff --git a/docs/docs/how_to/output_parser_custom.ipynb b/docs/docs/how_to/output_parser_custom.ipynb index a8cca984b6984c..d77e1ff9c6ae7e 100644 --- a/docs/docs/how_to/output_parser_custom.ipynb +++ b/docs/docs/how_to/output_parser_custom.ipynb @@ -7,11 +7,11 @@ "source": [ "# How to create a custom Output Parser\n", "\n", - "In some situations you may want to implement a custom parser to structure the model output into a custom format.\n", + "In some situations you may want to implement a custom [parser](/docs/concepts/output_parsers/) to structure the model output into a custom format.\n", "\n", "There are two ways to implement a custom parser:\n", "\n", - "1. Using `RunnableLambda` or `RunnableGenerator` in LCEL -- we strongly recommend this for most use cases\n", + "1. Using `RunnableLambda` or `RunnableGenerator` in [LCEL](/docs/concepts/lcel/) -- we strongly recommend this for most use cases\n", "2. By inheriting from one of the base classes for output parsing -- this is the hard way of doing things\n", "\n", "The difference between the two approaches is mostly superficial and is mainly in terms of which callbacks are triggered (e.g., `on_chain_start` vs. `on_parser_start`), and how a runnable lambda vs. a parser might be visualized in a tracing platform like LangSmith." diff --git a/docs/docs/how_to/output_parser_fixing.ipynb b/docs/docs/how_to/output_parser_fixing.ipynb index 922fcf7adf05b7..89692da147f8b9 100644 --- a/docs/docs/how_to/output_parser_fixing.ipynb +++ b/docs/docs/how_to/output_parser_fixing.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to use the output-fixing parser\n", "\n", - "This output parser wraps another output parser, and in the event that the first one fails it calls out to another LLM to fix any errors.\n", + "This [output parser](/docs/concepts/output_parsers/) wraps another output parser, and in the event that the first one fails it calls out to another LLM to fix any errors.\n", "\n", "But we can do other things besides throw errors. Specifically, we can pass the misformatted output, along with the formatted instructions, to the model and ask it to fix it.\n", "\n", diff --git a/docs/docs/how_to/output_parser_structured.ipynb b/docs/docs/how_to/output_parser_structured.ipynb index 2cb69c7bbb4f60..f9dda3f95e4795 100644 --- a/docs/docs/how_to/output_parser_structured.ipynb +++ b/docs/docs/how_to/output_parser_structured.ipynb @@ -19,7 +19,7 @@ "\n", "Language models output text. But there are times where you want to get more structured information than just text back. While some model providers support [built-in ways to return structured output](/docs/how_to/structured_output), not all do.\n", "\n", - "Output parsers are classes that help structure language model responses. 
There are two main methods an output parser must implement:\n", + "[Output parsers](/docs/concepts/output_parsers/) are classes that help structure language model responses. There are two main methods an output parser must implement:\n", "\n", "- \"Get format instructions\": A method which returns a string containing instructions for how the output of a language model should be formatted.\n", "- \"Parse\": A method which takes in a string (assumed to be the response from a language model) and parses it into some structure.\n", diff --git a/docs/docs/how_to/output_parser_xml.ipynb b/docs/docs/how_to/output_parser_xml.ipynb index d01b5990feda04..dd7dac9890363b 100644 --- a/docs/docs/how_to/output_parser_xml.ipynb +++ b/docs/docs/how_to/output_parser_xml.ipynb @@ -20,7 +20,7 @@ "\n", "LLMs from different providers often have different strengths depending on the specific data they are trained on. This also means that some may be \"better\" and more reliable at generating output in formats other than JSON.\n", "\n", - "This guide shows you how to use the [`XMLOutputParser`](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.xml.XMLOutputParser.html) to prompt models for XML output, then and parse that output into a usable format.\n", + "This guide shows you how to use the [`XMLOutputParser`](https://python.langchain.com/api_reference/core/output_parsers/langchain_core.output_parsers.xml.XMLOutputParser.html) to prompt models for XML output, and then [parse](/docs/concepts/output_parsers/) that output into a usable format.\n", "\n", ":::note\n", "Keep in mind that large language models are leaky abstractions! You'll have to use an LLM with sufficient capacity to generate well-formed XML.\n", diff --git a/docs/docs/how_to/parent_document_retriever.ipynb b/docs/docs/how_to/parent_document_retriever.ipynb index 38b06d64d1a1f4..452c31ace39e62 100644 --- a/docs/docs/how_to/parent_document_retriever.ipynb +++ b/docs/docs/how_to/parent_document_retriever.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to use the Parent Document Retriever\n", "\n", - "When splitting documents for retrieval, there are often conflicting desires:\n", + "When splitting documents for [retrieval](/docs/concepts/retrieval/), there are often conflicting desires:\n", "\n", "1. You may want to have small documents, so that their embeddings can most\n", " accurately reflect their meaning. If too long, then the embeddings can\n", @@ -72,7 +72,7 @@ "source": [ "## Retrieving full documents\n", "\n", - "In this mode, we want to retrieve the full documents. Therefore, we only specify a child splitter." + "In this mode, we want to retrieve the full documents. Therefore, we only specify a child [splitter](/docs/concepts/text_splitters/)." ] }, { diff --git a/docs/docs/how_to/prompts_composition.ipynb b/docs/docs/how_to/prompts_composition.ipynb index bf8d0f5fb232d8..25b51867b2ebcc 100644 --- a/docs/docs/how_to/prompts_composition.ipynb +++ b/docs/docs/how_to/prompts_composition.ipynb @@ -24,7 +24,7 @@ "\n", ":::\n", "\n", - "LangChain provides a user friendly interface for composing different parts of prompts together. You can do this with either string prompts or chat prompts. Constructing prompts this way allows for easy reuse of components." + "LangChain provides a user-friendly interface for composing different parts of [prompts](/docs/concepts/prompt_templates/) together. You can do this with either string prompts or chat prompts. Constructing prompts this way allows for easy reuse of components."
] }, { diff --git a/docs/docs/how_to/prompts_partial.ipynb b/docs/docs/how_to/prompts_partial.ipynb index b32e2586c17e98..4abf02b5323b29 100644 --- a/docs/docs/how_to/prompts_partial.ipynb +++ b/docs/docs/how_to/prompts_partial.ipynb @@ -24,7 +24,7 @@ "\n", ":::\n", "\n", - "Like partially binding arguments to a function, it can make sense to \"partial\" a prompt template - e.g. pass in a subset of the required values, as to create a new prompt template which expects only the remaining subset of values.\n", + "Like partially binding arguments to a function, it can make sense to \"partial\" a [prompt template](/docs/concepts/prompt_templates/) - e.g. pass in a subset of the required values, so as to create a new prompt template which expects only the remaining subset of values.\n", "\n", "LangChain supports this in two ways:\n", "\n", diff --git a/docs/docs/how_to/qa_chat_history_how_to.ipynb b/docs/docs/how_to/qa_chat_history_how_to.ipynb index c757e5ef35c127..0c82ac75f9a55d 100644 --- a/docs/docs/how_to/qa_chat_history_how_to.ipynb +++ b/docs/docs/how_to/qa_chat_history_how_to.ipynb @@ -19,7 +19,7 @@ ":::\n", "\n", "\n", - "In many Q&A applications we want to allow the user to have a back-and-forth conversation, meaning the application needs some sort of \"memory\" of past questions and answers, and some logic for incorporating those into its current thinking.\n", + "In many [Q&A applications](/docs/concepts/rag/) we want to allow the user to have a back-and-forth conversation, meaning the application needs some sort of \"memory\" of past questions and answers, and some logic for incorporating those into its current thinking.\n", "\n", "In this guide we focus on **adding logic for incorporating historical messages.**\n", "\n", diff --git a/docs/docs/how_to/qa_citations.ipynb b/docs/docs/how_to/qa_citations.ipynb index fda1b1f1daac57..d2b61428771dda 100644 --- a/docs/docs/how_to/qa_citations.ipynb +++ b/docs/docs/how_to/qa_citations.ipynb @@ -19,7 +19,7 @@ "\n", "We generally suggest using the first item of the list that works for your use-case. That is, if your model supports tool-calling, try methods 1 or 2; otherwise, or if those fail, advance down the list.\n", "\n", - "Let's first create a simple RAG chain. To start we'll just retrieve from Wikipedia using the [WikipediaRetriever](https://python.langchain.com/api_reference/community/retrievers/langchain_community.retrievers.wikipedia.WikipediaRetriever.html)." + "Let's first create a simple [RAG](/docs/concepts/rag/) chain. To start we'll just retrieve from Wikipedia using the [WikipediaRetriever](https://python.langchain.com/api_reference/community/retrievers/langchain_community.retrievers.wikipedia.WikipediaRetriever.html)." ] }, { @@ -140,7 +140,7 @@ "id": "c89e2045-9244-43e6-bf3f-59af22658529", "metadata": {}, "source": [ - "Now that we've got a model, retriver and prompt, let's chain them all together. We'll need to add some logic for formatting our retrieved Documents to a string that can be passed to our prompt. Following the how-to guide on [adding citations](/docs/how_to/qa_citations) to a RAG application, we'll make it so our chain returns both the answer and the retrieved Documents." + "Now that we've got a [model](/docs/concepts/chat_models/), [retriever](/docs/concepts/retrievers/) and [prompt](/docs/concepts/prompt_templates/), let's chain them all together. We'll need to add some logic for formatting our retrieved Documents to a string that can be passed to our prompt. 
Following the how-to guide on [adding citations](/docs/how_to/qa_citations) to a RAG application, we'll make it so our chain returns both the answer and the retrieved Documents." ] }, { diff --git a/docs/docs/how_to/qa_per_user.ipynb b/docs/docs/how_to/qa_per_user.ipynb index 65d4371b912be5..08d0592f803f78 100644 --- a/docs/docs/how_to/qa_per_user.ipynb +++ b/docs/docs/how_to/qa_per_user.ipynb @@ -7,9 +7,9 @@ "source": [ "# How to do per-user retrieval\n", "\n", - "This guide demonstrates how to configure runtime properties of a retrieval chain. An example application is to limit the documents available to a retriever based on the user.\n", + "This guide demonstrates how to configure runtime properties of a retrieval chain. An example application is to limit the documents available to a [retriever](/docs/concepts/retrievers/) based on the user.\n", "\n", - "When building a retrieval app, you often have to build it with multiple users in mind. This means that you may be storing data not just for one user, but for many different users, and they should not be able to see eachother's data. This means that you need to be able to configure your retrieval chain to only retrieve certain information. This generally involves two steps.\n", + "When building a [retrieval app](/docs/concepts/rag/), you often have to build it with multiple users in mind. This means that you may be storing data not just for one user, but for many different users, and they should not be able to see each other's data. This means that you need to be able to configure your retrieval chain to only retrieve certain information. This generally involves two steps.\n", "\n", "**Step 1: Make sure the retriever you are using supports multiple users**\n", "\n", diff --git a/docs/docs/how_to/qa_sources.ipynb b/docs/docs/how_to/qa_sources.ipynb index c9d9ce7330f6b6..eccf8d070e390b 100644 --- a/docs/docs/how_to/qa_sources.ipynb +++ b/docs/docs/how_to/qa_sources.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to get your RAG application to return sources\n", "\n", - "Often in Q&A applications it's important to show users the sources that were used to generate the answer. The simplest way to do this is for the chain to return the Documents that were retrieved in each generation.\n", + "Often in [Q&A](/docs/concepts/rag/) applications it's important to show users the sources that were used to generate the answer. The simplest way to do this is for the chain to return the Documents that were retrieved in each generation.\n", "\n", "We'll work off of the Q&A app we built over the [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng in the [RAG tutorial](/docs/tutorials/rag).\n", "\n", diff --git a/docs/docs/how_to/qa_streaming.ipynb b/docs/docs/how_to/qa_streaming.ipynb index 18e19fbcb99578..faca44cf0e3765 100644 --- a/docs/docs/how_to/qa_streaming.ipynb +++ b/docs/docs/how_to/qa_streaming.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to stream results from your RAG application\n", "\n", - "This guide explains how to stream results from a RAG application. It covers streaming tokens from the final output as well as intermediate steps of a chain (e.g., from query re-writing).\n", + "This guide explains how to stream results from a [RAG](/docs/concepts/rag/) application. 
It covers streaming tokens from the final output as well as intermediate steps of a chain (e.g., from query re-writing).\n", "\n", "We'll work off of the Q&A app with sources we built over the [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng in the [RAG tutorial](/docs/tutorials/rag)." ] diff --git a/docs/docs/how_to/query_few_shot.ipynb b/docs/docs/how_to/query_few_shot.ipynb index 2b087ae214c315..8955c4490c9ee0 100644 --- a/docs/docs/how_to/query_few_shot.ipynb +++ b/docs/docs/how_to/query_few_shot.ipynb @@ -17,7 +17,7 @@ "source": [ "# How to add examples to the prompt for query analysis\n", "\n", - "As our query analysis becomes more complex, the LLM may struggle to understand how exactly it should respond in certain scenarios. In order to improve performance here, we can add examples to the prompt to guide the LLM.\n", + "As our query analysis becomes more complex, the LLM may struggle to understand how exactly it should respond in certain scenarios. In order to improve performance here, we can [add examples](/docs/concepts/few_shot_prompting/) to the prompt to guide the LLM.\n", "\n", "Let's take a look at how we can add examples for the LangChain YouTube video query analyzer we built in the [Quickstart](/docs/tutorials/query_analysis)." ] diff --git a/docs/docs/how_to/query_multiple_retrievers.ipynb b/docs/docs/how_to/query_multiple_retrievers.ipynb index 9e6d7861dca10b..6cde2876b4d005 100644 --- a/docs/docs/how_to/query_multiple_retrievers.ipynb +++ b/docs/docs/how_to/query_multiple_retrievers.ipynb @@ -17,7 +17,7 @@ "source": [ "# How to handle multiple retrievers when doing query analysis\n", "\n", - "Sometimes, a query analysis technique may allow for selection of which retriever to use. To use this, you will need to add some logic to select the retriever to do. We will show a simple example (using mock data) of how to do that." + "Sometimes, a query analysis technique may allow for selection of which [retriever](/docs/concepts/retrievers/) to use. To use this, you will need to add some logic to select which retriever to use. We will show a simple example (using mock data) of how to do that." ] }, { diff --git a/docs/docs/how_to/recursive_json_splitter.ipynb b/docs/docs/how_to/recursive_json_splitter.ipynb index 9936ed9ebcb7fc..57e97af1bf64b3 100644 --- a/docs/docs/how_to/recursive_json_splitter.ipynb +++ b/docs/docs/how_to/recursive_json_splitter.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to split JSON data\n", "\n", - "This json splitter splits json data while allowing control over chunk sizes. It traverses json data depth first and builds smaller json chunks. It attempts to keep nested json objects whole but will split them if needed to keep chunks between a min_chunk_size and the max_chunk_size.\n", + "This json splitter [splits](/docs/concepts/text_splitters/) json data while allowing control over chunk sizes. It traverses json data depth first and builds smaller json chunks. It attempts to keep nested json objects whole but will split them if needed to keep chunks between a min_chunk_size and the max_chunk_size.\n", "\n", "If the value is not a nested json, but rather a very large string, the string will not be split. If you need a hard cap on the chunk size, consider composing this with a Recursive Text splitter on those chunks. 
There is an optional pre-processing step to split lists, by first converting them to json (dict) and then splitting them as such.\n", "\n", diff --git a/docs/docs/how_to/recursive_text_splitter.ipynb b/docs/docs/how_to/recursive_text_splitter.ipynb index ce77b89f9d1a5d..166fa59e874a03 100644 --- a/docs/docs/how_to/recursive_text_splitter.ipynb +++ b/docs/docs/how_to/recursive_text_splitter.ipynb @@ -21,7 +21,7 @@ "source": [ "# How to recursively split text by characters\n", "\n", - "This text splitter is the recommended one for generic text. It is parameterized by a list of characters. It tries to split on them in order until the chunks are small enough. The default list is `[\"\\n\\n\", \"\\n\", \" \", \"\"]`. This has the effect of trying to keep all paragraphs (and then sentences, and then words) together as long as possible, as those would generically seem to be the strongest semantically related pieces of text.\n", + "This [text splitter](/docs/concepts/text_splitters/) is the recommended one for generic text. It is parameterized by a list of characters. It tries to split on them in order until the chunks are small enough. The default list is `[\"\\n\\n\", \"\\n\", \" \", \"\"]`. This has the effect of trying to keep all paragraphs (and then sentences, and then words) together as long as possible, as those would generically seem to be the strongest semantically related pieces of text.\n", "\n", "1. How the text is split: by list of characters.\n", "2. How the chunk size is measured: by number of characters.\n", diff --git a/docs/docs/how_to/response_metadata.ipynb b/docs/docs/how_to/response_metadata.ipynb index 773333d09dc560..150cf6a89bc21b 100644 --- a/docs/docs/how_to/response_metadata.ipynb +++ b/docs/docs/how_to/response_metadata.ipynb @@ -7,7 +7,7 @@ "source": [ "# Response metadata\n", "\n", - "Many model providers include some metadata in their chat generation responses. This metadata can be accessed via the `AIMessage.response_metadata: Dict` attribute. Depending on the model provider and model configuration, this can contain information like [token counts](/docs/how_to/chat_token_usage_tracking), [logprobs](/docs/how_to/logprobs), and more.\n", + "Many model providers include some metadata in their chat generation [responses](/docs/concepts/messages/#aimessage). This metadata can be accessed via the `AIMessage.response_metadata: Dict` attribute. Depending on the model provider and model configuration, this can contain information like [token counts](/docs/how_to/chat_token_usage_tracking), [logprobs](/docs/how_to/logprobs), and more.\n", "\n", "Here's what the response metadata looks like for a few different providers:\n", "\n", diff --git a/docs/docs/how_to/runnable_runtime_secrets.ipynb b/docs/docs/how_to/runnable_runtime_secrets.ipynb index 819cdf1cbe60ce..5a6b5cfa1ef5f1 100644 --- a/docs/docs/how_to/runnable_runtime_secrets.ipynb +++ b/docs/docs/how_to/runnable_runtime_secrets.ipynb @@ -11,7 +11,7 @@ "\n", ":::\n", "\n", - "We can pass in secrets to our runnables at runtime using the `RunnableConfig`. Specifically we can pass in secrets with a `__` prefix to the `configurable` field. This will ensure that these secrets aren't traced as part of the invocation:" + "We can pass in secrets to our [runnables](/docs/concepts/runnables/) at runtime using the `RunnableConfig`. Specifically we can pass in secrets with a `__` prefix to the `configurable` field. 
This will ensure that these secrets aren't traced as part of the invocation:" ] }, { diff --git a/docs/docs/how_to/self_query.ipynb b/docs/docs/how_to/self_query.ipynb index b07e6a89b15826..b85f4c95964494 100644 --- a/docs/docs/how_to/self_query.ipynb +++ b/docs/docs/how_to/self_query.ipynb @@ -13,7 +13,7 @@ "\n", ":::\n", "\n", - "A self-querying retriever is one that, as the name suggests, has the ability to query itself. Specifically, given any natural language query, the retriever uses a query-constructing LLM chain to write a structured query and then applies that structured query to its underlying VectorStore. This allows the retriever to not only use the user-input query for semantic similarity comparison with the contents of stored documents but to also extract filters from the user query on the metadata of stored documents and to execute those filters.\n", + "A self-querying [retriever](/docs/concepts/retrievers/) is one that, as the name suggests, has the ability to query itself. Specifically, given any natural language query, the retriever uses a query-constructing LLM chain to write a structured query and then applies that structured query to its underlying [vector store](/docs/concepts/vectorstores/). This allows the retriever to not only use the user-input query for semantic similarity comparison with the contents of stored documents but to also extract filters from the user query on the metadata of stored documents and to execute those filters.\n", "\n", "![](../../static/img/self_querying.jpg)\n", "\n", @@ -95,7 +95,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chains.query_constructor.base import AttributeInfo\n", + "from langchain.chains.query_constructor.schema import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", "from langchain_openai import ChatOpenAI\n", "\n", diff --git a/docs/docs/how_to/split_by_token.ipynb b/docs/docs/how_to/split_by_token.ipynb index 87aad35bc64608..047f4777f3474e 100644 --- a/docs/docs/how_to/split_by_token.ipynb +++ b/docs/docs/how_to/split_by_token.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to split text by tokens \n", "\n", - "Language models have a token limit. You should not exceed the token limit. When you split your text into chunks it is therefore a good idea to count the number of tokens. There are many tokenizers. When you count tokens in your text you should use the same tokenizer as used in the language model. " + "Language models have a [token](/docs/concepts/tokens/) limit. You should not exceed the token limit. When you [split your text](/docs/concepts/text_splitters/) into chunks it is therefore a good idea to count the number of tokens. There are many tokenizers. When you count tokens in your text you should use the same tokenizer as used in the language model. 
" ] }, { diff --git a/docs/docs/how_to/sql_prompting.ipynb b/docs/docs/how_to/sql_prompting.ipynb index 0cd1a1c2626d4e..831a7bca13a51d 100644 --- a/docs/docs/how_to/sql_prompting.ipynb +++ b/docs/docs/how_to/sql_prompting.ipynb @@ -12,7 +12,7 @@ "\n", "- How the dialect of the LangChain [SQLDatabase](https://python.langchain.com/api_reference/community/utilities/langchain_community.utilities.sql_database.SQLDatabase.html) impacts the prompt of the chain;\n", "- How to format schema information into the prompt using `SQLDatabase.get_context`;\n", - "- How to build and select few-shot examples to assist the model.\n", + "- How to build and select [few-shot examples](/docs/concepts/few_shot_prompting/) to assist the model.\n", "\n", "## Setup\n", "\n", diff --git a/docs/docs/how_to/structured_output.ipynb b/docs/docs/how_to/structured_output.ipynb index 04171ac72cb3d3..4eaf09d3569ef2 100644 --- a/docs/docs/how_to/structured_output.ipynb +++ b/docs/docs/how_to/structured_output.ipynb @@ -29,7 +29,7 @@ "- [Function/tool calling](/docs/concepts/tool_calling)\n", ":::\n", "\n", - "It is often useful to have a model return output that matches a specific schema. One common use-case is extracting data from text to insert into a database or use with some other downstream system. This guide covers a few strategies for getting structured outputs from a model.\n", + "It is often useful to have a model return output that matches a specific [schema](/docs/concepts/structured_outputs/). One common use-case is extracting data from text to insert into a database or use with some other downstream system. This guide covers a few strategies for getting structured outputs from a model.\n", "\n", "## The `.with_structured_output()` method\n", "\n", @@ -41,9 +41,9 @@ "\n", ":::\n", "\n", - "This is the easiest and most reliable way to get structured outputs. `with_structured_output()` is implemented for models that provide native APIs for structuring outputs, like tool/function calling or JSON mode, and makes use of these capabilities under the hood.\n", + "This is the easiest and most reliable way to get structured outputs. `with_structured_output()` is implemented for [models that provide native APIs for structuring outputs](/docs/integrations/chat/), like tool/function calling or JSON mode, and makes use of these capabilities under the hood.\n", "\n", - "This method takes a schema as input which specifies the names, types, and descriptions of the desired output attributes. The method returns a model-like Runnable, except that instead of outputting strings or Messages it outputs objects corresponding to the given schema. The schema can be specified as a TypedDict class, [JSON Schema](https://json-schema.org/) or a Pydantic class. If TypedDict or JSON Schema are used then a dictionary will be returned by the Runnable, and if a Pydantic class is used then a Pydantic object will be returned.\n", + "This method takes a schema as input which specifies the names, types, and descriptions of the desired output attributes. The method returns a model-like Runnable, except that instead of outputting strings or [messages](/docs/concepts/messages/) it outputs objects corresponding to the given schema. The schema can be specified as a TypedDict class, [JSON Schema](https://json-schema.org/) or a Pydantic class. 
If TypedDict or JSON Schema are used then a dictionary will be returned by the Runnable, and if a Pydantic class is used then a Pydantic object will be returned.\n", "\n", "As an example, let's get a model to generate a joke and separate the setup from the punchline:\n", "\n", diff --git a/docs/docs/how_to/summarize_stuff.ipynb b/docs/docs/how_to/summarize_stuff.ipynb index 86fbb86e10811d..73f8f4ffd73776 100644 --- a/docs/docs/how_to/summarize_stuff.ipynb +++ b/docs/docs/how_to/summarize_stuff.ipynb @@ -30,7 +30,7 @@ "source": [ "## Load chat model\n", "\n", - "Let's first load a chat model:\n", + "Let's first load a [chat model](/docs/concepts/chat_models/):\n", "\n", "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", "\n", diff --git a/docs/docs/how_to/time_weighted_vectorstore.ipynb b/docs/docs/how_to/time_weighted_vectorstore.ipynb index 91ba6e72f3b255..d14b375704625c 100644 --- a/docs/docs/how_to/time_weighted_vectorstore.ipynb +++ b/docs/docs/how_to/time_weighted_vectorstore.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to use a time-weighted vector store retriever\n", "\n", - "This retriever uses a combination of semantic similarity and a time decay.\n", + "This [retriever](/docs/concepts/retrievers/) uses a combination of semantic [similarity](/docs/concepts/embedding_models/#measure-similarity) and a time decay.\n", "\n", "The algorithm for scoring them is:\n", "\n", diff --git a/docs/docs/how_to/tool_artifacts.ipynb b/docs/docs/how_to/tool_artifacts.ipynb index a6fa076c0c14c9..f99978aedd84aa 100644 --- a/docs/docs/how_to/tool_artifacts.ipynb +++ b/docs/docs/how_to/tool_artifacts.ipynb @@ -16,7 +16,7 @@ "\n", ":::\n", "\n", - "Tools are utilities that can be called by a model, and whose outputs are designed to be fed back to a model. Sometimes, however, there are artifacts of a tool's execution that we want to make accessible to downstream components in our chain or agent, but that we don't want to expose to the model itself. For example if a tool returns a custom object, a dataframe or an image, we may want to pass some metadata about this output to the model without passing the actual output to the model. At the same time, we may want to be able to access this full output elsewhere, for example in downstream tools.\n", + "[Tools](/docs/concepts/tools/) are utilities that can be [called by a model](/docs/concepts/tool_calling/), and whose outputs are designed to be fed back to a model. Sometimes, however, there are artifacts of a tool's execution that we want to make accessible to downstream components in our chain or agent, but that we don't want to expose to the model itself. For example if a tool returns a custom object, a dataframe or an image, we may want to pass some metadata about this output to the model without passing the actual output to the model. 
At the same time, we may want to be able to access this full output elsewhere, for example in downstream tools.\n", "\n", "The Tool and [ToolMessage](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.tool.ToolMessage.html) interfaces make it possible to distinguish between the parts of the tool output meant for the model (this is the ToolMessage.content) and those parts which are meant for use outside the model (ToolMessage.artifact).\n", "\n", diff --git a/docs/docs/how_to/tool_choice.ipynb b/docs/docs/how_to/tool_choice.ipynb index 0a8c6cdf05c0de..0a7ad581e0df71 100644 --- a/docs/docs/how_to/tool_choice.ipynb +++ b/docs/docs/how_to/tool_choice.ipynb @@ -14,7 +14,7 @@ "- [How to use a model to call tools](/docs/how_to/tool_calling)\n", ":::\n", "\n", - "In order to force our LLM to select a specific tool, we can use the `tool_choice` parameter to ensure certain behavior. First, let's define our model and tools:" + "In order to force our LLM to select a specific [tool](/docs/concepts/tools/), we can use the `tool_choice` parameter to ensure certain behavior. First, let's define our model and tools:" ] }, { diff --git a/docs/docs/how_to/tool_configure.ipynb b/docs/docs/how_to/tool_configure.ipynb index 474d2f3c5fea99..64305d26a1fa15 100644 --- a/docs/docs/how_to/tool_configure.ipynb +++ b/docs/docs/how_to/tool_configure.ipynb @@ -17,9 +17,9 @@ "\n", ":::\n", "\n", - "If you have a tool that call chat models, retrievers, or other runnables, you may want to access internal events from those runnables or configure them with additional properties. This guide shows you how to manually pass parameters properly so that you can do this using the `astream_events()` method.\n", + "If you have a [tool](/docs/concepts/tools/) that calls [chat models](/docs/concepts/chat_models/), [retrievers](/docs/concepts/retrievers/), or other [runnables](/docs/concepts/runnables/), you may want to access internal events from those runnables or configure them with additional properties. This guide shows you how to manually pass parameters properly so that you can do this using the `astream_events()` method.\n", "\n", - "Tools are runnables, and you can treat them the same way as any other runnable at the interface level - you can call `invoke()`, `batch()`, and `stream()` on them as normal. However, when writing custom tools, you may want to invoke other runnables like chat models or retrievers. In order to properly trace and configure those sub-invocations, you'll need to manually access and pass in the tool's current [`RunnableConfig`](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.config.RunnableConfig.html) object. This guide show you some examples of how to do that.\n", + "Tools are [runnables](/docs/concepts/runnables/), and you can treat them the same way as any other runnable at the interface level - you can call `invoke()`, `batch()`, and `stream()` on them as normal. However, when writing custom tools, you may want to invoke other runnables like chat models or retrievers. In order to properly trace and configure those sub-invocations, you'll need to manually access and pass in the tool's current [`RunnableConfig`](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.config.RunnableConfig.html) object. 
This guide shows you some examples of how to do that.\n", "\n", ":::caution Compatibility\n", "\n", diff --git a/docs/docs/how_to/tool_runtime.ipynb b/docs/docs/how_to/tool_runtime.ipynb index afc8c1c5e4d747..3356aaa059f9f6 100644 --- a/docs/docs/how_to/tool_runtime.ipynb +++ b/docs/docs/how_to/tool_runtime.ipynb @@ -21,7 +21,7 @@ " [\"langchain-core\", \"0.2.21\"],\n", "]} />\n", "\n", - "You may need to bind values to a tool that are only known at runtime. For example, the tool logic may require using the ID of the user who made the request.\n", + "You may need to bind values to a [tool](/docs/concepts/tools/) that are only known at runtime. For example, the tool logic may require using the ID of the user who made the request.\n", "\n", "Most of the time, such values should not be controlled by the LLM. In fact, allowing the LLM to control the user ID may lead to a security risk.\n", "\n", diff --git a/docs/docs/how_to/tool_stream_events.ipynb b/docs/docs/how_to/tool_stream_events.ipynb index 5b552362f4650f..16134a029210c1 100644 --- a/docs/docs/how_to/tool_stream_events.ipynb +++ b/docs/docs/how_to/tool_stream_events.ipynb @@ -16,7 +16,7 @@ "\n", ":::\n", "\n", - "If you have tools that call chat models, retrievers, or other runnables, you may want to access internal events from those runnables or configure them with additional properties. This guide shows you how to manually pass parameters properly so that you can do this using the `astream_events()` method.\n", + "If you have [tools](/docs/concepts/tools/) that call [chat models](/docs/concepts/chat_models/), [retrievers](/docs/concepts/retrievers/), or other [runnables](/docs/concepts/runnables/), you may want to access internal events from those runnables or configure them with additional properties. This guide shows you how to manually pass parameters properly so that you can do this using the `astream_events()` method.\n", "\n", ":::caution Compatibility\n", "\n", diff --git a/docs/docs/how_to/tool_streaming.ipynb b/docs/docs/how_to/tool_streaming.ipynb index b96868d80ffec8..fc85d3c31faeba 100644 --- a/docs/docs/how_to/tool_streaming.ipynb +++ b/docs/docs/how_to/tool_streaming.ipynb @@ -6,7 +6,7 @@ "source": [ "# How to stream tool calls\n", "\n", - "When tools are called in a streaming context, \n", + "When [tools](/docs/concepts/tools/) are called in a streaming context, \n", "[message chunks](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessageChunk.html#langchain_core.messages.ai.AIMessageChunk) \n", "will be populated with [tool call chunk](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.tool.ToolCallChunk.html#langchain_core.messages.tool.ToolCallChunk) \n", "objects in a list via the `.tool_call_chunks` attribute. A `ToolCallChunk` includes \n", diff --git a/docs/docs/how_to/tools_as_openai_functions.ipynb b/docs/docs/how_to/tools_as_openai_functions.ipynb index d321afd0dddd91..a0a1eea2aea1c5 100644 --- a/docs/docs/how_to/tools_as_openai_functions.ipynb +++ b/docs/docs/how_to/tools_as_openai_functions.ipynb @@ -7,7 +7,7 @@ "source": [ "# How to convert tools to OpenAI Functions\n", "\n", - "This notebook goes over how to use LangChain tools as OpenAI functions." + "This notebook goes over how to use LangChain [tools](/docs/concepts/tools/) as OpenAI functions."
] }, { diff --git a/docs/docs/how_to/tools_chain.ipynb b/docs/docs/how_to/tools_chain.ipynb index b54fa2c885f8f8..4497659789cd5b 100644 --- a/docs/docs/how_to/tools_chain.ipynb +++ b/docs/docs/how_to/tools_chain.ipynb @@ -17,7 +17,7 @@ "source": [ "# How to use tools in a chain\n", "\n", - "In this guide, we will go over the basic ways to create Chains and Agents that call Tools. Tools can be just about anything β€”Β APIs, functions, databases, etc. Tools allow us to extend the capabilities of a model beyond just outputting text/messages. The key to using models with tools is correctly prompting a model and parsing its response so that it chooses the right tools and provides the right inputs for them." + "In this guide, we will go over the basic ways to create Chains and Agents that call [Tools](/docs/concepts/tools/). Tools can be just about anything β€”Β APIs, functions, databases, etc. Tools allow us to extend the capabilities of a model beyond just outputting text/messages. The key to using models with tools is correctly prompting a model and parsing its response so that it chooses the right tools and provides the right inputs for them." ] }, { @@ -143,7 +143,7 @@ "![chain](../../static/img/tool_chain.svg)\n", "\n", "### Tool/function calling\n", - "One of the most reliable ways to use tools with LLMs is with tool calling APIs (also sometimes called function calling). This only works with models that explicitly support tool calling. You can see which models support tool calling [here](/docs/integrations/chat/), and learn more about how to use tool calling in [this guide](/docs/how_to/function_calling).\n", + "One of the most reliable ways to use tools with LLMs is with [tool calling](/docs/concepts/tool_calling/) APIs (also sometimes called function calling). This only works with models that explicitly support tool calling. You can see which models support tool calling [here](/docs/integrations/chat/), and learn more about how to use tool calling in [this guide](/docs/how_to/function_calling).\n", "\n", "First we'll define our model and tools. We'll start with just a single tool, `multiply`.\n", "\n", diff --git a/docs/docs/how_to/tools_error.ipynb b/docs/docs/how_to/tools_error.ipynb index 7a881b89fbf3ab..885c288e2af23d 100644 --- a/docs/docs/how_to/tools_error.ipynb +++ b/docs/docs/how_to/tools_error.ipynb @@ -16,7 +16,7 @@ "\n", ":::\n", "\n", - "Calling tools with an LLM is generally more reliable than pure prompting, but it isn't perfect. The model may try to call a tool that doesn't exist or fail to return arguments that match the requested schema. Strategies like keeping schemas simple, reducing the number of tools you pass at once, and having good names and descriptions can help mitigate this risk, but aren't foolproof.\n", + "[Calling tools](/docs/concepts/tool_calling/) with an LLM is generally more reliable than pure prompting, but it isn't perfect. The model may try to call a tool that doesn't exist or fail to return arguments that match the requested schema. Strategies like keeping schemas simple, reducing the number of tools you pass at once, and having good names and descriptions can help mitigate this risk, but aren't foolproof.\n", "\n", "This guide covers some ways to build error handling into your chains to mitigate these failure modes." 
] diff --git a/docs/docs/how_to/tools_few_shot.ipynb b/docs/docs/how_to/tools_few_shot.ipynb index 0e3d6564874cde..b9032391a4a3cb 100644 --- a/docs/docs/how_to/tools_few_shot.ipynb +++ b/docs/docs/how_to/tools_few_shot.ipynb @@ -6,7 +6,7 @@ "source": [ "# How to use few-shot prompting with tool calling\n", "\n", - "For more complex tool use it's very useful to add few-shot examples to the prompt. We can do this by adding `AIMessage`s with `ToolCall`s and corresponding `ToolMessage`s to our prompt.\n", + "For more complex tool use it's very useful to add [few-shot examples](/docs/concepts/few_shot_prompting/) to the prompt. We can do this by adding `AIMessage`s with `ToolCall`s and corresponding `ToolMessage`s to our prompt.\n", "\n", "First let's define our tools and model." ] diff --git a/docs/docs/how_to/trim_messages.ipynb b/docs/docs/how_to/trim_messages.ipynb index 97b725dd72cff4..505e7b90195b91 100644 --- a/docs/docs/how_to/trim_messages.ipynb +++ b/docs/docs/how_to/trim_messages.ipynb @@ -20,7 +20,7 @@ "\n", ":::\n", "\n", - "All models have finite context windows, meaning there's a limit to how many tokens they can take as input. If you have very long messages or a chain/agent that accumulates a long message is history, you'll need to manage the length of the messages you're passing in to the model.\n", + "All models have finite context windows, meaning there's a limit to how many [tokens](/docs/concepts/tokens/) they can take as input. If you have very long messages or a chain/agent that accumulates a long message history, you'll need to manage the length of the messages you're passing in to the model.\n", "\n", "[trim_messages](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.utils.trim_messages.html) can be used to reduce the size of a chat history to a specified token count or specified message count.\n", "\n", diff --git a/docs/docs/how_to/vectorstore_retriever.ipynb b/docs/docs/how_to/vectorstore_retriever.ipynb index cc9958eb32a387..f9651f2d9528e2 100644 --- a/docs/docs/how_to/vectorstore_retriever.ipynb +++ b/docs/docs/how_to/vectorstore_retriever.ipynb @@ -17,7 +17,7 @@ "source": [ "# How to use a vectorstore as a retriever\n", "\n", - "A vector store retriever is a retriever that uses a vector store to retrieve documents. It is a lightweight wrapper around the vector store class to make it conform to the retriever interface.\n", + "A vector store retriever is a [retriever](/docs/concepts/retrievers/) that uses a [vector store](/docs/concepts/vectorstores/) to retrieve documents. It is a lightweight wrapper around the vector store class to make it conform to the retriever [interface](/docs/concepts/runnables/).\n", "It uses the search methods implemented by a vector store, like similarity search and MMR, to query the texts in the vector store.\n", "\n", "In this guide we will cover:\n", diff --git a/docs/docs/integrations/chat/reka.ipynb b/docs/docs/integrations/chat/reka.ipynb new file mode 100644 index 00000000000000..1ebedb66979d17 --- /dev/null +++ b/docs/docs/integrations/chat/reka.ipynb @@ -0,0 +1,593 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: Reka\n", + "---" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# ChatReka\n", + "\n", + "This notebook provides a quick overview for getting started with Reka [chat models](../../concepts/chat_models.mdx). \n", + "\n", + "Reka has several chat models. 
You can find information about their latest models and their costs, context windows, and supported input types in the [Reka docs](https://docs.reka.ai/available-models).\n", + "\n", + "\n", + "\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatReka] | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | βœ… | ❌ | ❌ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain_community?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain_community?style=flat-square&label=%20) |\n", + "\n", + "### Model features\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| βœ… | ❌ | ❌ | βœ… | βœ… | βœ… | βœ… | βœ… | ❌ | ❌ | \n", + "\n", + "## Setup\n", + "\n", + "To access Reka models you'll need to create a Reka developer account, get an API key, and install the `langchain_community` integration package and the Reka Python package via `pip install reka-api`.\n", + "\n", + "### Credentials\n", + "\n", + "Head to https://platform.reka.ai/ to sign up for Reka and generate an API key. Once you've done this, set the `REKA_API_KEY` environment variable:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Installation\n", + "\n", + "The LangChain Reka integration lives in the `langchain_community` package:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install -qU langchain_community reka-api" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instantiation" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import getpass\n", + "import os\n", + "\n", + "os.environ[\"REKA_API_KEY\"] = getpass.getpass(\"Enter your Reka API key: \")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Optional: use LangSmith to trace the execution of the model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import getpass\n", + "import os\n", + "\n", + "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n", + "os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass(\"Enter your Langsmith API key: \")" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_community.chat_models import ChatReka\n", + "\n", + "model = ChatReka()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage(content=' Hello! How can I help you today? 
If you have a question, need assistance, or just want to chat, feel free to let me know. Have a great day!\\n\\n', additional_kwargs={}, response_metadata={}, id='run-61522ec2-0587-4fd5-a492-5b205fd8860c-0')" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model.invoke(\"hi\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Images input " + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " The image shows an indoor setting with no visible windows or natural light, and there are no indicators of weather conditions. The focus is on a cat sitting on a computer keyboard, and the background includes a computer monitor and various office supplies.\n" + ] + } + ], + "source": [ + "from langchain_core.messages import HumanMessage\n", + "\n", + "image_url = \"https://v0.docs.reka.ai/_images/000000245576.jpg\"\n", + "\n", + "message = HumanMessage(\n", + " content=[\n", + " {\"type\": \"text\", \"text\": \"describe the weather in this image\"},\n", + " {\n", + " \"type\": \"image_url\",\n", + " \"image_url\": {\"url\": image_url},\n", + " },\n", + " ],\n", + ")\n", + "response = model.invoke([message])\n", + "print(response.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Multiple images as input" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " The first image features two German Shepherds, one adult and one puppy, in a vibrant, lush green setting. The adult dog is carrying a large stick in its mouth, running through what appears to be a grassy field, with the puppy following close behind. Both dogs exhibit striking physical characteristics typical of the breed, such as pointed ears and dense fur.\n", + "\n", + "The second image shows a close-up of a single cat with striking blue eyes, likely a breed like the Siberian or Maine Coon, in a natural outdoor setting. The cat's fur is lighter, possibly a mix of white and gray, and it has a more subdued expression compared to the dogs. The background is blurred, suggesting a focus on the cat's face.\n", + "\n", + "Overall, the differences lie in the subjects (two dogs vs. one cat), the setting (lush, vibrant grassy field vs. a more muted outdoor background), and the overall mood and activity depicted (playful and active vs. serene and focused).\n" + ] + } + ], + "source": [ + "message = HumanMessage(\n", + " content=[\n", + " {\"type\": \"text\", \"text\": \"What are the difference between the two images? 
\"},\n", + " {\n", + " \"type\": \"image_url\",\n", + " \"image_url\": {\n", + " \"url\": \"https://cdn.pixabay.com/photo/2019/07/23/13/51/shepherd-dog-4357790_1280.jpg\"\n", + " },\n", + " },\n", + " {\n", + " \"type\": \"image_url\",\n", + " \"image_url\": {\n", + " \"url\": \"https://cdn.pixabay.com/photo/2024/02/17/00/18/cat-8578562_1280.jpg\"\n", + " },\n", + " },\n", + " ],\n", + ")\n", + "response = model.invoke([message])\n", + "print(response.content)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Chaining" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage(content=' Ich liebe Programmieren.\\n\\n', additional_kwargs={}, response_metadata={}, id='run-ffc4ace1-b73a-4fb3-ad0f-57e60a0f9b8d-0')" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from langchain_core.prompts import ChatPromptTemplate\n", + "\n", + "prompt = ChatPromptTemplate(\n", + " [\n", + " (\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ),\n", + " (\"human\", \"{input}\"),\n", + " ]\n", + ")\n", + "\n", + "chain = prompt | model\n", + "chain.invoke(\n", + " {\n", + " \"input_language\": \"English\",\n", + " \"output_language\": \"German\",\n", + " \"input\": \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use use with tavtly api search" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Tool use and agent creation\n", + "\n", + "## Define the tools\n", + "\n", + "We first need to create the tools we want to use. Our main tool of choice will be Tavily - a search engine. 
We have a built-in tool in LangChain to easily use Tavily search engine as tool.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import getpass\n", + "import os\n", + "\n", + "os.environ[\"TAVILY_API_KEY\"] = getpass.getpass(\"Enter your Tavily API key: \")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[{'url': 'https://www.weatherapi.com/', 'content': \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.775, 'lon': -122.4183, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1730484342, 'localtime': '2024-11-01 11:05'}, 'current': {'last_updated_epoch': 1730484000, 'last_updated': '2024-11-01 11:00', 'temp_c': 11.1, 'temp_f': 52.0, 'is_day': 1, 'condition': {'text': 'Mist', 'icon': '//cdn.weatherapi.com/weather/64x64/day/143.png', 'code': 1030}, 'wind_mph': 2.9, 'wind_kph': 4.7, 'wind_degree': 247, 'wind_dir': 'WSW', 'pressure_mb': 1019.0, 'pressure_in': 30.08, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 100, 'cloud': 100, 'feelslike_c': 11.1, 'feelslike_f': 52.0, 'windchill_c': 10.3, 'windchill_f': 50.5, 'heatindex_c': 10.8, 'heatindex_f': 51.5, 'dewpoint_c': 10.4, 'dewpoint_f': 50.6, 'vis_km': 2.8, 'vis_miles': 1.0, 'uv': 3.0, 'gust_mph': 3.8, 'gust_kph': 6.1}}\"}, {'url': 'https://weatherspark.com/h/m/557/2024/1/Historical-Weather-in-January-2024-in-San-Francisco-California-United-States', 'content': 'San Francisco Temperature History January 2024\\nHourly Temperature in January 2024 in San Francisco\\nCompare San Francisco to another city:\\nCloud Cover in January 2024 in San Francisco\\nDaily Precipitation in January 2024 in San Francisco\\nObserved Weather in January 2024 in San Francisco\\nHours of Daylight and Twilight in January 2024 in San Francisco\\nSunrise & Sunset with Twilight in January 2024 in San Francisco\\nSolar Elevation and Azimuth in January 2024 in San Francisco\\nMoon Rise, Set & Phases in January 2024 in San Francisco\\nHumidity Comfort Levels in January 2024 in San Francisco\\nWind Speed in January 2024 in San Francisco\\nHourly Wind Speed in January 2024 in San Francisco\\nHourly Wind Direction in 2024 in San Francisco\\nAtmospheric Pressure in January 2024 in San Francisco\\nData Sources\\n See all nearby weather stations\\nLatest Report β€” 1:56 PM\\nFri, Jan 12, 2024\\xa0\\xa0\\xa0\\xa04 min ago\\xa0\\xa0\\xa0\\xa0UTC 21:56\\nCall Sign KSFO\\nTemp.\\n54.0Β°F\\nPrecipitation\\nNo Report\\nWind\\n8.1 mph\\nCloud Cover\\nMostly Cloudy\\n14,000 ft\\nRaw: KSFO 122156Z 08007KT 10SM FEW030 SCT050 BKN140 12/07 A3022 While having the tremendous advantages of temporal and spatial completeness, these reconstructions: (1) are based on computer models that may have model-based errors, (2) are coarsely sampled on a 50 km grid and are therefore unable to reconstruct the local variations of many microclimates, and (3) have particular difficulty with the weather in some coastal areas, especially small islands.\\n We further caution that our travel scores are only as good as the data that underpin them, that weather conditions at any given location and time are unpredictable and variable, and that the definition of the scores reflects a particular set of preferences that may not agree with those of any particular reader.\\n January 2024 Weather History in San Francisco California, United States\\nThe data for this report 
comes from the San Francisco International Airport.'}]\n" + ] + } + ], + "source": [ + "from langchain_community.tools.tavily_search import TavilySearchResults\n", + "\n", + "search = TavilySearchResults(max_results=2)\n", + "search_results = search.invoke(\"what is the weather in SF\")\n", + "print(search_results)\n", + "# If we want, we can create other tools.\n", + "# Once we have all the tools we want, we can put them in a list that we will reference later.\n", + "tools = [search]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now see what it is like to enable this model to do tool calling. In order to enable that we use .bind_tools to give the language model knowledge of these tools\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "model_with_tools = model.bind_tools(tools)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now call the model. Let's first call it with a normal message, and see how it responds. We can look at both the content field as well as the tool_calls field.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ContentString: Hello! How can I help you today? If you have a question or need information on a specific topic, feel free to ask. Just type your search query and I'll do my best to assist using the available function.\n", + "\n", + "\n", + "ToolCalls: []\n" + ] + } + ], + "source": [ + "from langchain_core.messages import HumanMessage\n", + "\n", + "response = model_with_tools.invoke([HumanMessage(content=\"Hi!\")])\n", + "\n", + "print(f\"ContentString: {response.content}\")\n", + "print(f\"ToolCalls: {response.tool_calls}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's try calling it with some input that would expect a tool to be called.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ContentString: \n", + "ToolCalls: [{'name': 'tavily_search_results_json', 'args': {'query': 'weather in SF'}, 'id': '2548c622-3553-42df-8220-39fde0632bdb', 'type': 'tool_call'}]\n" + ] + } + ], + "source": [ + "response = model_with_tools.invoke([HumanMessage(content=\"What's the weather in SF?\")])\n", + "\n", + "print(f\"ContentString: {response.content}\")\n", + "print(f\"ToolCalls: {response.tool_calls}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can see that there's now no text content, but there is a tool call! It wants us to call the Tavily Search tool.\n", + "\n", + "This isn't calling that tool yet - it's just telling us to. In order to actually call it, we'll want to create our agent." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create the agent" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that we have defined the tools and the LLM, we can create the agent. We will be using LangGraph to construct the agent. 
Currently, we are using a high level interface to construct the agent, but the nice thing about LangGraph is that this high-level interface is backed by a low-level, highly controllable API in case you want to modify the agent logic.\n", + "\n", + "Now, we can initialize the agent with the LLM and the tools.\n", + "\n", + "Note that we are passing in the model, not model_with_tools. That is because `create_react_agent` will call `.bind_tools` for us under the hood." + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "from langgraph.prebuilt import create_react_agent\n", + "\n", + "agent_executor = create_react_agent(model, tools)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's now try it out on an example where it should be invoking the tool" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[HumanMessage(content='hi!', additional_kwargs={}, response_metadata={}, id='0ab1f3c7-9079-42d4-8a8a-13af5f6c226b'),\n", + " AIMessage(content=' Hello! How can I help you today? If you have a question or need information on a specific topic, feel free to ask. For example, you can start with a search query like \"latest news on climate change\" or \"biography of Albert Einstein\".\\n\\n', additional_kwargs={}, response_metadata={}, id='run-276d9dcd-13f3-481d-b562-8fe3962d9ba1-0')]" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "response = agent_executor.invoke({\"messages\": [HumanMessage(content=\"hi!\")]})\n", + "\n", + "response[\"messages\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In order to see exactly what is happening under the hood (and to make sure it's not calling a tool) we can take a look at the LangSmith trace: https://smith.langchain.com/public/2372d9c5-855a-45ee-80f2-94b63493563d/r" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[HumanMessage(content='whats the weather in sf?', additional_kwargs={}, response_metadata={}, id='af276c61-3df7-4241-8cb0-81d1f1477bb3'),\n", + " AIMessage(content='', additional_kwargs={'tool_calls': [{'id': '86da84b8-0d44-444f-8448-7f134f9afa41', 'type': 'function', 'function': {'name': 'tavily_search_results_json', 'arguments': '{\"query\": \"weather in SF\"}'}}]}, response_metadata={}, id='run-abe1b8e2-98a6-4f69-8f95-278ac8c141ff-0', tool_calls=[{'name': 'tavily_search_results_json', 'args': {'query': 'weather in SF'}, 'id': '86da84b8-0d44-444f-8448-7f134f9afa41', 'type': 'tool_call'}]),\n", + " ToolMessage(content='[{\"url\": \"https://www.weatherapi.com/\", \"content\": \"{\\'location\\': {\\'name\\': \\'San Francisco\\', \\'region\\': \\'California\\', \\'country\\': \\'United States of America\\', \\'lat\\': 37.775, \\'lon\\': -122.4183, \\'tz_id\\': \\'America/Los_Angeles\\', \\'localtime_epoch\\': 1730483436, \\'localtime\\': \\'2024-11-01 10:50\\'}, \\'current\\': {\\'last_updated_epoch\\': 1730483100, \\'last_updated\\': \\'2024-11-01 10:45\\', \\'temp_c\\': 11.4, \\'temp_f\\': 52.5, \\'is_day\\': 1, \\'condition\\': {\\'text\\': \\'Mist\\', \\'icon\\': \\'//cdn.weatherapi.com/weather/64x64/day/143.png\\', \\'code\\': 1030}, \\'wind_mph\\': 2.2, \\'wind_kph\\': 3.6, \\'wind_degree\\': 237, \\'wind_dir\\': \\'WSW\\', \\'pressure_mb\\': 1019.0, \\'pressure_in\\': 30.08, \\'precip_mm\\': 0.0, 
\\'precip_in\\': 0.0, \\'humidity\\': 100, \\'cloud\\': 100, \\'feelslike_c\\': 11.8, \\'feelslike_f\\': 53.2, \\'windchill_c\\': 11.2, \\'windchill_f\\': 52.1, \\'heatindex_c\\': 11.7, \\'heatindex_f\\': 53.0, \\'dewpoint_c\\': 10.1, \\'dewpoint_f\\': 50.1, \\'vis_km\\': 2.8, \\'vis_miles\\': 1.0, \\'uv\\': 3.0, \\'gust_mph\\': 3.0, \\'gust_kph\\': 4.9}}\"}, {\"url\": \"https://www.timeanddate.com/weather/@z-us-94134/ext\", \"content\": \"Forecasted weather conditions the coming 2 weeks for San Francisco. Sign in. News. News Home; Astronomy News; Time Zone News ... 01 pm: Mon Nov 11: 60 / 53 Β°F: Tstorms early. Broken clouds. 54 Β°F: 19 mph: ↑: 70%: 58%: 0.20\\\\\" 0 (Low) 6:46 am: 5:00 pm * Updated Monday, October 28, 2024 2:24:10 pm San Francisco time - Weather by CustomWeather\"}]', name='tavily_search_results_json', id='de8c8d78-ae24-4a8a-9c73-795c1e4fdd41', tool_call_id='86da84b8-0d44-444f-8448-7f134f9afa41', artifact={'query': 'weather in SF', 'follow_up_questions': None, 'answer': None, 'images': [], 'results': [{'title': 'Weather in San Francisco', 'url': 'https://www.weatherapi.com/', 'content': \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.775, 'lon': -122.4183, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1730483436, 'localtime': '2024-11-01 10:50'}, 'current': {'last_updated_epoch': 1730483100, 'last_updated': '2024-11-01 10:45', 'temp_c': 11.4, 'temp_f': 52.5, 'is_day': 1, 'condition': {'text': 'Mist', 'icon': '//cdn.weatherapi.com/weather/64x64/day/143.png', 'code': 1030}, 'wind_mph': 2.2, 'wind_kph': 3.6, 'wind_degree': 237, 'wind_dir': 'WSW', 'pressure_mb': 1019.0, 'pressure_in': 30.08, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 100, 'cloud': 100, 'feelslike_c': 11.8, 'feelslike_f': 53.2, 'windchill_c': 11.2, 'windchill_f': 52.1, 'heatindex_c': 11.7, 'heatindex_f': 53.0, 'dewpoint_c': 10.1, 'dewpoint_f': 50.1, 'vis_km': 2.8, 'vis_miles': 1.0, 'uv': 3.0, 'gust_mph': 3.0, 'gust_kph': 4.9}}\", 'score': 0.9989501, 'raw_content': None}, {'title': 'San Francisco, USA 14 day weather forecast - timeanddate.com', 'url': 'https://www.timeanddate.com/weather/@z-us-94134/ext', 'content': 'Forecasted weather conditions the coming 2 weeks for San Francisco. Sign in. News. News Home; Astronomy News; Time Zone News ... 01 pm: Mon Nov 11: 60 / 53 Β°F: Tstorms early. Broken clouds. 54 Β°F: 19 mph: ↑: 70%: 58%: 0.20\" 0 (Low) 6:46 am: 5:00 pm * Updated Monday, October 28, 2024 2:24:10 pm San Francisco time - Weather by CustomWeather', 'score': 0.9938309, 'raw_content': None}], 'response_time': 3.56}),\n", + " AIMessage(content=' The current weather in San Francisco is mist with a temperature of 11.4Β°C (52.5Β°F). There is a 100% humidity and the wind is blowing at 2.2 mph from the WSW direction. The forecast for the coming weeks shows a mix of cloudy and partly cloudy days with some chances of thunderstorms. 
Temperatures are expected to range between 53Β°F and 60Β°F.\\n\\n', additional_kwargs={}, response_metadata={}, id='run-de4207d6-e8e8-4382-ad16-4de0dcf0812a-0')]" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "response = agent_executor.invoke(\n", + " {\"messages\": [HumanMessage(content=\"whats the weather in sf?\")]}\n", + ")\n", + "response[\"messages\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can check out the LangSmith trace to make sure it's calling the search tool effectively.\n", + "\n", + "https://smith.langchain.com/public/013ef704-654b-4447-8428-637b343d646e/r" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We've seen how the agent can be called with `.invoke` to get a final response. If the agent executes multiple steps, this may take a while. To show intermediate progress, we can stream back messages as they occur.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': '2457d3ea-f001-4b8c-a1ed-3dc3d1381639', 'type': 'function', 'function': {'name': 'tavily_search_results_json', 'arguments': '{\"query\": \"weather in San Francisco\"}'}}]}, response_metadata={}, id='run-0363deab-84d2-4319-bb1e-b55b47fe2274-0', tool_calls=[{'name': 'tavily_search_results_json', 'args': {'query': 'weather in San Francisco'}, 'id': '2457d3ea-f001-4b8c-a1ed-3dc3d1381639', 'type': 'tool_call'}])]}}\n", + "----\n", + "{'tools': {'messages': [ToolMessage(content='[{\"url\": \"https://www.weatherapi.com/\", \"content\": \"{\\'location\\': {\\'name\\': \\'San Francisco\\', \\'region\\': \\'California\\', \\'country\\': \\'United States of America\\', \\'lat\\': 37.775, \\'lon\\': -122.4183, \\'tz_id\\': \\'America/Los_Angeles\\', \\'localtime_epoch\\': 1730483636, \\'localtime\\': \\'2024-11-01 10:53\\'}, \\'current\\': {\\'last_updated_epoch\\': 1730483100, \\'last_updated\\': \\'2024-11-01 10:45\\', \\'temp_c\\': 11.4, \\'temp_f\\': 52.5, \\'is_day\\': 1, \\'condition\\': {\\'text\\': \\'Mist\\', \\'icon\\': \\'//cdn.weatherapi.com/weather/64x64/day/143.png\\', \\'code\\': 1030}, \\'wind_mph\\': 2.2, \\'wind_kph\\': 3.6, \\'wind_degree\\': 237, \\'wind_dir\\': \\'WSW\\', \\'pressure_mb\\': 1019.0, \\'pressure_in\\': 30.08, \\'precip_mm\\': 0.0, \\'precip_in\\': 0.0, \\'humidity\\': 100, \\'cloud\\': 100, \\'feelslike_c\\': 11.8, \\'feelslike_f\\': 53.2, \\'windchill_c\\': 11.2, \\'windchill_f\\': 52.1, \\'heatindex_c\\': 11.7, \\'heatindex_f\\': 53.0, \\'dewpoint_c\\': 10.1, \\'dewpoint_f\\': 50.1, \\'vis_km\\': 2.8, \\'vis_miles\\': 1.0, \\'uv\\': 3.0, \\'gust_mph\\': 3.0, \\'gust_kph\\': 4.9}}\"}, {\"url\": \"https://weather.com/weather/monthly/l/69bedc6a5b6e977993fb3e5344e3c06d8bc36a1fb6754c3ddfb5310a3c6d6c87\", \"content\": \"Weather.com brings you the most accurate monthly weather forecast for San Francisco, CA with average/record and high/low temperatures, precipitation and more. ... 11. 66 Β° 55 Β° 12. 
69 Β° 60\"}]', name='tavily_search_results_json', id='e675f99b-130f-4e98-8477-badd45938d9d', tool_call_id='2457d3ea-f001-4b8c-a1ed-3dc3d1381639', artifact={'query': 'weather in San Francisco', 'follow_up_questions': None, 'answer': None, 'images': [], 'results': [{'title': 'Weather in San Francisco', 'url': 'https://www.weatherapi.com/', 'content': \"{'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.775, 'lon': -122.4183, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1730483636, 'localtime': '2024-11-01 10:53'}, 'current': {'last_updated_epoch': 1730483100, 'last_updated': '2024-11-01 10:45', 'temp_c': 11.4, 'temp_f': 52.5, 'is_day': 1, 'condition': {'text': 'Mist', 'icon': '//cdn.weatherapi.com/weather/64x64/day/143.png', 'code': 1030}, 'wind_mph': 2.2, 'wind_kph': 3.6, 'wind_degree': 237, 'wind_dir': 'WSW', 'pressure_mb': 1019.0, 'pressure_in': 30.08, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 100, 'cloud': 100, 'feelslike_c': 11.8, 'feelslike_f': 53.2, 'windchill_c': 11.2, 'windchill_f': 52.1, 'heatindex_c': 11.7, 'heatindex_f': 53.0, 'dewpoint_c': 10.1, 'dewpoint_f': 50.1, 'vis_km': 2.8, 'vis_miles': 1.0, 'uv': 3.0, 'gust_mph': 3.0, 'gust_kph': 4.9}}\", 'score': 0.9968992, 'raw_content': None}, {'title': 'Monthly Weather Forecast for San Francisco, CA - weather.com', 'url': 'https://weather.com/weather/monthly/l/69bedc6a5b6e977993fb3e5344e3c06d8bc36a1fb6754c3ddfb5310a3c6d6c87', 'content': 'Weather.com brings you the most accurate monthly weather forecast for San Francisco, CA with average/record and high/low temperatures, precipitation and more. ... 11. 66 Β° 55 Β° 12. 69 Β° 60', 'score': 0.97644573, 'raw_content': None}], 'response_time': 3.16})]}}\n", + "----\n", + "{'agent': {'messages': [AIMessage(content=' The current weather in San Francisco is misty with a temperature of 11.4Β°C (52.5Β°F). The wind is blowing at 2.2 mph (3.6 kph) from the WSW direction. The humidity is at 100%, and the visibility is 2.8 km (1.0 miles). 
The monthly forecast shows average temperatures ranging from 55Β°F to 66Β°F (13Β°C to 19Β°C) with some precipitation expected.\\n\\n', additional_kwargs={}, response_metadata={}, id='run-99ccf444-d286-4244-a5a5-7b1b511153a6-0')]}}\n", + "----\n" + ] + } + ], + "source": [ + "for chunk in agent_executor.stream(\n", + " {\"messages\": [HumanMessage(content=\"whats the weather in sf?\")]}\n", + "):\n", + " print(chunk)\n", + " print(\"----\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API reference" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "https://docs.reka.ai/quick-start" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "langchain_reka", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/docs/integrations/llms/sambanovacloud.ipynb b/docs/docs/integrations/llms/sambanovacloud.ipynb new file mode 100644 index 00000000000000..cffe08553fa957 --- /dev/null +++ b/docs/docs/integrations/llms/sambanovacloud.ipynb @@ -0,0 +1,253 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# SambaNovaCloud\n", + "\n", + "**[SambaNova](https://sambanova.ai/)'s [SambaNova Cloud](https://cloud.sambanova.ai/)** is a platform for performing inference with open-source models\n", + "\n", + ":::caution\n", + "You are currently on a page documenting the use of SambaNovaCloud models as [text completion models](/docs/concepts/text_llms/). We recommend you to use the [chat completion models](/docs/concepts/chat_models).\n", + "\n", + "You may be looking for [SambaNovaCloud Chat Models](/docs/integrations/chat/sambanova/) .\n", + ":::\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | JS support | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [SambaNovaCloud](https://python.langchain.com/api_reference/community/llms/langchain_community.llms.sambanova.SambaNovaCloud.html) | [langchain_community](https://python.langchain.com/api_reference/community/index.html) | ❌ | beta | ❌ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain_community?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain_community?style=flat-square&label=%20) |\n", + "\n", + "This example goes over how to use LangChain to interact with SambaNovaCloud models" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "### Credentials\n", + "To access ChatSambaNovaCloud models you will need to create a [SambaNovaCloud account](https://cloud.sambanova.ai/), get an API key and set it as the `SAMBANOVA_API_KEY` environment variable:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import getpass\n", + "import os\n", + "\n", + "if \"SAMBANOVA_API_KEY\" not in os.environ:\n", + " os.environ[\"SAMBANOVA_API_KEY\"] = getpass.getpass()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Installation\n", + "\n", + "The integration lives in the `langchain-community` package. 
We also need to install the [sseclient-py](https://pypi.org/project/sseclient-py/) package this is required to run streaming predictions " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --quiet -U langchain-community sseclient-py" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instantiation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_community.llms.sambanova import SambaNovaCloud\n", + "\n", + "llm = SambaNovaCloud(\n", + " model=\"Meta-Llama-3.1-70B-Instruct\",\n", + " max_tokens_to_generate=1000,\n", + " temperature=0.01,\n", + " # top_k = 50,\n", + " # top_p = 1.0\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Invocation\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\"**Advantages of Open Source Models**\\n\\nUsing open source models can bring numerous benefits to your project or organization. Here are some reasons why you should consider using open source models:\\n\\n### 1. **Cost-Effective**\\n\\nOpen source models are free to use, modify, and distribute. This can significantly reduce the costs associated with developing and maintaining proprietary models.\\n\\n### 2. **Community Support**\\n\\nOpen source models are often maintained by a community of developers and users who contribute to their improvement. This community support can lead to faster bug fixes, new feature additions, and better documentation.\\n\\n### 3. **Transparency and Customizability**\\n\\nOpen source models provide complete transparency into their architecture and implementation. This allows you to customize and fine-tune the model to suit your specific needs.\\n\\n### 4. **Faster Development**\\n\\nBy leveraging pre-trained open source models, you can accelerate your development process. You can focus on fine-tuning the model for your specific use case rather than building one from scratch.\\n\\n### 5. **Improved Security**\\n\\nOpen source models are often reviewed and audited by a large community of developers, which can help identify and fix security vulnerabilities.\\n\\n### 6. **Interoperability**\\n\\nOpen source models can be easily integrated with other open source tools and frameworks, promoting interoperability and reducing vendor lock-in.\\n\\n### 7. **Access to State-of-the-Art Technology**\\n\\nMany open source models are developed by top researchers and institutions, providing access to state-of-the-art technology and techniques.\\n\\n### Example Use Cases\\n\\n* **Computer Vision**: Use open source models like TensorFlow's Object Detection API or OpenCV's pre-trained models for image classification, object detection, and segmentation tasks.\\n* **Natural Language Processing**: Leverage open source models like spaCy or Stanford CoreNLP for text processing, sentiment analysis, and language translation tasks.\\n* **Speech Recognition**: Utilize open source models like Kaldi or Mozilla's DeepSpeech for speech-to-text applications.\\n\\n**Getting Started**\\n\\nTo get started with open source models, explore popular repositories on GitHub or model hubs like TensorFlow Hub or PyTorch Hub. 
Familiarize yourself with the model's documentation, and experiment with pre-trained models before fine-tuning them for your specific use case.\\n\\nBy embracing open source models, you can accelerate your development process, reduce costs, and tap into the collective knowledge of the developer community.\"" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "input_text = \"Why should I use open source models?\"\n", + "\n", + "completion = llm.invoke(input_text)\n", + "completion" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "**Advantages of Open Source Models**\n", + "\n", + "Using open source models can bring numerous benefits to your projects. Here are some reasons why you should consider them:\n", + "\n", + "### 1. **Cost-Effective**\n", + "\n", + "Open source models are free to use, modify, and distribute. This can significantly reduce the costs associated with developing and maintaining proprietary models.\n", + "\n", + "### 2. **Community Support**\n", + "\n", + "Open source models are often maintained by a community of developers, researchers, and users. This community can provide support, fix bugs, and contribute to the model's improvement.\n", + "\n", + "### 3. **Transparency and Reproducibility**\n", + "\n", + "Open source models are transparent in their architecture, training data, and hyperparameters. This transparency allows for reproducibility, which is essential in scientific research and development.\n", + "\n", + "### 4. **Customizability**\n", + "\n", + "Open source models can be modified to suit specific use cases or requirements. This customizability enables developers to adapt the model to their needs, which can lead to better performance and accuracy.\n", + "\n", + "### 5. **Faster Development**\n", + "\n", + "Using open source models can accelerate development by providing a pre-trained foundation. This allows developers to focus on fine-tuning the model for their specific task, rather than starting from scratch.\n", + "\n", + "### 6. **Access to State-of-the-Art Models**\n", + "\n", + "Open source models often represent the state-of-the-art in their respective domains. By using these models, developers can leverage the latest advancements in AI research.\n", + "\n", + "### 7. **Reduced Vendor Lock-in**\n", + "\n", + "Open source models are not tied to a specific vendor or platform. This reduces the risk of vendor lock-in and allows developers to switch to alternative solutions if needed.\n", + "\n", + "### Example Use Cases\n", + "\n", + "* **Computer Vision**: Using open source models like YOLO (You Only Look Once) or SSD (Single Shot Detector) for object detection tasks.\n", + "* **Natural Language Processing**: Leveraging open source models like BERT (Bidirectional Encoder Representations from Transformers) or RoBERTa (Robustly Optimized BERT Pretraining Approach) for text classification, sentiment analysis, or language translation.\n", + "* **Speech Recognition**: Utilizing open source models like Kaldi or Mozilla DeepSpeech for speech-to-text applications.\n", + "\n", + "**Getting Started**\n", + "\n", + "To get started with open source models, you can explore popular repositories on GitHub or model hubs like the TensorFlow Model Garden or the PyTorch Model Zoo. 
These resources provide pre-trained models, documentation, and tutorials to help you integrate open source models into your projects.\n", + "\n", + "By embracing open source models, you can tap into the collective knowledge and expertise of the developer community, accelerate your development process, and create more accurate and efficient AI solutions." + ] + } + ], + "source": [ + "# Streaming response\n", + "for chunk in llm.stream(\"Why should I use open source models?\"):\n", + " print(chunk, end=\"\", flush=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Chaining\n", + "We can chain our completion model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'The translation of \"I love programming\" in German is:\\n\\n\"Ich liebe das Programmieren.\"\\n\\nHere\\'s a breakdown of the sentence:\\n\\n* \"Ich\" means \"I\"\\n* \"liebe\" is the verb \"to love\" in the first person singular (I love)\\n* \"das\" is the definite article for \"Programmieren\" (programming)\\n* \"Programmieren\" is the verb \"to program\" in the infinitive form, but in this context, it\\'s used as a noun to refer to the activity of programming.\\n\\nSo, \"Ich liebe das Programmieren\" is a common way to express your passion for programming in German.'" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from langchain_core.prompts import PromptTemplate\n", + "\n", + "prompt = PromptTemplate.from_template(\"How to say {input} in {output_language}:\\n\")\n", + "\n", + "chain = prompt | llm\n", + "chain.invoke(\n", + " {\n", + " \"output_language\": \"German\",\n", + " \"input\": \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all `SambaNovaCloud` llm features and configurations head to the API reference: https://python.langchain.com/api_reference/community/llms/langchain_community.llms.sambanova.SambaNovaCloud.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "multimodalenv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb index c5830e7db688fc..59a4c2fa518c6b 100644 --- a/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb @@ -136,7 +136,7 @@ }, "outputs": [], "source": [ - "from langchain.chains.query_constructor.base import AttributeInfo\n", + "from langchain.chains.query_constructor.schema import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", "from langchain_openai import OpenAI\n", "\n", diff --git a/docs/docs/introduction.mdx b/docs/docs/introduction.mdx index 68d7aaa41868d1..b3edcfbd15dffd 100644 --- a/docs/docs/introduction.mdx +++ b/docs/docs/introduction.mdx @@ -33,7 +33,7 @@ Concretely, the framework consists of the following open-source 
libraries: - **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture. - **`langchain-community`**: Third-party integrations that are community maintained. - **[LangGraph](https://langchain-ai.github.io/langgraph)**: Build robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph. Integrates smoothly with LangChain, but can be used without it. -- **[LangServe](/docs/langserve)**: Deploy LangChain chains as REST APIs. +- **[LangGraph Platform](https://langchain-ai.github.io/langgraph/concepts/#langgraph-platform)**: Deploy LLM applications built with LangGraph to production. - **[LangSmith](https://docs.smith.langchain.com)**: A developer platform that lets you debug, test, evaluate, and monitor LLM applications. diff --git a/docs/docs/tutorials/index.mdx b/docs/docs/tutorials/index.mdx index e524695dbaed7e..7cf4af7e2db56e 100644 --- a/docs/docs/tutorials/index.mdx +++ b/docs/docs/tutorials/index.mdx @@ -6,7 +6,7 @@ sidebar_class_name: hidden New to LangChain or LLM app development in general? Read this material to quickly get up and running building your first applications. -If you're looking to get up and running quickly with [chat models](/docs/integrations/chat/), [vector stores](/docs/integrations/vectorstores/), +If you're looking to get started with [chat models](/docs/integrations/chat/), [vector stores](/docs/integrations/vectorstores/), or other LangChain components from a specific provider, check out our supported [integrations](/docs/integrations/providers/). Refer to the [how-to guides](/docs/how_to) for more detail on using common LangChain components. @@ -14,7 +14,7 @@ Refer to the [how-to guides](/docs/how_to) for more detail on using common LangC See the [conceptual documentation](/docs/concepts) for high level explanations of all LangChain concepts. ## Basics -- [LLM applications](/docs/tutorials/llm_chain): Build and deploy a simple LLM application. +- [LLM applications](/docs/tutorials/llm_chain): Build a simple LLM application with prompt templates and chat models. - [Chatbots](/docs/tutorials/chatbot): Build a chatbot that incorporates memory. - [Vector stores](/docs/tutorials/retrievers): Build vector stores and use them to retrieve data. - [Agents](/docs/tutorials/agents): Build an agent that interacts with external tools. diff --git a/docs/docs/tutorials/llm_chain.ipynb b/docs/docs/tutorials/llm_chain.ipynb index db837dad56b97f..fccc2c6cd9fc1b 100644 --- a/docs/docs/tutorials/llm_chain.ipynb +++ b/docs/docs/tutorials/llm_chain.ipynb @@ -15,7 +15,7 @@ "id": "9316da0d", "metadata": {}, "source": [ - "# Build a Simple LLM Application with LCEL\n", + "# Build a Simple LLM Application\n", "\n", "In this quickstart we'll show you how to build a simple LLM application with LangChain. This application will translate text from English into another language. This is a relatively simple LLM application - it's just a single LLM call plus some prompting. 
Still, this is a great way to get started with LangChain - a lot of features can be built with just some prompting and an LLM call!\n", "\n", @@ -23,23 +23,17 @@ "\n", "- Using [language models](/docs/concepts/chat_models)\n", "\n", - "- Using [PromptTemplates](/docs/concepts/prompt_templates) and [OutputParsers](/docs/concepts/output_parsers)\n", - "\n", - "- Using [LangChain Expression Language (LCEL)](/docs/concepts/lcel) to chain components together\n", + "- Using [PromptTemplates](/docs/concepts/prompt_templates)\n", "\n", "- Debugging and tracing your application using [LangSmith](https://docs.smith.langchain.com/)\n", "\n", - "- Deploying your application with [LangServe](/docs/concepts/architecture/#langserve)\n", - "\n", "Let's dive in!\n", "\n", "## Setup\n", "\n", "### Jupyter Notebook\n", "\n", - "This guide (and most of the other guides in the documentation) uses [Jupyter notebooks](https://jupyter.org/) and assumes the reader is as well. Jupyter notebooks are perfect for learning how to work with LLM systems because oftentimes things can go wrong (unexpected output, API down, etc) and going through guides in an interactive environment is a great way to better understand them.\n", - "\n", - "This and other tutorials are perhaps most conveniently run in a Jupyter notebook. See [here](https://jupyter.org/install) for instructions on how to install.\n", + "This and other tutorials are perhaps most conveniently run in a [Jupyter notebook](https://jupyter.org/). Going through guides in an interactive environment is a great way to better understand them. See [here](https://jupyter.org/install) for instructions on how to install.\n", "\n", "### Installation\n", "\n", @@ -97,7 +91,7 @@ "\n", "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", "\n", - "\n" + "\n" ] }, { @@ -112,7 +106,7 @@ "\n", "from langchain_openai import ChatOpenAI\n", "\n", - "model = ChatOpenAI(model=\"gpt-4\")" + "model = ChatOpenAI(model=\"gpt-4o\")" ] }, { @@ -120,22 +114,22 @@ "id": "ca5642ff", "metadata": {}, "source": [ - "Let's first use the model directly. `ChatModel`s are instances of LangChain \"Runnables\", which means they expose a standard interface for interacting with them. To just simply call the model, we can pass in a list of messages to the `.invoke` method." + "Let's first use the model directly. [ChatModels](/docs/concepts/chat_models) are instances of LangChain [Runnables](/docs/concepts/runnables/), which means they expose a standard interface for interacting with them. To simply call the model, we can pass in a list of messages to the `.invoke` method." 
] }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 2, "id": "1b2481f0", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content='ciao!', response_metadata={'token_usage': {'completion_tokens': 3, 'prompt_tokens': 20, 'total_tokens': 23}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-fc5d7c88-9615-48ab-a3c7-425232b562c5-0')" + "AIMessage(content='Ciao!', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 3, 'prompt_tokens': 20, 'total_tokens': 23, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-2024-08-06', 'system_fingerprint': 'fp_9ee9e968ea', 'finish_reason': 'stop', 'logprobs': None}, id='run-ad371806-6082-45c3-b6fa-e44622848ab2-0', usage_metadata={'input_tokens': 20, 'output_tokens': 3, 'total_tokens': 23, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}})" ] }, - "execution_count": 16, + "execution_count": 2, "metadata": {}, "output_type": "execute_result" } @@ -156,119 +150,9 @@ "id": "f83373db", "metadata": {}, "source": [ - "If we've enabled LangSmith, we can see that this run is logged to LangSmith, and can see the [LangSmith trace](https://smith.langchain.com/public/88baa0b2-7c1a-4d09-ba30-a47985dde2ea/r)" - ] - }, - { - "cell_type": "markdown", - "id": "32bd03ed", - "metadata": {}, - "source": [ - "## OutputParsers\n", - "\n", - "Notice that the response from the model is an `AIMessage`. This contains a string response along with other metadata about the response. Oftentimes we may just want to work with the string response. We can parse out just this response by using a simple output parser.\n", - "\n", - "We first import the simple output parser." - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "d7ae9c58", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_core.output_parsers import StrOutputParser\n", - "\n", - "parser = StrOutputParser()" - ] - }, - { - "cell_type": "markdown", - "id": "eaebe33a", - "metadata": {}, - "source": [ - "One way to use it is to use it by itself. For example, we could save the result of the language model call and then pass it to the parser." - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "6bacb837", - "metadata": {}, - "outputs": [], - "source": [ - "result = model.invoke(messages)" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "efb8da87", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'Ciao!'" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "parser.invoke(result)" - ] - }, - { - "cell_type": "markdown", - "id": "d508b79d", - "metadata": {}, - "source": [ - "More commonly, we can \"chain\" the model with this output parser. This means this output parser will get called every time in this chain. This chain takes on the input type of the language model (string or list of message) and returns the output type of the output parser (string).\n", + "If we've enabled LangSmith, we can see that this run is logged to LangSmith, and can see the [LangSmith trace](https://smith.langchain.com/public/88baa0b2-7c1a-4d09-ba30-a47985dde2ea/r). 
The LangSmith trace reports [token](/docs/concepts/tokens/) usage information, latency, [standard model parameters](/docs/concepts/chat_models/#standard-parameters) (such as temperature), and other information.\n", "\n", - "We can easily create the chain using the `|` operator. The `|` operator is used in LangChain to combine two elements together." - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "9449cfa6", - "metadata": {}, - "outputs": [], - "source": [ - "chain = model | parser" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "3e82f933", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'Ciao!'" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "chain.invoke(messages)" - ] - }, - { - "cell_type": "markdown", - "id": "dd009096", - "metadata": {}, - "source": [ - "If we now look at LangSmith, we can see that the chain has two steps: first the language model is called, then the result of that is passed to the output parser. We can see the [LangSmith trace]( https://smith.langchain.com/public/f1bdf656-2739-42f7-ac7f-0f1dd712322f/r)" + "Note that ChatModels receive [message](/docs/concepts/messages/) objects as input and generate message objects as output. In addition to text content, message objects convey conversational [roles](/docs/concepts/messages/#role) and hold important data, such as [tool calls](/docs/concepts/tool_calling/) and token usage counts." ] }, { @@ -280,9 +164,9 @@ "\n", "Right now we are passing a list of messages directly into the language model. Where does this list of messages come from? Usually, it is constructed from a combination of user input and application logic. This application logic usually takes the raw user input and transforms it into a list of messages ready to pass to the language model. Common transformations include adding a system message or formatting a template with the user input.\n", "\n", - "PromptTemplates are a concept in LangChain designed to assist with this transformation. They take in raw user input and return data (a prompt) that is ready to pass into a language model. \n", + "[Prompt templates](/docs/concepts/prompt_templates/) are a concept in LangChain designed to assist with this transformation. They take in raw user input and return data (a prompt) that is ready to pass into a language model. \n", "\n", - "Let's create a PromptTemplate here. It will take in two user variables:\n", + "Let's create a prompt template here. 
It will take in two user variables:\n", "\n", "- `language`: The language to translate text into\n", "- `text`: The text to translate" @@ -290,12 +174,18 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 3, "id": "3e73cc20", "metadata": {}, "outputs": [], "source": [ - "from langchain_core.prompts import ChatPromptTemplate" + "from langchain_core.prompts import ChatPromptTemplate\n", + "\n", + "system_template = \"Translate the following from English into {language}\"\n", + "\n", + "prompt_template = ChatPromptTemplate.from_messages(\n", + " [(\"system\", system_template), (\"user\", \"{text}\")]\n", + ")" ] }, { @@ -303,37 +193,7 @@ "id": "7e876c2a", "metadata": {}, "source": [ - "First, let's create a string that we will format to be the system message:" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "id": "fd75ecde", - "metadata": {}, - "outputs": [], - "source": [ - "system_template = \"Translate the following into {language}:\"" - ] - }, - { - "cell_type": "markdown", - "id": "fedf6f13", - "metadata": {}, - "source": [ - "Next, we can create the PromptTemplate. This will be a combination of the `system_template` as well as a simpler template for where to put the text to be translated" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "id": "88e566f3", - "metadata": {}, - "outputs": [], - "source": [ - "prompt_template = ChatPromptTemplate.from_messages(\n", - " [(\"system\", system_template), (\"user\", \"{text}\")]\n", - ")" + "Note that `ChatPromptTemplate` supports multiple [message roles](/docs/concepts/messages/#role) in a single template. We format the `language` parameter into the system message, and the user `text` into a user message." ] }, { @@ -346,23 +206,23 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 4, "id": "f781b3cb", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "ChatPromptValue(messages=[SystemMessage(content='Translate the following into italian:'), HumanMessage(content='hi')])" + "ChatPromptValue(messages=[SystemMessage(content='Translate the following from English into Italian', additional_kwargs={}, response_metadata={}), HumanMessage(content='hi!', additional_kwargs={}, response_metadata={})])" ] }, - "execution_count": 27, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "result = prompt_template.invoke({\"language\": \"italian\", \"text\": \"hi\"})\n", + "result = prompt_template.invoke({\"language\": \"Italian\", \"text\": \"hi!\"})\n", "\n", "result" ] @@ -377,18 +237,18 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 5, "id": "2159b619", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[SystemMessage(content='Translate the following into italian:'),\n", - " HumanMessage(content='hi')]" + "[SystemMessage(content='Translate the following from English into Italian', additional_kwargs={}, response_metadata={}),\n", + " HumanMessage(content='hi!', additional_kwargs={}, response_metadata={})]" ] }, - "execution_count": 28, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -404,38 +264,36 @@ "source": [ "## Chaining together components with LCEL\n", "\n", - "We can now combine this with the model and the output parser from above using the pipe (`|`) operator:" + "We can now combine this with the model from above using the pipe (`|`) operator:" ] }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 6, "id": "6c6beb4b", "metadata": {}, "outputs": 
[], "source": [ - "chain = prompt_template | model | parser" + "chain = prompt_template | model" ] }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 7, "id": "3e45595a", "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "'ciao'" - ] - }, - "execution_count": 30, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "Ciao!\n" + ] } ], "source": [ - "chain.invoke({\"language\": \"italian\", \"text\": \"hi\"})" + "response = chain.invoke({\"language\": \"Italian\", \"text\": \"hi!\"})\n", + "print(response.content)" ] }, { @@ -443,116 +301,14 @@ "id": "0b19cecb", "metadata": {}, "source": [ - "This is a simple example of using [LangChain Expression Language (LCEL)](/docs/concepts/lcel) to chain together LangChain modules. There are several benefits to this approach, including optimized streaming and tracing support.\n", + ":::tip\n", + "Message `content` can contain both text and [content blocks](/docs/concepts/messages/#aimessage) with additional structure. See [this guide](/docs/how_to/output_parser_string/) for more information.\n", + ":::\n", "\n", - "If we take a look at the LangSmith trace, we can see all three components show up in the [LangSmith trace](https://smith.langchain.com/public/bc49bec0-6b13-4726-967f-dbd3448b786d/r)." - ] - }, - { - "cell_type": "markdown", - "id": "a515ddd0", - "metadata": {}, - "source": [ - "## Serving with LangServe\n", - "\n", - "Now that we've built an application, we need to serve it. That's where LangServe comes in.\n", - "LangServe helps developers deploy LangChain chains as a REST API. You do not need to use LangServe to use LangChain, but in this guide we'll show how you can deploy your app with LangServe.\n", - "\n", - "While the first part of this guide was intended to be run in a Jupyter Notebook or script, we will now move out of that. We will be creating a Python file and then interacting with it from the command line.\n", - "\n", - "Install with:\n", - "```bash\n", - "pip install \"langserve[all]\"\n", - "```\n", - "\n", - "### Server\n", - "\n", - "To create a server for our application we'll make a `serve.py` file. This will contain our logic for serving our application. It consists of three things:\n", - "1. The definition of our chain that we just built above\n", - "2. Our FastAPI app\n", - "3. A definition of a route from which to serve the chain, which is done with `langserve.add_routes`\n", - "\n", - "\n", - "```python\n", - "#!/usr/bin/env python\n", - "from fastapi import FastAPI\n", - "from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_openai import ChatOpenAI\n", - "from langserve import add_routes\n", - "\n", - "# 1. Create prompt template\n", - "system_template = \"Translate the following into {language}:\"\n", - "prompt_template = ChatPromptTemplate.from_messages([\n", - " ('system', system_template),\n", - " ('user', '{text}')\n", - "])\n", - "\n", - "# 2. Create model\n", - "model = ChatOpenAI()\n", - "\n", - "# 3. Create parser\n", - "parser = StrOutputParser()\n", - "\n", - "# 4. Create chain\n", - "chain = prompt_template | model | parser\n", - "\n", - "# 5. App definition\n", - "app = FastAPI(\n", - " title=\"LangChain Server\",\n", - " version=\"1.0\",\n", - " description=\"A simple API server using LangChain's Runnable interfaces\",\n", - ")\n", - "\n", - "# 6. 
Adding chain route\n", - "add_routes(\n", - " app,\n", - " chain,\n", - " path=\"/chain\",\n", - ")\n", - "\n", - "if __name__ == \"__main__\":\n", - " import uvicorn\n", - "\n", - " uvicorn.run(app, host=\"localhost\", port=8000)\n", - "```\n", - "\n", - "And that's it! If we execute this file:\n", - "```bash\n", - "python serve.py\n", - "```\n", - "we should see our chain being served at [http://localhost:8000](http://localhost:8000).\n", - "\n", - "### Playground\n", "\n", - "Every LangServe service comes with a simple [built-in UI](https://github.com/langchain-ai/langserve/blob/main/README.md#playground) for configuring and invoking the application with streaming output and visibility into intermediate steps.\n", - "Head to [http://localhost:8000/chain/playground/](http://localhost:8000/chain/playground/) to try it out! Pass in the same inputs as before - `{\"language\": \"italian\", \"text\": \"hi\"}` - and it should respond same as before.\n", - "\n", - "### Client\n", + "This is a simple example of using [LangChain Expression Language (LCEL)](/docs/concepts/lcel) to chain together LangChain modules. There are several benefits to this approach, including optimized streaming and tracing support.\n", "\n", - "Now let's set up a client for programmatically interacting with our service. We can easily do this with the [langserve.RemoteRunnable](/docs/langserve/#client).\n", - "Using this, we can interact with the served chain as if it were running client-side." - ] - }, - { - "cell_type": "markdown", - "id": "96a19287-f3d5-42be-8338-5a5d749101b0", - "metadata": {}, - "source": [ - "```python\n", - "from langserve import RemoteRunnable\n", - "\n", - "remote_chain = RemoteRunnable(\"http://localhost:8000/chain/\")\n", - "remote_chain.invoke({\"language\": \"italian\", \"text\": \"hi\"})\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "480b78a9", - "metadata": {}, - "source": [ - "To learn more about the many other features of LangServe [head here](/docs/langserve)." + "If we take a look at the [LangSmith trace](https://smith.langchain.com/public/bc49bec0-6b13-4726-967f-dbd3448b786d/r), we can see both components show up." ] }, { @@ -562,7 +318,7 @@ "source": [ "## Conclusion\n", "\n", - "That's it! In this tutorial you've learned how to create your first simple LLM application. You've learned how to work with language models, how to parse their outputs, how to create a prompt template, chaining them with LCEL, how to get great observability into chains you create with LangSmith, and how to deploy them with LangServe.\n", + "That's it! In this tutorial you've learned how to create your first simple LLM application. You've learned how to work with language models, how to create a prompt template, and how to get great observability into chains you create with LangSmith.\n", "\n", "This just scratches the surface of what you will want to learn to become a proficient AI Engineer. 
Luckily - we've got a lot of other resources!\n", "\n", @@ -570,11 +326,8 @@ "\n", "If you have more specific questions on these concepts, check out the following sections of the how-to guides:\n", "\n", - "- [LangChain Expression Language (LCEL)](/docs/how_to/#langchain-expression-language-lcel)\n", - "- [Prompt templates](/docs/how_to/#prompt-templates)\n", "- [Chat models](/docs/how_to/#chat-models)\n", - "- [Output parsers](/docs/how_to/#output-parsers)\n", - "- [LangServe](/docs/langserve/)\n", + "- [Prompt templates](/docs/how_to/#prompt-templates)\n", "\n", "And the LangSmith docs:\n", "\n", diff --git a/docs/docs/tutorials/summarization.ipynb b/docs/docs/tutorials/summarization.ipynb index c63f4c6479140e..1669f5f071dce2 100644 --- a/docs/docs/tutorials/summarization.ipynb +++ b/docs/docs/tutorials/summarization.ipynb @@ -156,7 +156,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet tiktoken langchain langgraph beautifulsoup4\n", + "%pip install --upgrade --quiet tiktoken langchain langgraph beautifulsoup4 langchain-community\n", "\n", "# Set env var OPENAI_API_KEY or load from a .env file\n", "# import dotenv\n", diff --git a/docs/docs/versions/v0_3/index.mdx b/docs/docs/versions/v0_3/index.mdx index f8cba890d74208..f553ab19866160 100644 --- a/docs/docs/versions/v0_3/index.mdx +++ b/docs/docs/versions/v0_3/index.mdx @@ -132,7 +132,7 @@ should ensure that they are passing Pydantic 2 objects to these APIs rather than Pydantic 1 objects (created via the `pydantic.v1` namespace of pydantic 2). :::caution -While `v1` objets may be accepted by some of these APIs, users are advised to +While `v1` objects may be accepted by some of these APIs, users are advised to use Pydantic 2 objects to avoid future issues. ::: diff --git a/docs/scripts/notebook_convert.py b/docs/scripts/notebook_convert.py index 70d1745585f236..02b83f80328393 100644 --- a/docs/scripts/notebook_convert.py +++ b/docs/scripts/notebook_convert.py @@ -24,6 +24,16 @@ def preprocess_cell(self, cell, resources, cell_index): # escape ``` in code cell.source = cell.source.replace("```", r"\`\`\`") # escape ``` in output + + # allow overriding title based on comment at beginning of cell + if cell.source.startswith("# title="): + lines = cell.source.split("\n") + title = lines[0].split("# title=")[1] + if title.startswith('"') and title.endswith('"'): + title = title[1:-1] + cell.metadata["title"] = title + cell.source = "\n".join(lines[1:]) + if "outputs" in cell: filter_out = set() for i, output in enumerate(cell["outputs"]): diff --git a/docs/scripts/notebook_convert_templates/mdoutput/index.md.j2 b/docs/scripts/notebook_convert_templates/mdoutput/index.md.j2 index 00e1e03057f6db..f6b76d919a2eb0 100644 --- a/docs/scripts/notebook_convert_templates/mdoutput/index.md.j2 +++ b/docs/scripts/notebook_convert_templates/mdoutput/index.md.j2 @@ -1,5 +1,20 @@ {% extends 'markdown/index.md.j2' %} +{% block input %} +``` +{%- if 'magics_language' in cell.metadata -%} + {{ cell.metadata.magics_language}} +{%- elif 'name' in nb.metadata.get('language_info', {}) -%} + {{ nb.metadata.language_info.name }} +{%- endif %} +{%- if 'title' in cell.metadata -%} + {{ ' ' }}title="{{ cell.metadata.title }}" + +{%- endif %} +{{ cell.source}} +``` +{% endblock input %} + {%- block traceback_line -%} ```output {{ line.rstrip() | strip_ansi }} diff --git a/docs/sidebars.js b/docs/sidebars.js index 62d53f91a3e61d..f1e867c6f5c003 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -204,34 +204,34 @@ module.exports = { }, 
{ type: "category", - label: "LLMs", + label: "Retrievers", collapsible: false, items: [ { type: "autogenerated", - dirName: "integrations/llms", + dirName: "integrations/retrievers", className: "hidden", }, ], link: { type: "doc", - id: "integrations/llms/index", + id: "integrations/retrievers/index", }, }, { type: "category", - label: "Embedding models", + label: "Tools/Toolkits", collapsible: false, items: [ { type: "autogenerated", - dirName: "integrations/text_embedding", + dirName: "integrations/tools", className: "hidden", }, ], link: { type: "doc", - id: "integrations/text_embedding/index", + id: "integrations/tools/index", }, }, { @@ -268,57 +268,57 @@ module.exports = { }, { type: "category", - label: "Retrievers", + label: "Embedding models", collapsible: false, items: [ { type: "autogenerated", - dirName: "integrations/retrievers", + dirName: "integrations/text_embedding", className: "hidden", }, ], link: { type: "doc", - id: "integrations/retrievers/index", + id: "integrations/text_embedding/index", }, }, { type: "category", - label: "Tools/Toolkits", - collapsible: false, + label: "Other", + collapsed: true, items: [ { - type: "autogenerated", - dirName: "integrations/tools", - className: "hidden", + type: "category", + label: "LLMs", + collapsible: false, + items: [ + { + type: "autogenerated", + dirName: "integrations/llms", + className: "hidden", + }, + ], + link: { + type: "doc", + id: "integrations/llms/index", + }, }, - ], - link: { - type: "doc", - id: "integrations/tools/index", - }, - }, - { - type: "category", - label: "Key-value stores", - collapsible: false, - items: [ { - type: "autogenerated", - dirName: "integrations/stores", - className: "hidden", + type: "category", + label: "Key-value stores", + collapsible: false, + items: [ + { + type: "autogenerated", + dirName: "integrations/stores", + className: "hidden", + }, + ], + link: { + type: "doc", + id: "integrations/stores/index", + }, }, - ], - link: { - type: "doc", - id: "integrations/stores/index", - }, - }, - { - type: "category", - label: "Other", - collapsed: true, - items: [ { type: "category", label: "Document transformers", diff --git a/docs/src/theme/ChatModelTabs.js b/docs/src/theme/ChatModelTabs.js index 9579e20077f689..9d27a9a44a135a 100644 --- a/docs/src/theme/ChatModelTabs.js +++ b/docs/src/theme/ChatModelTabs.js @@ -78,7 +78,7 @@ export default function ChatModelTabs(props) { azureParams ?? `\n azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],\n azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"],\n openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],\n`; const nvidiaParamsOrDefault = nvidiaParams ?? `model="meta/llama3-70b-instruct"` - const awsBedrockParamsOrDefault = awsBedrockParams ?? `model_id="anthropic.claude-3-5-sonnet-20240620-v1:0"`; + const awsBedrockParamsOrDefault = awsBedrockParams ?? `model="anthropic.claude-3-5-sonnet-20240620-v1:0",\n beta_use_converse_api=True`; const llmVarName = customVarName ?? 
"model"; @@ -119,6 +119,15 @@ export default function ChatModelTabs(props) { default: false, shouldHide: hideGoogle, }, + { + value: "AWS", + label: "AWS", + text: `from langchain_aws import ChatBedrock\n\n${llmVarName} = ChatBedrock(${awsBedrockParamsOrDefault})`, + apiKeyText: "# Ensure your AWS credentials are configured", + packageName: "langchain-aws", + default: false, + shouldHide: hideAWS, + }, { value: "Cohere", label: "Cohere", @@ -173,15 +182,6 @@ export default function ChatModelTabs(props) { default: false, shouldHide: hideTogether, }, - { - value: "AWS", - label: "AWS", - text: `from langchain_aws import ChatBedrock\n\n${llmVarName} = ChatBedrock(${awsBedrockParamsOrDefault})`, - apiKeyText: "# Ensure your AWS credentials are configured", - packageName: "langchain-aws", - default: false, - shouldHide: hideAWS, - }, ]; return ( diff --git a/libs/community/extended_testing_deps.txt b/libs/community/extended_testing_deps.txt index 5be87606873d42..d331fb66e85dd2 100644 --- a/libs/community/extended_testing_deps.txt +++ b/libs/community/extended_testing_deps.txt @@ -86,6 +86,7 @@ telethon>=1.28.5,<2 tidb-vector>=0.0.3,<1.0.0 timescale-vector==0.0.1 tqdm>=4.48.0 +tiktoken>=0.8.0 tree-sitter>=0.20.2,<0.21 tree-sitter-languages>=1.8.0,<2 upstash-redis>=1.1.0,<2 diff --git a/libs/community/langchain_community/chat_models/__init__.py b/libs/community/langchain_community/chat_models/__init__.py index 0cff776c786d79..ec514566f30533 100644 --- a/libs/community/langchain_community/chat_models/__init__.py +++ b/libs/community/langchain_community/chat_models/__init__.py @@ -155,6 +155,9 @@ from langchain_community.chat_models.promptlayer_openai import ( PromptLayerChatOpenAI, ) + from langchain_community.chat_models.reka import ( + ChatReka, + ) from langchain_community.chat_models.sambanova import ( ChatSambaNovaCloud, ChatSambaStudio, @@ -226,6 +229,7 @@ "ChatOllama", "ChatOpenAI", "ChatPerplexity", + "ChatReka", "ChatPremAI", "ChatSambaNovaCloud", "ChatSambaStudio", @@ -290,6 +294,7 @@ "ChatOCIModelDeploymentTGI": "langchain_community.chat_models.oci_data_science", "ChatOllama": "langchain_community.chat_models.ollama", "ChatOpenAI": "langchain_community.chat_models.openai", + "ChatReka": "langchain_community.chat_models.reka", "ChatPerplexity": "langchain_community.chat_models.perplexity", "ChatSambaNovaCloud": "langchain_community.chat_models.sambanova", "ChatSambaStudio": "langchain_community.chat_models.sambanova", diff --git a/libs/community/langchain_community/chat_models/reka.py b/libs/community/langchain_community/chat_models/reka.py new file mode 100644 index 00000000000000..f56001f37b1600 --- /dev/null +++ b/libs/community/langchain_community/chat_models/reka.py @@ -0,0 +1,435 @@ +import json +from typing import ( + Any, + AsyncIterator, + Callable, + Dict, + Iterator, + List, + Mapping, + Optional, + Sequence, + Type, + Union, +) + +from langchain_core.callbacks import ( + AsyncCallbackManagerForLLMRun, + CallbackManagerForLLMRun, +) +from langchain_core.language_models import LanguageModelInput +from langchain_core.language_models.chat_models import ( + BaseChatModel, + agenerate_from_stream, + generate_from_stream, +) +from langchain_core.messages import ( + AIMessage, + AIMessageChunk, + BaseMessage, + HumanMessage, + SystemMessage, + ToolMessage, +) +from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult +from langchain_core.runnables import Runnable +from langchain_core.tools import BaseTool +from langchain_core.utils import get_from_dict_or_env 
+from langchain_core.utils.function_calling import convert_to_openai_tool +from pydantic import BaseModel, ConfigDict, Field, model_validator + +DEFAULT_REKA_MODEL = "reka-flash" + +ContentType = Union[str, List[Union[str, Dict[str, Any]]]] + + +def process_content_item(item: Dict[str, Any]) -> Dict[str, Any]: + """Process a single content item.""" + if item["type"] == "image_url": + image_url = item["image_url"] + if isinstance(image_url, dict) and "url" in image_url: + # If it's in LangChain format, extract the URL value + item["image_url"] = image_url["url"] + return item + + +def process_content(content: ContentType) -> List[Dict[str, Any]]: + """Process content to handle both text and media inputs, + returning a list of content items.""" + if isinstance(content, str): + return [{"type": "text", "text": content}] + elif isinstance(content, list): + result = [] + for item in content: + if isinstance(item, str): + result.append({"type": "text", "text": item}) + elif isinstance(item, dict): + result.append(process_content_item(item)) + else: + raise ValueError(f"Invalid content item format: {item}") + return result + else: + raise ValueError("Invalid content format") + + +def convert_to_reka_messages(messages: List[BaseMessage]) -> List[Dict[str, Any]]: + """Convert LangChain messages to Reka message format.""" + reka_messages: List[Dict[str, Any]] = [] + system_message: Optional[str] = None + + for message in messages: + if isinstance(message, SystemMessage): + if system_message is None: + if isinstance(message.content, str): + system_message = message.content + else: + raise TypeError("SystemMessage content must be a string.") + else: + raise ValueError("Multiple system messages are not supported.") + elif isinstance(message, HumanMessage): + processed_content = process_content(message.content) + if system_message: + if ( + processed_content + and isinstance(processed_content[0], dict) + and processed_content[0].get("type") == "text" + and "text" in processed_content[0] + ): + processed_content[0]["text"] = ( + f"{system_message}\n{processed_content[0]['text']}" + ) + else: + processed_content.insert( + 0, {"type": "text", "text": system_message} + ) + system_message = None + reka_messages.append({"role": "user", "content": processed_content}) + elif isinstance(message, AIMessage): + reka_message: Dict[str, Any] = {"role": "assistant"} + if message.content: + processed_content = process_content(message.content) + reka_message["content"] = processed_content + if "tool_calls" in message.additional_kwargs: + tool_calls = message.additional_kwargs["tool_calls"] + formatted_tool_calls = [] + for tool_call in tool_calls: + formatted_tool_call = { + "id": tool_call["id"], + "name": tool_call["function"]["name"], + "parameters": json.loads(tool_call["function"]["arguments"]), + } + formatted_tool_calls.append(formatted_tool_call) + reka_message["tool_calls"] = formatted_tool_calls + reka_messages.append(reka_message) + elif isinstance(message, ToolMessage): + content_list: List[Dict[str, Any]] = [] + content_list.append( + { + "tool_call_id": message.tool_call_id, + "output": json.dumps({"status": message.content}), + } + ) + reka_messages.append( + { + "role": "tool_output", + "content": content_list, + } + ) + else: + raise ValueError(f"Unsupported message type: {type(message)}") + + return reka_messages + + +class ChatReka(BaseChatModel): + """Reka chat large language models.""" + + client: Any = None #: :meta private: + async_client: Any = None #: :meta private: + model: str = 
Field(default=DEFAULT_REKA_MODEL) + max_tokens: int = Field(default=256) + temperature: Optional[float] = None + streaming: bool = False + default_request_timeout: Optional[float] = None + max_retries: int = 2 + reka_api_key: Optional[str] = None + model_kwargs: Dict[str, Any] = Field(default_factory=dict) + model_config = ConfigDict(extra="forbid") + token_counter: Optional[ + Callable[[Union[str, BaseMessage, List[BaseMessage]]], int] + ] = None + + @model_validator(mode="before") + @classmethod + def validate_environment(cls, values: Dict[str, Any]) -> Dict[str, Any]: + """Validate that API key and Python package exist in the environment.""" + reka_api_key = values.get("reka_api_key") + reka_api_key = get_from_dict_or_env( + {"reka_api_key": reka_api_key}, "reka_api_key", "REKA_API_KEY" + ) + values["reka_api_key"] = reka_api_key + + try: + # Import reka libraries here + from reka.client import AsyncReka, Reka + + values["client"] = Reka( + api_key=reka_api_key, + ) + values["async_client"] = AsyncReka( + api_key=reka_api_key, + ) + except ImportError: + raise ImportError( + "Could not import Reka Python package. " + "Please install it with `pip install reka-api`." + ) + return values + + @property + def _default_params(self) -> Mapping[str, Any]: + """Get the default parameters for calling Reka API.""" + params = { + "model": self.model, + "max_tokens": self.max_tokens, + } + if self.temperature is not None: + params["temperature"] = self.temperature + return {**params, **self.model_kwargs} + + @property + def _llm_type(self) -> str: + """Return type of chat model.""" + return "reka-chat" + + def _stream( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[ChatGenerationChunk]: + reka_messages = convert_to_reka_messages(messages) + params = {**self._default_params, **kwargs} + if stop: + params["stop"] = stop + + stream = self.client.chat.create_stream(messages=reka_messages, **params) + + for chunk in stream: + content = chunk.responses[0].chunk.content + chat_chunk = ChatGenerationChunk(message=AIMessageChunk(content=content)) + if run_manager: + run_manager.on_llm_new_token(content, chunk=chat_chunk) + yield chat_chunk + + async def _astream( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AsyncIterator[ChatGenerationChunk]: + reka_messages = convert_to_reka_messages(messages) + params = {**self._default_params, **kwargs} + if stop: + params["stop"] = stop + + stream = self.async_client.chat.create_stream(messages=reka_messages, **params) + + async for chunk in stream: + content = chunk.responses[0].chunk.content + chat_chunk = ChatGenerationChunk(message=AIMessageChunk(content=content)) + if run_manager: + await run_manager.on_llm_new_token(content, chunk=chat_chunk) + yield chat_chunk + + def _generate( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + if self.streaming: + return generate_from_stream( + self._stream(messages, stop=stop, run_manager=run_manager, **kwargs) + ) + + reka_messages = convert_to_reka_messages(messages) + params = {**self._default_params, **kwargs} + if stop: + params["stop"] = stop + response = self.client.chat.create(messages=reka_messages, **params) + + if response.responses[0].message.tool_calls: + tool_calls 
= response.responses[0].message.tool_calls + message = AIMessage( + content="", # Empty string instead of None + additional_kwargs={ + "tool_calls": [ + { + "id": tc.id, + "type": "function", + "function": { + "name": tc.name, + "arguments": json.dumps(tc.parameters), + }, + } + for tc in tool_calls + ] + }, + ) + else: + content = response.responses[0].message.content + # Ensure content is never None + message = AIMessage(content=content if content is not None else "") + + return ChatResult(generations=[ChatGeneration(message=message)]) + + async def _agenerate( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + if self.streaming: + return await agenerate_from_stream( + self._astream(messages, stop=stop, run_manager=run_manager, **kwargs) + ) + + reka_messages = convert_to_reka_messages(messages) + params = {**self._default_params, **kwargs} + if stop: + params["stop"] = stop + response = await self.async_client.chat.create(messages=reka_messages, **params) + + if response.responses[0].message.tool_calls: + tool_calls = response.responses[0].message.tool_calls + message = AIMessage( + content="", # Empty string instead of None + additional_kwargs={ + "tool_calls": [ + { + "id": tc.id, + "type": "function", + "function": { + "name": tc.name, + "arguments": json.dumps(tc.parameters), + }, + } + for tc in tool_calls + ] + }, + ) + else: + content = response.responses[0].message.content + # Ensure content is never None + message = AIMessage(content=content if content is not None else "") + + return ChatResult(generations=[ChatGeneration(message=message)]) + + def get_num_tokens(self, input: Union[str, BaseMessage, List[BaseMessage]]) -> int: + """Calculate number of tokens. + + Args: + input: Either a string, a single BaseMessage, or a list of BaseMessages. + + Returns: + int: Number of tokens in the input. + + Raises: + ImportError: If tiktoken is not installed. + ValueError: If message content is not a string. + """ + if self.token_counter is not None: + return self.token_counter(input) + + try: + import tiktoken + except ImportError: + raise ImportError( + "Could not import tiktoken python package. " + "Please install it with `pip install tiktoken`." + ) + + encoding = tiktoken.get_encoding("cl100k_base") + + if isinstance(input, str): + return len(encoding.encode(input)) + elif isinstance(input, BaseMessage): + content = input.content + if not isinstance(content, str): + raise ValueError( + f"Message content must be a string, got {type(content)}" + ) + return len(encoding.encode(content)) + elif isinstance(input, list): + total = 0 + for msg in input: + content = msg.content + if not isinstance(content, str): + raise ValueError( + f"Message content must be a string, got {type(content)}" + ) + total += len(encoding.encode(content)) + return total + else: + raise TypeError(f"Unsupported input type: {type(input)}") + + def bind_tools( + self, + tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]], + *, + tool_choice: str = "auto", + strict: Optional[bool] = None, + **kwargs: Any, + ) -> Runnable[LanguageModelInput, BaseMessage]: + """Bind tool-like objects to this chat model. + + The `tool_choice` parameter controls how the model uses the tools you pass. + There are three available options: + + - `"auto"`: Lets the model decide whether or not to invoke a tool. This is the + recommended way to do function calling with our models. 
+ - `"none"`: Disables tool calling. In this case, even if you pass tools to + the model, the model will not invoke any tools. + - `"tool"`: Forces the model to invoke one or more of the tools it has + been passed. + + Args: + tools: A list of tool definitions to bind to this chat model. + Supports any tool definition handled by + :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`. + tool_choice: Controls how the model uses the tools you pass. + Options are "auto", "none", or "tool". Defaults to "auto". + strict: + If True, model output is guaranteed to exactly match the JSON Schema + provided in the tool definition. + If False, input schema will not be validated + and model output will not be validated. + If None, ``strict`` argument will not + be passed to the model. + kwargs: Any additional parameters are passed directly to the model. + + Returns: + Runnable: An executable chain or component. + """ + formatted_tools = [ + convert_to_openai_tool(tool, strict=strict) for tool in tools + ] + + # Ensure tool_choice is one of the allowed options + if tool_choice not in ("auto", "none", "tool"): + raise ValueError( + f"Invalid tool_choice '{tool_choice}' provided. " + "Tool choice must be one of: 'auto', 'none', or 'tool'." + ) + + # Map tool_choice to the parameter expected by the Reka API + kwargs["tool_choice"] = tool_choice + + # Pass the tools and updated kwargs to the model + formatted_tools = [tool["function"] for tool in formatted_tools] + return super().bind(tools=formatted_tools, **kwargs) diff --git a/libs/community/langchain_community/llms/__init__.py b/libs/community/langchain_community/llms/__init__.py index 1bf5e0a7ee76eb..9a74a30dce4bd7 100644 --- a/libs/community/langchain_community/llms/__init__.py +++ b/libs/community/langchain_community/llms/__init__.py @@ -518,6 +518,12 @@ def _import_sagemaker_endpoint() -> Type[BaseLLM]: return SagemakerEndpoint +def _import_sambanovacloud() -> Type[BaseLLM]: + from langchain_community.llms.sambanova import SambaNovaCloud + + return SambaNovaCloud + + def _import_sambastudio() -> Type[BaseLLM]: from langchain_community.llms.sambanova import SambaStudio @@ -821,6 +827,8 @@ def __getattr__(name: str) -> Any: return _import_rwkv() elif name == "SagemakerEndpoint": return _import_sagemaker_endpoint() + elif name == "SambaNovaCloud": + return _import_sambanovacloud() elif name == "SambaStudio": return _import_sambastudio() elif name == "SelfHostedPipeline": @@ -957,6 +965,7 @@ def __getattr__(name: str) -> Any: "RWKV", "Replicate", "SagemakerEndpoint", + "SambaNovaCloud", "SambaStudio", "SelfHostedHuggingFaceLLM", "SelfHostedPipeline", @@ -1054,6 +1063,7 @@ def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]: "replicate": _import_replicate, "rwkv": _import_rwkv, "sagemaker_endpoint": _import_sagemaker_endpoint, + "sambanovacloud": _import_sambanovacloud, "sambastudio": _import_sambastudio, "self_hosted": _import_self_hosted, "self_hosted_hugging_face": _import_self_hosted_hugging_face, diff --git a/libs/community/langchain_community/llms/sambanova.py b/libs/community/langchain_community/llms/sambanova.py index 4ae6a1e6a28098..7b4badce9b2690 100644 --- a/libs/community/langchain_community/llms/sambanova.py +++ b/libs/community/langchain_community/llms/sambanova.py @@ -537,3 +537,329 @@ def _call( response = self._handle_request(prompt, stop, streaming=False) completion = self._process_response(response) return completion + + +class SambaNovaCloud(LLM): + """ + SambaNova Cloud large language models. 
+ + Setup: + To use, you should have the environment variables: + ``SAMBANOVA_URL`` set with SambaNova Cloud URL. + defaults to https://api.sambanova.ai/v1/chat/completions + ``SAMBANOVA_API_KEY`` set with your SambaNova Cloud API Key. + Example: + .. code-block:: python + from langchain_community.llms.sambanova import SambaNovaCloud + SambaNovaCloud( + sambanova_api_key="your-SambaNovaCloud-API-key", + model="Meta-Llama-3.1-8B-Instruct", + max_tokens=1024, + temperature=0.7, + top_p=1.0, + top_k=1, + ) + Key init args — completion params: + model: str + The name of the model to use, e.g., Meta-Llama-3-70B-Instruct-4096 + (set for CoE endpoints). + streaming: bool + Whether to use streaming handler when using non streaming methods + max_tokens: int + max tokens to generate + temperature: float + model temperature + top_p: float + model top p + top_k: int + model top k + + Key init args — client params: + sambanova_url: str + SambaNovaCloud URL, defaults to https://api.sambanova.ai/v1/chat/completions + sambanova_api_key: str + SambaNovaCloud api key + Instantiate: + .. code-block:: python + from langchain_community.llms.sambanova import SambaNovaCloud + SambaNovaCloud( + sambanova_api_key="your-SambaNovaCloud-API-key", + model="Meta-Llama-3.1-8B-Instruct", + max_tokens=1024, + temperature=0.7, + top_p=1.0, + top_k=1, + ) + Invoke: + .. code-block:: python + prompt = "tell me a joke" + response = llm.invoke(prompt) + Stream: + .. code-block:: python + for chunk in llm.stream(prompt): + print(chunk, end="", flush=True) + Async: + .. code-block:: python + response = await llm.ainvoke(prompt) + """ + + sambanova_url: str = Field(default="") + """SambaNova Cloud Url""" + + sambanova_api_key: SecretStr = Field(default="") + """SambaNova Cloud api key""" + + model: str = Field(default="Meta-Llama-3.1-8B-Instruct") + """The name of the model""" + + streaming: bool = Field(default=False) + """Whether to use streaming handler when using non streaming methods""" + + max_tokens: int = Field(default=1024) + """max tokens to generate""" + + temperature: float = Field(default=0.7) + """model temperature""" + + top_p: Optional[float] = Field(default=None) + """model top p""" + + top_k: Optional[int] = Field(default=None) + """model top k""" + + stream_options: dict = Field(default={"include_usage": True}) + """stream options, include usage to get generation metrics""" + + class Config: + populate_by_name = True + + @classmethod + def is_lc_serializable(cls) -> bool: + """Return whether this model can be serialized by Langchain.""" + return False + + @property + def lc_secrets(self) -> Dict[str, str]: + return {"sambanova_api_key": "sambanova_api_key"} + + @property + def _identifying_params(self) -> Dict[str, Any]: + """Return a dictionary of identifying parameters. + + This information is used by the LangChain callback system, which + is used for tracing purposes and makes it possible to monitor LLMs. 
+ """ + return { + "model": self.model, + "streaming": self.streaming, + "max_tokens": self.max_tokens, + "temperature": self.temperature, + "top_p": self.top_p, + "top_k": self.top_k, + "stream_options": self.stream_options, + } + + @property + def _llm_type(self) -> str: + """Get the type of language model used by this chat model.""" + return "sambanovacloud-llm" + + def __init__(self, **kwargs: Any) -> None: + """init and validate environment variables""" + kwargs["sambanova_url"] = get_from_dict_or_env( + kwargs, + "sambanova_url", + "SAMBANOVA_URL", + default="https://api.sambanova.ai/v1/chat/completions", + ) + kwargs["sambanova_api_key"] = convert_to_secret_str( + get_from_dict_or_env(kwargs, "sambanova_api_key", "SAMBANOVA_API_KEY") + ) + super().__init__(**kwargs) + + def _handle_request( + self, + prompt: Union[List[str], str], + stop: Optional[List[str]] = None, + streaming: Optional[bool] = False, + ) -> Response: + """ + Performs a post request to the LLM API. + + Args: + prompt: The prompt to pass into the model. + stop: list of stop tokens + + Returns: + A request Response object + """ + if isinstance(prompt, str): + prompt = [prompt] + + messages_dict = [{"role": "user", "content": prompt[0]}] + data = { + "messages": messages_dict, + "stream": streaming, + "max_tokens": self.max_tokens, + "stop": stop, + "model": self.model, + "temperature": self.temperature, + "top_p": self.top_p, + "top_k": self.top_k, + } + data = {key: value for key, value in data.items() if value is not None} + headers = { + "Authorization": f"Bearer " f"{self.sambanova_api_key.get_secret_value()}", + "Content-Type": "application/json", + } + + http_session = requests.Session() + if streaming: + response = http_session.post( + self.sambanova_url, headers=headers, json=data, stream=True + ) + else: + response = http_session.post( + self.sambanova_url, headers=headers, json=data, stream=False + ) + + if response.status_code != 200: + raise RuntimeError( + f"Sambanova / complete call failed with status code " + f"{response.status_code}." + f"{response.text}." + ) + return response + + def _process_response(self, response: Response) -> str: + """ + Process a non streaming response from the api + + Args: + response: A request Response object + + Returns + completion: a string with model generation + """ + + # Extract json payload form response + try: + response_dict = response.json() + except Exception as e: + raise RuntimeError( + f"Sambanova /complete call failed couldn't get JSON response {e}" + f"response: {response.text}" + ) + + completion = response_dict["choices"][0]["message"]["content"] + + return completion + + def _process_stream_response(self, response: Response) -> Iterator[GenerationChunk]: + """ + Process a streaming response from the api + + Args: + response: An iterable request Response object + + Yields: + GenerationChunk: a GenerationChunk with model partial generation + """ + + try: + import sseclient + except ImportError: + raise ImportError( + "could not import sseclient library" + "Please install it with `pip install sseclient-py`." + ) + + client = sseclient.SSEClient(response) + for event in client.events(): + if event.event == "error_event": + raise RuntimeError( + f"Sambanova /complete call failed with status code " + f"{response.status_code}." + f"{event.data}." 
+ ) + try: + # check if the response is not a final event ("[DONE]") + if event.data != "[DONE]": + if isinstance(event.data, str): + data = json.loads(event.data) + else: + raise RuntimeError( + f"Sambanova /complete call failed with status code " + f"{response.status_code}." + f"{event.data}." + ) + if data.get("error"): + raise RuntimeError( + f"Sambanova /complete call failed with status code " + f"{response.status_code}." + f"{event.data}." + ) + if len(data["choices"]) > 0: + content = data["choices"][0]["delta"]["content"] + else: + content = "" + generated_chunk = GenerationChunk(text=content) + yield generated_chunk + + except Exception as e: + raise RuntimeError( + f"Error getting content chunk raw streamed response: {e}" + f"data: {event.data}" + ) + + def _call( + self, + prompt: Union[List[str], str], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> str: + """Call out to SambaNovaCloud complete endpoint. + + Args: + prompt: The prompt to pass into the model. + stop: Optional list of stop words to use when generating. + + Returns: + The string generated by the model. + """ + if self.streaming: + completion = "" + for chunk in self._stream( + prompt=prompt, stop=stop, run_manager=run_manager, **kwargs + ): + completion += chunk.text + + return completion + + response = self._handle_request(prompt, stop, streaming=False) + completion = self._process_response(response) + return completion + + def _stream( + self, + prompt: Union[List[str], str], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[GenerationChunk]: + """Call out to SambaNovaCloud complete endpoint. + + Args: + prompt: The prompt to pass into the model. + stop: Optional list of stop words to use when generating. + + Yields: + GenerationChunk: chunks of the string generated by the model. 
+ """ + response = self._handle_request(prompt, stop, streaming=True) + for chunk in self._process_stream_response(response): + if run_manager: + run_manager.on_llm_new_token(chunk.text) + yield chunk diff --git a/libs/community/langchain_community/vectorstores/opensearch_vector_search.py b/libs/community/langchain_community/vectorstores/opensearch_vector_search.py index 3e5bb280035a47..f08620eef63838 100644 --- a/libs/community/langchain_community/vectorstores/opensearch_vector_search.py +++ b/libs/community/langchain_community/vectorstores/opensearch_vector_search.py @@ -401,7 +401,7 @@ def __init__( self.is_aoss = _is_aoss_enabled(http_auth=http_auth) self.client = _get_opensearch_client(opensearch_url, **kwargs) self.async_client = _get_async_opensearch_client(opensearch_url, **kwargs) - self.engine = kwargs.get("engine") + self.engine = kwargs.get("engine", "nmslib") @property def embeddings(self) -> Embeddings: @@ -420,7 +420,7 @@ def __add( index_name = kwargs.get("index_name", self.index_name) text_field = kwargs.get("text_field", "text") dim = len(embeddings[0]) - engine = kwargs.get("engine", "nmslib") + engine = kwargs.get("engine", self.engine) space_type = kwargs.get("space_type", "l2") ef_search = kwargs.get("ef_search", 512) ef_construction = kwargs.get("ef_construction", 512) @@ -461,7 +461,7 @@ async def __aadd( index_name = kwargs.get("index_name", self.index_name) text_field = kwargs.get("text_field", "text") dim = len(embeddings[0]) - engine = kwargs.get("engine", "nmslib") + engine = kwargs.get("engine", self.engine) space_type = kwargs.get("space_type", "l2") ef_search = kwargs.get("ef_search", 512) ef_construction = kwargs.get("ef_construction", 512) @@ -530,7 +530,7 @@ def create_index( ) if is_appx_search: - engine = kwargs.get("engine", "nmslib") + engine = kwargs.get("engine", self.engine) space_type = kwargs.get("space_type", "l2") ef_search = kwargs.get("ef_search", 512) ef_construction = kwargs.get("ef_construction", 512) diff --git a/libs/community/poetry.lock b/libs/community/poetry.lock index 71a7cc57432f3b..7885dc8e2ebb44 100644 --- a/libs/community/poetry.lock +++ b/libs/community/poetry.lock @@ -1847,8 +1847,8 @@ langchain-core = "^0.3.15" langchain-text-splitters = "^0.3.0" langsmith = "^0.1.17" numpy = [ - {version = ">=1,<2", markers = "python_version < \"3.12\""}, - {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""}, + {version = ">=1.22.4,<2", markers = "python_version < \"3.12\""}, + {version = ">=1.26.2,<2", markers = "python_version >= \"3.12\""}, ] pydantic = "^2.7.4" PyYAML = ">=5.3" @@ -1862,7 +1862,7 @@ url = "../langchain" [[package]] name = "langchain-core" -version = "0.3.17" +version = "0.3.19" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.9,<4.0" @@ -1886,8 +1886,8 @@ type = "directory" url = "../core" [[package]] -name = "langchain-standard-tests" -version = "0.3.0" +name = "langchain-tests" +version = "0.3.1" description = "Standard tests for LangChain implementations" optional = false python-versions = ">=3.9,<4.0" @@ -2532,30 +2532,41 @@ files = [ {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"}, {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"}, {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"}, {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"}, {file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"}, {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"}, {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"}, {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"}, {file = "pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"}, {file = "pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"}, {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"}, {file = "pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"}, {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"}, {file = "pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"}, {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"}, {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"}, {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"}, {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"}, {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = 
"sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"}, {file = "pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"}, {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"}, {file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"}, {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"}, {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"}, {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"}, {file = "pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"}, {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"}, {file = "pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"}, {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"}, @@ -3766,7 +3777,9 @@ python-versions = ">=3.7" files = [ {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"}, {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea"}, {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9"}, {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff"}, {file = "SQLAlchemy-2.0.35-cp310-cp310-win32.whl", hash = "sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b"}, {file = "SQLAlchemy-2.0.35-cp310-cp310-win_amd64.whl", hash = "sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e"}, @@ -3787,22 +3800,29 @@ files = [ {file = "SQLAlchemy-2.0.35-cp312-cp312-win32.whl", hash = 
"sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf"}, {file = "SQLAlchemy-2.0.35-cp312-cp312-win_amd64.whl", hash = "sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc"}, {file = "SQLAlchemy-2.0.35-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1"}, {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3"}, {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f"}, {file = "SQLAlchemy-2.0.35-cp37-cp37m-win32.whl", hash = "sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87"}, {file = "SQLAlchemy-2.0.35-cp37-cp37m-win_amd64.whl", hash = "sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5"}, {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec"}, {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72"}, {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9"}, {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936"}, {file = "SQLAlchemy-2.0.35-cp38-cp38-win32.whl", hash = "sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0"}, {file = "SQLAlchemy-2.0.35-cp38-cp38-win_amd64.whl", hash = "sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee"}, {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4"}, {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139"}, {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44"}, {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0"}, {file = "SQLAlchemy-2.0.35-cp39-cp39-win32.whl", hash = "sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3"}, {file = 
"SQLAlchemy-2.0.35-cp39-cp39-win_amd64.whl", hash = "sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f"}, + {file = "SQLAlchemy-2.0.35-py3-none-any.whl", hash = "sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1"}, {file = "sqlalchemy-2.0.35.tar.gz", hash = "sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f"}, ] @@ -4545,4 +4565,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4.0" -content-hash = "a02b86c4746a93545fe99f8a30ea9becedead16f881254d7763a052ec50c9648" +content-hash = "b369e0441d1066761e311c967981f172881e87fc9f2f5b96b9891667bd336404" diff --git a/libs/community/pyproject.toml b/libs/community/pyproject.toml index c196a1c3602924..25c63358ed7fd9 100644 --- a/libs/community/pyproject.toml +++ b/libs/community/pyproject.toml @@ -1,5 +1,5 @@ [build-system] -requires = [ "poetry-core>=1.0.0",] +requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" [tool.poetry] @@ -12,12 +12,15 @@ readme = "README.md" repository = "https://github.com/langchain-ai/langchain" [tool.ruff] -exclude = [ "tests/examples/non-utf8-encoding.py", "tests/integration_tests/examples/non-utf8-encoding.py",] +exclude = [ + "tests/examples/non-utf8-encoding.py", + "tests/integration_tests/examples/non-utf8-encoding.py", +] [tool.mypy] ignore_missing_imports = "True" disallow_untyped_defs = "True" -exclude = [ "notebooks", "examples", "example_data",] +exclude = ["notebooks", "examples", "example_data"] [tool.codespell] skip = ".git,*.pdf,*.svg,*.pdf,*.yaml,*.ipynb,poetry.lock,*.min.js,*.css,package-lock.json,example_data,_dist,examples,*.trig" @@ -42,24 +45,32 @@ pydantic-settings = "^2.4.0" langsmith = "^0.1.125" httpx-sse = "^0.4.0" [[tool.poetry.dependencies.numpy]] -version = "^1" +version = ">=1.22.4,<2" python = "<3.12" [[tool.poetry.dependencies.numpy]] -version = "^1.26.0" +version = ">=1.26.2,<2" python = ">=3.12" [tool.ruff.lint] -select = [ "E", "F", "I", "T201",] +select = ["E", "F", "I", "T201"] [tool.coverage.run] -omit = [ "tests/*",] +omit = ["tests/*"] [tool.pytest.ini_options] addopts = "--strict-markers --strict-config --durations=5 --snapshot-warn-unused -vv" -markers = [ "requires: mark tests as requiring a specific library", "scheduled: mark tests to run in scheduled testing", "compile: mark placeholder test used to compile integration tests without running them",] +markers = [ + "requires: mark tests as requiring a specific library", + "scheduled: mark tests to run in scheduled testing", + "compile: mark placeholder test used to compile integration tests without running them", +] asyncio_mode = "auto" -filterwarnings = [ "ignore::langchain_core._api.beta_decorator.LangChainBetaWarning", "ignore::langchain_core._api.deprecation.LangChainDeprecationWarning:test", "ignore::langchain_core._api.deprecation.LangChainPendingDeprecationWarning:test",] +filterwarnings = [ + "ignore::langchain_core._api.beta_decorator.LangChainBetaWarning", + "ignore::langchain_core._api.deprecation.LangChainDeprecationWarning:test", + "ignore::langchain_core._api.deprecation.LangChainPendingDeprecationWarning:test", +] [tool.poetry.group.test] optional = true @@ -138,7 +149,7 @@ develop = true path = "../langchain" develop = true -[tool.poetry.group.test.dependencies.langchain-standard-tests] +[tool.poetry.group.test.dependencies.langchain-tests] path = "../standard-tests" develop = true diff --git a/libs/community/tests/integration_tests/chat_models/test_reka.py 
b/libs/community/tests/integration_tests/chat_models/test_reka.py new file mode 100644 index 00000000000000..848a0f04bcf876 --- /dev/null +++ b/libs/community/tests/integration_tests/chat_models/test_reka.py @@ -0,0 +1,222 @@ +"""Test Reka API wrapper.""" + +import logging +from typing import List + +import pytest +from langchain_core.messages import ( + AIMessage, + BaseMessage, + HumanMessage, + SystemMessage, + ToolMessage, +) +from langchain_core.outputs import ChatGeneration, LLMResult + +from langchain_community.chat_models.reka import ChatReka +from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler + +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +def test_reka_call() -> None: + """Test a simple call to Reka.""" + chat = ChatReka(model="reka-flash", verbose=True) + message = HumanMessage(content="Hello") + response = chat.invoke([message]) + assert isinstance(response, AIMessage) + assert isinstance(response.content, str) + logger.debug(f"Response content: {response.content}") + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +def test_reka_generate() -> None: + """Test the generate method of Reka.""" + chat = ChatReka(model="reka-flash", verbose=True) + chat_messages: List[List[BaseMessage]] = [ + [HumanMessage(content="How many toes do dogs have?")] + ] + messages_copy = [messages.copy() for messages in chat_messages] + result: LLMResult = chat.generate(chat_messages) + assert isinstance(result, LLMResult) + for response in result.generations[0]: + assert isinstance(response, ChatGeneration) + assert isinstance(response.text, str) + assert response.text == response.message.content + logger.debug(f"Generated response: {response.text}") + assert chat_messages == messages_copy + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +def test_reka_streaming() -> None: + """Test streaming tokens from Reka.""" + chat = ChatReka(model="reka-flash", streaming=True, verbose=True) + message = HumanMessage(content="Tell me a story.") + response = chat.invoke([message]) + assert isinstance(response, AIMessage) + assert isinstance(response.content, str) + logger.debug(f"Streaming response content: {response.content}") + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +def test_reka_streaming_callback() -> None: + """Test that streaming correctly invokes callbacks.""" + callback_handler = FakeCallbackHandler() + chat = ChatReka( + model="reka-flash", + streaming=True, + callbacks=[callback_handler], + verbose=True, + ) + message = HumanMessage(content="Write me a sentence with 10 words.") + chat.invoke([message]) + assert callback_handler.llm_streams > 1 + logger.debug(f"Number of LLM streams: {callback_handler.llm_streams}") + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." 
+) +async def test_reka_async_streaming_callback() -> None: + """Test asynchronous streaming with callbacks.""" + callback_handler = FakeCallbackHandler() + chat = ChatReka( + model="reka-flash", + streaming=True, + callbacks=[callback_handler], + verbose=True, + ) + chat_messages: List[BaseMessage] = [ + HumanMessage(content="How many toes do dogs have?") + ] + result: LLMResult = await chat.agenerate([chat_messages]) + assert callback_handler.llm_streams > 1 + assert isinstance(result, LLMResult) + for response in result.generations[0]: + assert isinstance(response, ChatGeneration) + assert isinstance(response.text, str) + assert response.text == response.message.content + logger.debug(f"Async generated response: {response.text}") + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +def test_reka_tool_usage_integration() -> None: + """Test tool usage with Reka API integration.""" + # Initialize the ChatReka model with tools and verbose logging + chat_reka = ChatReka(model="reka-flash", verbose=True) + tools = [ + { + "type": "function", + "function": { + "name": "get_product_availability", + "description": ( + "Determine whether a product is currently in stock given " + "a product ID." + ), + "parameters": { + "type": "object", + "properties": { + "product_id": { + "type": "string", + "description": ( + "The unique product ID to check availability for" + ), + }, + }, + "required": ["product_id"], + }, + }, + }, + ] + chat_reka_with_tools = chat_reka.bind_tools(tools) + + # Start a conversation + messages: List[BaseMessage] = [ + HumanMessage(content="Is product A12345 in stock right now?") + ] + + # Get the initial response + response = chat_reka_with_tools.invoke(messages) + assert isinstance(response, AIMessage) + logger.debug(f"Initial AI message: {response.content}") + + # Check if the model wants to use a tool + if "tool_calls" in response.additional_kwargs: + tool_calls = response.additional_kwargs["tool_calls"] + for tool_call in tool_calls: + function_name = tool_call["function"]["name"] + arguments = tool_call["function"]["arguments"] + logger.debug( + f"Tool call requested: {function_name} with arguments {arguments}" + ) + + # Simulate executing the tool + tool_output = "AVAILABLE" + + tool_message = ToolMessage( + content=tool_output, tool_call_id=tool_call["id"] + ) + messages.append(response) + messages.append(tool_message) + + final_response = chat_reka_with_tools.invoke(messages) + assert isinstance(final_response, AIMessage) + logger.debug(f"Final AI message: {final_response.content}") + + # Assert that the response message is non-empty + assert final_response.content, "The final response content is empty." + else: + pytest.fail("The model did not request a tool.") + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +def test_reka_system_message() -> None: + """Test Reka with system message.""" + chat = ChatReka(model="reka-flash", verbose=True) + messages = [ + SystemMessage(content="You are a helpful AI that speaks like Shakespeare."), + HumanMessage(content="Tell me about the weather today."), + ] + response = chat.invoke(messages) + assert isinstance(response, AIMessage) + assert isinstance(response.content, str) + logger.debug(f"Response with system message: {response.content}") + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." 
+) +def test_reka_system_message_multi_turn() -> None: + """Test multi-turn conversation with system message.""" + chat = ChatReka(model="reka-flash", verbose=True) + messages = [ + SystemMessage(content="You are a math tutor who explains concepts simply."), + HumanMessage(content="What is a prime number?"), + ] + + # First turn + response1 = chat.invoke(messages) + assert isinstance(response1, AIMessage) + messages.append(response1) + + # Second turn + messages.append(HumanMessage(content="Can you give me an example?")) + response2 = chat.invoke(messages) + assert isinstance(response2, AIMessage) + + logger.debug(f"First response: {response1.content}") + logger.debug(f"Second response: {response2.content}") diff --git a/libs/community/tests/integration_tests/llms/test_sambanova.py b/libs/community/tests/integration_tests/llms/test_sambanova.py index 5f082df52785c3..345ae8bcf013a5 100644 --- a/libs/community/tests/integration_tests/llms/test_sambanova.py +++ b/libs/community/tests/integration_tests/llms/test_sambanova.py @@ -1,13 +1,20 @@ -"""Test sambanova API wrapper. +"""Test sambanova API llm wrappers. -In order to run this test, you need to have a sambastudio base url, -project id, endpoint id, and api key. -You'll then need to set SAMBASTUDIO_BASE_URL, SAMBASTUDIO_BASE_URI -SAMBASTUDIO_PROJECT_ID, SAMBASTUDIO_ENDPOINT_ID, and SAMBASTUDIO_API_KEY -environment variables. +In order to run this test, you need to have a sambastudio url, and api key +and a sambanova cloud api key. +You'll then need to set SAMBASTUDIO_URL, and SAMBASTUDIO_API_KEY, +and SAMBANOVA_API_KEY environment variables. """ -from langchain_community.llms.sambanova import SambaStudio +from langchain_community.llms.sambanova import SambaNovaCloud, SambaStudio + + +def test_sambanova_cloud_call() -> None: + """Test simple non-streaming call to sambastudio.""" + llm = SambaNovaCloud() + output = llm.invoke("What is LangChain") + assert output + assert isinstance(output, str) def test_sambastudio_call() -> None: diff --git a/libs/community/tests/unit_tests/chat_models/test_imports.py b/libs/community/tests/unit_tests/chat_models/test_imports.py index ee3240168e01ec..4022fe781bf02e 100644 --- a/libs/community/tests/unit_tests/chat_models/test_imports.py +++ b/libs/community/tests/unit_tests/chat_models/test_imports.py @@ -45,6 +45,7 @@ "ChatVertexAI", "ChatYandexGPT", "ChatYuan2", + "ChatReka", "ChatZhipuAI", "ErnieBotChat", "FakeListChatModel", diff --git a/libs/community/tests/unit_tests/chat_models/test_reka.py b/libs/community/tests/unit_tests/chat_models/test_reka.py new file mode 100644 index 00000000000000..bbacadf7fd9269 --- /dev/null +++ b/libs/community/tests/unit_tests/chat_models/test_reka.py @@ -0,0 +1,372 @@ +import json +import os +from typing import Any, Dict, List +from unittest.mock import MagicMock, patch + +import pytest +from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage +from pydantic import ValidationError + +from langchain_community.chat_models import ChatReka +from langchain_community.chat_models.reka import ( + convert_to_reka_messages, + process_content, +) + +os.environ["REKA_API_KEY"] = "dummy_key" + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +def test_reka_model_param() -> None: + llm = ChatReka(model="reka-flash") + assert llm.model == "reka-flash" + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." 
+) +def test_reka_model_kwargs() -> None: + llm = ChatReka(model_kwargs={"foo": "bar"}) + assert llm.model_kwargs == {"foo": "bar"} + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +def test_reka_incorrect_field() -> None: + """Test that providing an incorrect field raises ValidationError.""" + with pytest.raises(ValidationError): + ChatReka(unknown_field="bar") # type: ignore + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +def test_reka_initialization() -> None: + """Test Reka initialization.""" + # Verify that ChatReka can be initialized using a secret key provided + # as a parameter rather than an environment variable. + ChatReka(model="reka-flash", reka_api_key="test_key") + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +@pytest.mark.parametrize( + ("content", "expected"), + [ + ("Hello", [{"type": "text", "text": "Hello"}]), + ( + [ + {"type": "text", "text": "Describe this image"}, + { + "type": "image_url", + "image_url": "https://example.com/image.jpg", + }, + ], + [ + {"type": "text", "text": "Describe this image"}, + {"type": "image_url", "image_url": "https://example.com/image.jpg"}, + ], + ), + ( + [ + {"type": "text", "text": "Hello"}, + { + "type": "image_url", + "image_url": {"url": "https://example.com/image.jpg"}, + }, + ], + [ + {"type": "text", "text": "Hello"}, + {"type": "image_url", "image_url": "https://example.com/image.jpg"}, + ], + ), + ], +) +def test_process_content(content: Any, expected: List[Dict[str, Any]]) -> None: + result = process_content(content) + assert result == expected + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +@pytest.mark.parametrize( + ("messages", "expected"), + [ + ( + [HumanMessage(content="Hello")], + [{"role": "user", "content": [{"type": "text", "text": "Hello"}]}], + ), + ( + [ + HumanMessage( + content=[ + {"type": "text", "text": "Describe this image"}, + { + "type": "image_url", + "image_url": "https://example.com/image.jpg", + }, + ] + ), + AIMessage(content="It's a beautiful landscape."), + ], + [ + { + "role": "user", + "content": [ + {"type": "text", "text": "Describe this image"}, + { + "type": "image_url", + "image_url": "https://example.com/image.jpg", + }, + ], + }, + { + "role": "assistant", + "content": [ + {"type": "text", "text": "It's a beautiful landscape."} + ], + }, + ], + ), + ], +) +def test_convert_to_reka_messages( + messages: List[BaseMessage], expected: List[Dict[str, Any]] +) -> None: + result = convert_to_reka_messages(messages) + assert result == expected + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +def test_reka_streaming() -> None: + llm = ChatReka(streaming=True) + assert llm.streaming is True + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +def test_reka_temperature() -> None: + llm = ChatReka(temperature=0.5) + assert llm.temperature == 0.5 + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +def test_reka_max_tokens() -> None: + llm = ChatReka(max_tokens=100) + assert llm.max_tokens == 100 + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." 
+) +def test_reka_default_params() -> None: + llm = ChatReka() + assert llm._default_params == { + "max_tokens": 256, + "model": "reka-flash", + } + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +def test_reka_identifying_params() -> None: + """Test that ChatReka identifies its default parameters correctly.""" + chat = ChatReka(model="reka-flash", temperature=0.7, max_tokens=256) + expected_params = { + "model": "reka-flash", + "temperature": 0.7, + "max_tokens": 256, + } + assert chat._default_params == expected_params + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +def test_reka_llm_type() -> None: + llm = ChatReka() + assert llm._llm_type == "reka-chat" + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +def test_reka_tool_use_with_mocked_response() -> None: + with patch("reka.client.Reka") as MockReka: + # Mock the Reka client + mock_client = MockReka.return_value + mock_chat = MagicMock() + mock_client.chat = mock_chat + mock_response = MagicMock() + mock_message = MagicMock() + mock_tool_call = MagicMock() + mock_tool_call.id = "tool_call_1" + mock_tool_call.name = "search_tool" + mock_tool_call.parameters = {"query": "LangChain"} + mock_message.tool_calls = [mock_tool_call] + mock_message.content = None + mock_response.responses = [MagicMock(message=mock_message)] + mock_chat.create.return_value = mock_response + + llm = ChatReka() + messages: List[BaseMessage] = [HumanMessage(content="Tell me about LangChain")] + result = llm._generate(messages) + + assert len(result.generations) == 1 + ai_message = result.generations[0].message + assert ai_message.content == "" + assert "tool_calls" in ai_message.additional_kwargs + tool_calls = ai_message.additional_kwargs["tool_calls"] + assert len(tool_calls) == 1 + assert tool_calls[0]["id"] == "tool_call_1" + assert tool_calls[0]["function"]["name"] == "search_tool" + assert tool_calls[0]["function"]["arguments"] == json.dumps( + {"query": "LangChain"} + ) + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." 
+) +@pytest.mark.parametrize( + ("messages", "expected"), + [ + # Test single system message + ( + [ + SystemMessage(content="You are a helpful assistant."), + HumanMessage(content="Hello"), + ], + [ + { + "role": "user", + "content": [ + {"type": "text", "text": "You are a helpful assistant.\nHello"} + ], + } + ], + ), + # Test system message with multiple messages + ( + [ + SystemMessage(content="You are a helpful assistant."), + HumanMessage(content="What is 2+2?"), + AIMessage(content="4"), + HumanMessage(content="Thanks!"), + ], + [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "You are a helpful assistant.\nWhat is 2+2?", + } + ], + }, + {"role": "assistant", "content": [{"type": "text", "text": "4"}]}, + {"role": "user", "content": [{"type": "text", "text": "Thanks!"}]}, + ], + ), + # Test system message with media content + ( + [ + SystemMessage(content="Hi."), + HumanMessage( + content=[ + {"type": "text", "text": "What's in this image?"}, + { + "type": "image_url", + "image_url": "https://example.com/image.jpg", + }, + ] + ), + ], + [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "Hi.\nWhat's in this image?", + }, + { + "type": "image_url", + "image_url": "https://example.com/image.jpg", + }, + ], + }, + ], + ), + ], +) +def test_system_message_handling( + messages: List[BaseMessage], expected: List[Dict[str, Any]] +) -> None: + """Test that system messages are handled correctly.""" + result = convert_to_reka_messages(messages) + assert result == expected + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +def test_multiple_system_messages_error() -> None: + """Test that multiple system messages raise an error.""" + messages = [ + SystemMessage(content="System message 1"), + SystemMessage(content="System message 2"), + HumanMessage(content="Hello"), + ] + + with pytest.raises(ValueError, match="Multiple system messages are not supported."): + convert_to_reka_messages(messages) + + +@pytest.mark.skip( + reason="Dependency conflict w/ other dependencies for urllib3 versions." +) +def test_get_num_tokens() -> None: + """Test that token counting works correctly for different input types.""" + llm = ChatReka() + import tiktoken + + encoding = tiktoken.get_encoding("cl100k_base") + + # Test string input + text = "What is the weather like today?" + expected_tokens = len(encoding.encode(text)) + assert llm.get_num_tokens(text) == expected_tokens + + # Test BaseMessage input + message = HumanMessage(content="What is the weather like today?") + assert isinstance(message.content, str) + expected_tokens = len(encoding.encode(message.content)) + assert llm.get_num_tokens(message) == expected_tokens + + # Test List[BaseMessage] input + messages = [ + SystemMessage(content="You are a helpful assistant."), + HumanMessage(content="Hi!"), + AIMessage(content="Hello! 
How can I help you today?"), + ] + expected_tokens = sum( + len(encoding.encode(msg.content)) + for msg in messages + if isinstance(msg.content, str) + ) + assert llm.get_num_tokens(messages) == expected_tokens + + # Test empty message list + assert llm.get_num_tokens([]) == 0 diff --git a/libs/community/tests/unit_tests/llms/test_imports.py b/libs/community/tests/unit_tests/llms/test_imports.py index df0fe68b59fd45..991996ec558027 100644 --- a/libs/community/tests/unit_tests/llms/test_imports.py +++ b/libs/community/tests/unit_tests/llms/test_imports.py @@ -78,6 +78,7 @@ "RWKV", "Replicate", "SagemakerEndpoint", + "SambaNovaCloud", "SambaStudio", "SelfHostedHuggingFaceLLM", "SelfHostedPipeline", diff --git a/libs/community/tests/unit_tests/test_dependencies.py b/libs/community/tests/unit_tests/test_dependencies.py index 6ebc2d3d9af63e..a5c8c7c12c100a 100644 --- a/libs/community/tests/unit_tests/test_dependencies.py +++ b/libs/community/tests/unit_tests/test_dependencies.py @@ -80,7 +80,7 @@ def test_test_group_dependencies(poetry_conf: Mapping[str, Any]) -> None: "duckdb-engine", "freezegun", "langchain-core", - "langchain-standard-tests", + "langchain-tests", "langchain", "lark", "pandas", diff --git a/libs/core/langchain_core/runnables/utils.py b/libs/core/langchain_core/runnables/utils.py index 06168a176d0a3d..a81ca7615a66f2 100644 --- a/libs/core/langchain_core/runnables/utils.py +++ b/libs/core/langchain_core/runnables/utils.py @@ -388,6 +388,7 @@ def get_lambda_source(func: Callable) -> Optional[str]: return name +@lru_cache(maxsize=256) def get_function_nonlocals(func: Callable) -> list[Any]: """Get the nonlocal variables accessed by a function. diff --git a/libs/core/poetry.lock b/libs/core/poetry.lock index 167daa9f8b31ac..2f76b5b2d99952 100644 --- a/libs/core/poetry.lock +++ b/libs/core/poetry.lock @@ -609,13 +609,13 @@ files = [ [[package]] name = "httpcore" -version = "1.0.6" +version = "1.0.7" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"}, - {file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"}, + {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, + {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, ] [package.dependencies] @@ -1189,8 +1189,8 @@ files = [ ] [[package]] -name = "langchain-standard-tests" -version = "0.3.0" +name = "langchain-tests" +version = "0.3.1" description = "Standard tests for LangChain implementations" optional = false python-versions = ">=3.9,<4.0" @@ -1225,13 +1225,13 @@ url = "../text-splitters" [[package]] name = "langsmith" -version = "0.1.142" +version = "0.1.143" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.142-py3-none-any.whl", hash = "sha256:f639ca23c9a0bb77af5fb881679b2f66ff1f21f19d0bebf4e51375e7585a8b38"}, - {file = "langsmith-0.1.142.tar.gz", hash = "sha256:f8a84d100f3052233ff0a1d66ae14c5dfc20b7e41a1601de011384f16ee6cb82"}, + {file = "langsmith-0.1.143-py3-none-any.whl", hash = "sha256:ba0d827269e9b03a90fababe41fa3e4e3f833300b95add10184f7e67167dde6f"}, + {file = "langsmith-0.1.143.tar.gz", hash = "sha256:4c5159e5cd84b3f8499433009e72d2076dd2daf6c044ac8a3611b30d0d0161c5"}, ] [package.dependencies] @@ -1574,6 +1574,70 @@ files = [ {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, ] +[[package]] +name = "numpy" +version = "2.1.3" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.10" +files = [ + {file = "numpy-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c894b4305373b9c5576d7a12b473702afdf48ce5369c074ba304cc5ad8730dff"}, + {file = "numpy-2.1.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b47fbb433d3260adcd51eb54f92a2ffbc90a4595f8970ee00e064c644ac788f5"}, + {file = "numpy-2.1.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:825656d0743699c529c5943554d223c021ff0494ff1442152ce887ef4f7561a1"}, + {file = "numpy-2.1.3-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:6a4825252fcc430a182ac4dee5a505053d262c807f8a924603d411f6718b88fd"}, + {file = "numpy-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e711e02f49e176a01d0349d82cb5f05ba4db7d5e7e0defd026328e5cfb3226d3"}, + {file = "numpy-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78574ac2d1a4a02421f25da9559850d59457bac82f2b8d7a44fe83a64f770098"}, + {file = "numpy-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c7662f0e3673fe4e832fe07b65c50342ea27d989f92c80355658c7f888fcc83c"}, + {file = "numpy-2.1.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:fa2d1337dc61c8dc417fbccf20f6d1e139896a30721b7f1e832b2bb6ef4eb6c4"}, + {file = "numpy-2.1.3-cp310-cp310-win32.whl", hash = "sha256:72dcc4a35a8515d83e76b58fdf8113a5c969ccd505c8a946759b24e3182d1f23"}, + {file = "numpy-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:ecc76a9ba2911d8d37ac01de72834d8849e55473457558e12995f4cd53e778e0"}, + {file = "numpy-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4d1167c53b93f1f5d8a139a742b3c6f4d429b54e74e6b57d0eff40045187b15d"}, + {file = "numpy-2.1.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c80e4a09b3d95b4e1cac08643f1152fa71a0a821a2d4277334c88d54b2219a41"}, + {file = "numpy-2.1.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:576a1c1d25e9e02ed7fa5477f30a127fe56debd53b8d2c89d5578f9857d03ca9"}, + {file = "numpy-2.1.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:973faafebaae4c0aaa1a1ca1ce02434554d67e628b8d805e61f874b84e136b09"}, + {file = "numpy-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:762479be47a4863e261a840e8e01608d124ee1361e48b96916f38b119cfda04a"}, + {file = "numpy-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc6f24b3d1ecc1eebfbf5d6051faa49af40b03be1aaa781ebdadcbc090b4539b"}, + {file = "numpy-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:17ee83a1f4fef3c94d16dc1802b998668b5419362c8a4f4e8a491de1b41cc3ee"}, + {file = "numpy-2.1.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:15cb89f39fa6d0bdfb600ea24b250e5f1a3df23f901f51c8debaa6a5d122b2f0"}, + {file = "numpy-2.1.3-cp311-cp311-win32.whl", hash = "sha256:d9beb777a78c331580705326d2367488d5bc473b49a9bc3036c154832520aca9"}, + {file = "numpy-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:d89dd2b6da69c4fff5e39c28a382199ddedc3a5be5390115608345dec660b9e2"}, + {file = "numpy-2.1.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f55ba01150f52b1027829b50d70ef1dafd9821ea82905b63936668403c3b471e"}, + {file = "numpy-2.1.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:13138eadd4f4da03074851a698ffa7e405f41a0845a6b1ad135b81596e4e9958"}, + {file = "numpy-2.1.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:a6b46587b14b888e95e4a24d7b13ae91fa22386c199ee7b418f449032b2fa3b8"}, + {file = "numpy-2.1.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:0fa14563cc46422e99daef53d725d0c326e99e468a9320a240affffe87852564"}, + {file = "numpy-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8637dcd2caa676e475503d1f8fdb327bc495554e10838019651b76d17b98e512"}, + {file = "numpy-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2312b2aa89e1f43ecea6da6ea9a810d06aae08321609d8dc0d0eda6d946a541b"}, + {file = "numpy-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a38c19106902bb19351b83802531fea19dee18e5b37b36454f27f11ff956f7fc"}, + {file = "numpy-2.1.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:02135ade8b8a84011cbb67dc44e07c58f28575cf9ecf8ab304e51c05528c19f0"}, + {file = "numpy-2.1.3-cp312-cp312-win32.whl", hash = "sha256:e6988e90fcf617da2b5c78902fe8e668361b43b4fe26dbf2d7b0f8034d4cafb9"}, + {file = "numpy-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:0d30c543f02e84e92c4b1f415b7c6b5326cbe45ee7882b6b77db7195fb971e3a"}, + {file = "numpy-2.1.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:96fe52fcdb9345b7cd82ecd34547fca4321f7656d500eca497eb7ea5a926692f"}, + {file = "numpy-2.1.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f653490b33e9c3a4c1c01d41bc2aef08f9475af51146e4a7710c450cf9761598"}, + {file = "numpy-2.1.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:dc258a761a16daa791081d026f0ed4399b582712e6fc887a95af09df10c5ca57"}, + {file = "numpy-2.1.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:016d0f6f5e77b0f0d45d77387ffa4bb89816b57c835580c3ce8e099ef830befe"}, + {file = "numpy-2.1.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c181ba05ce8299c7aa3125c27b9c2167bca4a4445b7ce73d5febc411ca692e43"}, + {file = "numpy-2.1.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5641516794ca9e5f8a4d17bb45446998c6554704d888f86df9b200e66bdcce56"}, + {file = "numpy-2.1.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ea4dedd6e394a9c180b33c2c872b92f7ce0f8e7ad93e9585312b0c5a04777a4a"}, + {file = "numpy-2.1.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b0df3635b9c8ef48bd3be5f862cf71b0a4716fa0e702155c45067c6b711ddcef"}, + {file = "numpy-2.1.3-cp313-cp313-win32.whl", hash = "sha256:50ca6aba6e163363f132b5c101ba078b8cbd3fa92c7865fd7d4d62d9779ac29f"}, + {file = "numpy-2.1.3-cp313-cp313-win_amd64.whl", hash = "sha256:747641635d3d44bcb380d950679462fae44f54b131be347d5ec2bce47d3df9ed"}, + {file = "numpy-2.1.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:996bb9399059c5b82f76b53ff8bb686069c05acc94656bb259b1d63d04a9506f"}, + {file = "numpy-2.1.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:45966d859916ad02b779706bb43b954281db43e185015df6eb3323120188f9e4"}, + 
{file = "numpy-2.1.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:baed7e8d7481bfe0874b566850cb0b85243e982388b7b23348c6db2ee2b2ae8e"}, + {file = "numpy-2.1.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a9f7f672a3388133335589cfca93ed468509cb7b93ba3105fce780d04a6576a0"}, + {file = "numpy-2.1.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7aac50327da5d208db2eec22eb11e491e3fe13d22653dce51b0f4109101b408"}, + {file = "numpy-2.1.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4394bc0dbd074b7f9b52024832d16e019decebf86caf909d94f6b3f77a8ee3b6"}, + {file = "numpy-2.1.3-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:50d18c4358a0a8a53f12a8ba9d772ab2d460321e6a93d6064fc22443d189853f"}, + {file = "numpy-2.1.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:14e253bd43fc6b37af4921b10f6add6925878a42a0c5fe83daee390bca80bc17"}, + {file = "numpy-2.1.3-cp313-cp313t-win32.whl", hash = "sha256:08788d27a5fd867a663f6fc753fd7c3ad7e92747efc73c53bca2f19f8bc06f48"}, + {file = "numpy-2.1.3-cp313-cp313t-win_amd64.whl", hash = "sha256:2564fbdf2b99b3f815f2107c1bbc93e2de8ee655a69c261363a1172a79a257d4"}, + {file = "numpy-2.1.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4f2015dfe437dfebbfce7c85c7b53d81ba49e71ba7eadbf1df40c915af75979f"}, + {file = "numpy-2.1.3-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:3522b0dfe983a575e6a9ab3a4a4dfe156c3e428468ff08ce582b9bb6bd1d71d4"}, + {file = "numpy-2.1.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c006b607a865b07cd981ccb218a04fc86b600411d83d6fc261357f1c0966755d"}, + {file = "numpy-2.1.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e14e26956e6f1696070788252dcdff11b4aca4c3e8bd166e0df1bb8f315a67cb"}, + {file = "numpy-2.1.3.tar.gz", hash = "sha256:aa08e04e08aaf974d4458def539dece0d28146d866a39da5639596f4921fd761"}, +] + [[package]] name = "orjson" version = "3.10.11" @@ -2961,4 +3025,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4.0" -content-hash = "3c680384ebb62b873c70201979927665c8fcce32dbf1df7457d12e83fba15ac2" +content-hash = "02bfef4d884d17f83dde79a4076a3d07f94a1b3dbf2414f07379dab64a78e183" diff --git a/libs/core/pyproject.toml b/libs/core/pyproject.toml index 281560993c764e..eb0b74f1828d79 100644 --- a/libs/core/pyproject.toml +++ b/libs/core/pyproject.toml @@ -1,10 +1,10 @@ [build-system] -requires = [ "poetry-core>=1.0.0",] +requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" [tool.poetry] name = "langchain-core" -version = "0.3.18" +version = "0.3.19" description = "Building applications with LLMs through composability" authors = [] license = "MIT" @@ -12,10 +12,16 @@ readme = "README.md" repository = "https://github.com/langchain-ai/langchain" [tool.mypy] -exclude = [ "notebooks", "examples", "example_data", "langchain_core/pydantic", "tests/unit_tests/utils/test_function_calling.py",] +exclude = [ + "notebooks", + "examples", + "example_data", + "langchain_core/pydantic", + "tests/unit_tests/utils/test_function_calling.py", +] disallow_untyped_defs = "True" [[tool.mypy.overrides]] -module = [ "numpy", "pytest",] +module = ["numpy", "pytest"] ignore_missing_imports = true [tool.ruff] @@ -44,17 +50,53 @@ python = ">=3.12.4" [tool.poetry.extras] [tool.ruff.lint] -select = [ "ASYNC", "B", "C4", "COM", "DJ", "E", "EM", "EXE", "F", "FLY", "FURB", "I", "ICN", "INT", "LOG", "N", "NPY", "PD", "PIE", "Q", "RSE", "S", "SIM", "SLOT", "T10", "T201", "TID", "UP", 
"W", "YTT",] -ignore = [ "COM812", "UP007", "W293", "S101", "S110", "S112",] +select = [ + "ASYNC", + "B", + "C4", + "COM", + "DJ", + "E", + "EM", + "EXE", + "F", + "FLY", + "FURB", + "I", + "ICN", + "INT", + "LOG", + "N", + "NPY", + "PD", + "PIE", + "Q", + "RSE", + "S", + "SIM", + "SLOT", + "T10", + "T201", + "TID", + "UP", + "W", + "YTT", +] +ignore = ["COM812", "UP007", "W293", "S101", "S110", "S112"] [tool.coverage.run] -omit = [ "tests/*",] +omit = ["tests/*"] [tool.pytest.ini_options] addopts = "--snapshot-warn-unused --strict-markers --strict-config --durations=5" -markers = [ "requires: mark tests as requiring a specific library", "compile: mark placeholder test used to compile integration tests without running them",] +markers = [ + "requires: mark tests as requiring a specific library", + "compile: mark placeholder test used to compile integration tests without running them", +] asyncio_mode = "auto" -filterwarnings = [ "ignore::langchain_core._api.beta_decorator.LangChainBetaWarning",] +filterwarnings = [ + "ignore::langchain_core._api.beta_decorator.LangChainBetaWarning", +] [tool.poetry.group.lint] optional = true @@ -72,14 +114,19 @@ optional = true optional = true [tool.ruff.lint.pep8-naming] -classmethod-decorators = [ "classmethod", "langchain_core.utils.pydantic.pre_init", "pydantic.field_validator", "pydantic.v1.root_validator",] +classmethod-decorators = [ + "classmethod", + "langchain_core.utils.pydantic.pre_init", + "pydantic.field_validator", + "pydantic.v1.root_validator", +] [tool.ruff.lint.per-file-ignores] -"tests/unit_tests/prompts/test_chat.py" = [ "E501",] -"tests/unit_tests/runnables/test_runnable.py" = [ "E501",] -"tests/unit_tests/runnables/test_graph.py" = [ "E501",] -"tests/**" = [ "S",] -"scripts/**" = [ "S",] +"tests/unit_tests/prompts/test_chat.py" = ["E501"] +"tests/unit_tests/runnables/test_runnable.py" = ["E501"] +"tests/unit_tests/runnables/test_graph.py" = ["E501"] +"tests/**" = ["S"] +"scripts/**" = ["S"] [tool.poetry.group.lint.dependencies] ruff = "^0.5" @@ -113,7 +160,7 @@ version = "^1.24.0" python = "<3.12" [[tool.poetry.group.test.dependencies.numpy]] -version = "^1.26.0" +version = ">=1.26.0,<3" python = ">=3.12" @@ -125,6 +172,6 @@ path = "../text-splitters" develop = true -[tool.poetry.group.test.dependencies.langchain-standard-tests] +[tool.poetry.group.test.dependencies.langchain-tests] path = "../standard-tests" develop = true diff --git a/libs/langchain/poetry.lock b/libs/langchain/poetry.lock index 3ceb6d136b1bce..7ee448147cd35d 100644 --- a/libs/langchain/poetry.lock +++ b/libs/langchain/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
[[package]] name = "aiohappyeyeballs" @@ -1988,7 +1988,7 @@ files = [ [[package]] name = "langchain-core" -version = "0.3.15" +version = "0.3.19" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.9,<4.0" @@ -2013,7 +2013,7 @@ url = "../core" [[package]] name = "langchain-openai" -version = "0.2.5" +version = "0.2.8" description = "An integration package connecting OpenAI and LangChain" optional = true python-versions = ">=3.9,<4.0" @@ -2021,8 +2021,8 @@ files = [] develop = true [package.dependencies] -langchain-core = "^0.3.15" -openai = "^1.52.0" +langchain-core = "^0.3.17" +openai = "^1.54.0" tiktoken = ">=0.7,<1" [package.source] @@ -2030,8 +2030,8 @@ type = "directory" url = "../partners/openai" [[package]] -name = "langchain-standard-tests" -version = "0.1.1" +name = "langchain-tests" +version = "0.3.1" description = "Standard tests for LangChain implementations" optional = false python-versions = ">=3.9,<4.0" @@ -2040,7 +2040,7 @@ develop = true [package.dependencies] httpx = "^0.27.0" -langchain-core = "^0.3.0" +langchain-core = "^0.3.15" pytest = ">=7,<9" syrupy = "^4" @@ -2050,7 +2050,7 @@ url = "../standard-tests" [[package]] name = "langchain-text-splitters" -version = "0.3.1" +version = "0.3.2" description = "LangChain text splitting utilities" optional = false python-versions = ">=3.9,<4.0" @@ -2575,13 +2575,13 @@ files = [ [[package]] name = "openai" -version = "1.53.0" +version = "1.54.4" description = "The official Python library for the openai API" optional = true -python-versions = ">=3.7.1" +python-versions = ">=3.8" files = [ - {file = "openai-1.53.0-py3-none-any.whl", hash = "sha256:20f408c32fc5cb66e60c6882c994cdca580a5648e10045cd840734194f033418"}, - {file = "openai-1.53.0.tar.gz", hash = "sha256:be2c4e77721b166cce8130e544178b7d579f751b4b074ffbaade3854b6f85ec5"}, + {file = "openai-1.54.4-py3-none-any.whl", hash = "sha256:0d95cef99346bf9b6d7fbf57faf61a673924c3e34fa8af84c9ffe04660673a7e"}, + {file = "openai-1.54.4.tar.gz", hash = "sha256:50f3656e45401c54e973fa05dc29f3f0b0d19348d685b2f7ddb4d92bf7b1b6bf"}, ] [package.dependencies] @@ -4073,61 +4073,14 @@ description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59b8f3adb3971929a3e660337f5dacc5942c2cdb760afcabb2614ffbda9f9f72"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37350015056a553e442ff672c2d20e6f4b6d0b2495691fa239d8aa18bb3bc908"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8318f4776c85abc3f40ab185e388bee7a6ea99e7fa3a30686580b209eaa35c08"}, {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c245b1fbade9c35e5bd3b64270ab49ce990369018289ecfde3f9c318411aaa07"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:69f93723edbca7342624d09f6704e7126b152eaed3cdbb634cb657a54332a3c5"}, {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f9511d8dd4a6e9271d07d150fb2f81874a3c8c95e11ff9af3a2dfc35fe42ee44"}, {file = "SQLAlchemy-2.0.36-cp310-cp310-win32.whl", hash = "sha256:c3f3631693003d8e585d4200730616b78fafd5a01ef8b698f6967da5c605b3fa"}, {file = "SQLAlchemy-2.0.36-cp310-cp310-win_amd64.whl", hash = "sha256:a86bfab2ef46d63300c0f06936bd6e6c0105faa11d509083ba8f2f9d237fb5b5"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_10_9_x86_64.whl", hash 
= "sha256:fd3a55deef00f689ce931d4d1b23fa9f04c880a48ee97af488fd215cf24e2a6c"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f5e9cd989b45b73bd359f693b935364f7e1f79486e29015813c338450aa5a71"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ddd9db6e59c44875211bc4c7953a9f6638b937b0a88ae6d09eb46cced54eff"}, {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2519f3a5d0517fc159afab1015e54bb81b4406c278749779be57a569d8d1bb0d"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59b1ee96617135f6e1d6f275bbe988f419c5178016f3d41d3c0abb0c819f75bb"}, {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:39769a115f730d683b0eb7b694db9789267bcd027326cccc3125e862eb03bfd8"}, {file = "SQLAlchemy-2.0.36-cp311-cp311-win32.whl", hash = "sha256:66bffbad8d6271bb1cc2f9a4ea4f86f80fe5e2e3e501a5ae2a3dc6a76e604e6f"}, {file = "SQLAlchemy-2.0.36-cp311-cp311-win_amd64.whl", hash = "sha256:23623166bfefe1487d81b698c423f8678e80df8b54614c2bf4b4cfcd7c711959"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7b64e6ec3f02c35647be6b4851008b26cff592a95ecb13b6788a54ef80bbdd4"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:46331b00096a6db1fdc052d55b101dbbfc99155a548e20a0e4a8e5e4d1362855"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdf3386a801ea5aba17c6410dd1dc8d39cf454ca2565541b5ac42a84e1e28f53"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9dfa18ff2a67b09b372d5db8743c27966abf0e5344c555d86cc7199f7ad83a"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:90812a8933df713fdf748b355527e3af257a11e415b613dd794512461eb8a686"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-win32.whl", hash = "sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-win_amd64.whl", hash = "sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5cc79df7f4bc3d11e4b542596c03826063092611e481fcf1c9dfee3c94355ef"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3c01117dd36800f2ecaa238c65365b7b16497adc1522bf84906e5710ee9ba0e8"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-win32.whl", hash = "sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-win_amd64.whl", hash = 
"sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:be9812b766cad94a25bc63bec11f88c4ad3629a0cec1cd5d4ba48dc23860486b"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aae840ebbd6cdd41af1c14590e5741665e5272d2fee999306673a1bb1fdb4d"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4557e1f11c5f653ebfdd924f3f9d5ebfc718283b0b9beebaa5dd6b77ec290971"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07b441f7d03b9a66299ce7ccf3ef2900abc81c0db434f42a5694a37bd73870f2"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:28120ef39c92c2dd60f2721af9328479516844c6b550b077ca450c7d7dc68575"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-win32.whl", hash = "sha256:b81ee3d84803fd42d0b154cb6892ae57ea6b7c55d8359a02379965706c7efe6c"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-win_amd64.whl", hash = "sha256:f942a799516184c855e1a32fbc7b29d7e571b52612647866d4ec1c3242578fcb"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3d6718667da04294d7df1670d70eeddd414f313738d20a6f1d1f379e3139a545"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:72c28b84b174ce8af8504ca28ae9347d317f9dba3999e5981a3cd441f3712e24"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b11d0cfdd2b095e7b0686cf5fabeb9c67fae5b06d265d8180715b8cfa86522e3"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e32092c47011d113dc01ab3e1d3ce9f006a47223b18422c5c0d150af13a00687"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6a440293d802d3011028e14e4226da1434b373cbaf4a4bbb63f845761a708346"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c54a1e53a0c308a8e8a7dffb59097bff7facda27c70c286f005327f21b2bd6b1"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-win32.whl", hash = "sha256:1e0d612a17581b6616ff03c8e3d5eff7452f34655c901f75d62bd86449d9750e"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-win_amd64.whl", hash = "sha256:8958b10490125124463095bbdadda5aa22ec799f91958e410438ad6c97a7b793"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dc022184d3e5cacc9579e41805a681187650e170eb2fd70e28b86192a479dcaa"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b817d41d692bf286abc181f8af476c4fbef3fd05e798777492618378448ee689"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4e46a888b54be23d03a89be510f24a7652fe6ff660787b96cd0e57a4ebcb46d"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4ae3005ed83f5967f961fd091f2f8c5329161f69ce8480aa8168b2d7fe37f06"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03e08af7a5f9386a43919eda9de33ffda16b44eb11f3b313e6822243770e9763"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3dbb986bad3ed5ceaf090200eba750b5245150bd97d3e67343a3cfed06feecf7"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-win32.whl", hash = "sha256:9fe53b404f24789b5ea9003fc25b9a3988feddebd7e7b369c8fac27ad6f52f28"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-win_amd64.whl", hash = 
"sha256:af148a33ff0349f53512a049c6406923e4e02bf2f26c5fb285f143faf4f0e46a"}, {file = "SQLAlchemy-2.0.36-py3-none-any.whl", hash = "sha256:fddbe92b4760c6f5d48162aef14824add991aeda8ddadb3c31d56eb15ca69f8e"}, {file = "sqlalchemy-2.0.36.tar.gz", hash = "sha256:7f2767680b6d2398aea7082e45a774b2b0767b5c8d8ffb9c8b683088ea9b29c5"}, ] @@ -4900,4 +4853,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4.0" -content-hash = "7d093bafda2acfc3eae68b365b8a9d3801ad77d6cd47aa8dd3cb8298e3168cb3" +content-hash = "e0b37255a15bdc97b4f164a1c0af09bc89b812aebff988e0ff9728038a09a4b3" diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml index 79fb6cb232565d..cab42740df9b6e 100644 --- a/libs/langchain/pyproject.toml +++ b/libs/langchain/pyproject.toml @@ -1,5 +1,5 @@ [build-system] -requires = [ "poetry-core>=1.0.0",] +requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" [tool.poetry] @@ -12,12 +12,12 @@ readme = "README.md" repository = "https://github.com/langchain-ai/langchain" [tool.ruff] -exclude = [ "tests/integration_tests/examples/non-utf8-encoding.py",] +exclude = ["tests/integration_tests/examples/non-utf8-encoding.py"] [tool.mypy] ignore_missing_imports = "True" disallow_untyped_defs = "True" -exclude = [ "notebooks", "examples", "example_data",] +exclude = ["notebooks", "examples", "example_data"] [tool.codespell] skip = ".git,*.pdf,*.svg,*.pdf,*.yaml,*.ipynb,poetry.lock,*.min.js,*.css,package-lock.json,example_data,_dist,examples,*.trig" @@ -43,24 +43,32 @@ PyYAML = ">=5.3" aiohttp = "^3.8.3" tenacity = ">=8.1.0,!=8.4.0,<10" [[tool.poetry.dependencies.numpy]] -version = "^1" +version = ">=1.22.4,<2" python = "<3.12" [[tool.poetry.dependencies.numpy]] -version = "^1.26.0" +version = ">=1.26.2,<2" python = ">=3.12" [tool.ruff.lint] -select = [ "E", "F", "I", "T201",] +select = ["E", "F", "I", "T201"] [tool.coverage.run] -omit = [ "tests/*",] +omit = ["tests/*"] [tool.pytest.ini_options] addopts = "--strict-markers --strict-config --durations=5 --snapshot-warn-unused -vv" -markers = [ "requires: mark tests as requiring a specific library", "scheduled: mark tests to run in scheduled testing", "compile: mark placeholder test used to compile integration tests without running them",] +markers = [ + "requires: mark tests as requiring a specific library", + "scheduled: mark tests to run in scheduled testing", + "compile: mark placeholder test used to compile integration tests without running them", +] asyncio_mode = "auto" -filterwarnings = [ "ignore::langchain_core._api.beta_decorator.LangChainBetaWarning", "ignore::langchain_core._api.deprecation.LangChainDeprecationWarning:tests", "ignore::langchain_core._api.deprecation.LangChainPendingDeprecationWarning:tests",] +filterwarnings = [ + "ignore::langchain_core._api.beta_decorator.LangChainBetaWarning", + "ignore::langchain_core._api.deprecation.LangChainDeprecationWarning:tests", + "ignore::langchain_core._api.deprecation.LangChainPendingDeprecationWarning:tests", +] [tool.poetry.dependencies.async-timeout] version = "^4.0.0" @@ -142,7 +150,7 @@ jupyter = "^1.0.0" playwright = "^1.28.0" setuptools = "^67.6.1" -[tool.poetry.group.test.dependencies.langchain-standard-tests] +[tool.poetry.group.test.dependencies.langchain-tests] path = "../standard-tests" develop = true diff --git a/libs/langchain/tests/unit_tests/test_dependencies.py b/libs/langchain/tests/unit_tests/test_dependencies.py index 42a6d067de5ac1..664e313e0f5aa1 100644 --- 
a/libs/langchain/tests/unit_tests/test_dependencies.py +++ b/libs/langchain/tests/unit_tests/test_dependencies.py @@ -79,7 +79,7 @@ def test_test_group_dependencies(poetry_conf: Mapping[str, Any]) -> None: "duckdb-engine", "freezegun", "langchain-core", - "langchain-standard-tests", + "langchain-tests", "langchain-text-splitters", "langchain-openai", "lark", diff --git a/libs/partners/anthropic/poetry.lock b/libs/partners/anthropic/poetry.lock index d3359270e4f741..24666fc6657455 100644 --- a/libs/partners/anthropic/poetry.lock +++ b/libs/partners/anthropic/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. [[package]] name = "annotated-types" @@ -451,7 +451,7 @@ files = [ [[package]] name = "langchain-core" -version = "0.3.17" +version = "0.3.19" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.9,<4.0" @@ -475,8 +475,8 @@ type = "directory" url = "../../core" [[package]] -name = "langchain-standard-tests" -version = "0.3.0" +name = "langchain-tests" +version = "0.3.1" description = "Standard tests for LangChain implementations" optional = false python-versions = ">=3.9,<4.0" @@ -1140,4 +1140,4 @@ watchmedo = ["PyYAML (>=3.10)"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4.0" -content-hash = "c646c08244b1b08c334f6ba7baca5e6fa78d801eae4f957f2770a190c3b9f2ac" +content-hash = "ee4aaa06307b4dc7f7913147bf58f3f36245193c9d4a79c43aba07641f7b6ab9" diff --git a/libs/partners/anthropic/pyproject.toml b/libs/partners/anthropic/pyproject.toml index a5c64e78ee6a71..cda312230357fa 100644 --- a/libs/partners/anthropic/pyproject.toml +++ b/libs/partners/anthropic/pyproject.toml @@ -79,7 +79,7 @@ mypy = "^1.10" path = "../../core" develop = true -[tool.poetry.group.test.dependencies.langchain-standard-tests] +[tool.poetry.group.test.dependencies.langchain-tests] path = "../../standard-tests" develop = true diff --git a/libs/partners/fireworks/poetry.lock b/libs/partners/fireworks/poetry.lock index 33b0cdf669c5ad..8c210328124648 100644 --- a/libs/partners/fireworks/poetry.lock +++ b/libs/partners/fireworks/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
[[package]] name = "aiohappyeyeballs" @@ -670,7 +670,7 @@ files = [ [[package]] name = "langchain-core" -version = "0.3.15" +version = "0.3.19" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.9,<4.0" @@ -694,8 +694,8 @@ type = "directory" url = "../../core" [[package]] -name = "langchain-standard-tests" -version = "0.1.1" +name = "langchain-tests" +version = "0.3.1" description = "Standard tests for LangChain implementations" optional = false python-versions = ">=3.9,<4.0" @@ -704,7 +704,7 @@ develop = true [package.dependencies] httpx = "^0.27.0" -langchain-core = "^0.3.0" +langchain-core = "^0.3.15" pytest = ">=7,<9" syrupy = "^4" @@ -1716,4 +1716,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = ">=3.9,<4.0" -content-hash = "9664344ff6792f26b7b2067fd85cfe111d31cf6ba40842297f0c8b36431a9028" +content-hash = "40e27b8e7c09a02e0ce22d7134796a59586693612c784be8d17cea70d639f3d2" diff --git a/libs/partners/fireworks/pyproject.toml b/libs/partners/fireworks/pyproject.toml index e51c921fbbf6e8..4b60c14251b2d3 100644 --- a/libs/partners/fireworks/pyproject.toml +++ b/libs/partners/fireworks/pyproject.toml @@ -79,7 +79,7 @@ types-requests = "^2" path = "../../core" develop = true -[tool.poetry.group.test.dependencies.langchain-standard-tests] +[tool.poetry.group.test.dependencies.langchain-tests] path = "../../standard-tests" develop = true diff --git a/libs/partners/groq/poetry.lock b/libs/partners/groq/poetry.lock index 0cb0849215ee9c..cb3b6c1e60671f 100644 --- a/libs/partners/groq/poetry.lock +++ b/libs/partners/groq/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -339,7 +339,7 @@ files = [ [[package]] name = "langchain-core" -version = "0.3.15" +version = "0.3.19" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.9,<4.0" @@ -363,8 +363,8 @@ type = "directory" url = "../../core" [[package]] -name = "langchain-standard-tests" -version = "0.1.1" +name = "langchain-tests" +version = "0.3.1" description = "Standard tests for LangChain implementations" optional = false python-versions = ">=3.9,<4.0" @@ -373,7 +373,7 @@ develop = true [package.dependencies] httpx = "^0.27.0" -langchain-core = "^0.3.0" +langchain-core = "^0.3.15" pytest = ">=7,<9" syrupy = "^4" @@ -1003,4 +1003,4 @@ watchmedo = ["PyYAML (>=3.10)"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4.0" -content-hash = "23a501c80ee0d64d676e493060f0cb6a3679d50a5f72e353b32a003c2c996260" +content-hash = "a990f45be00407eb6dcb11b1db4852dad61f9c8999f402b9a18f6029f905c9f3" diff --git a/libs/partners/groq/pyproject.toml b/libs/partners/groq/pyproject.toml index b4c0fd272ea8af..a1687e123f9cde 100644 --- a/libs/partners/groq/pyproject.toml +++ b/libs/partners/groq/pyproject.toml @@ -71,7 +71,7 @@ mypy = "^1.10" path = "../../core" develop = true -[tool.poetry.group.test.dependencies.langchain-standard-tests] +[tool.poetry.group.test.dependencies.langchain-tests] path = "../../standard-tests" develop = true diff --git a/libs/partners/huggingface/poetry.lock b/libs/partners/huggingface/poetry.lock index 16d86f7747c557..245d3723c28ca5 100644 --- a/libs/partners/huggingface/poetry.lock +++ b/libs/partners/huggingface/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -1146,19 +1146,19 @@ test = ["ipykernel", "pre-commit", "pytest (<8)", "pytest-cov", "pytest-timeout" [[package]] name = "langchain" -version = "0.3.6" +version = "0.3.7" description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.9" files = [ - {file = "langchain-0.3.6-py3-none-any.whl", hash = "sha256:6e453f6c26dfd8f800ba5eb3ecbd21d283d5b6ccad422179b2933a962c1a6563"}, - {file = "langchain-0.3.6.tar.gz", hash = "sha256:0b0e2dc3be7b49eb3ca2aa21341bb204ed74450e34b3041345820454e21bcdc8"}, + {file = "langchain-0.3.7-py3-none-any.whl", hash = "sha256:cf4af1d5751dacdc278df3de1ff3cbbd8ca7eb55d39deadccdd7fb3d3ee02ac0"}, + {file = "langchain-0.3.7.tar.gz", hash = "sha256:2e4f83bf794ba38562f7ba0ede8171d7e28a583c0cec6f8595cfe72147d336b2"}, ] [package.dependencies] aiohttp = ">=3.8.3,<4.0.0" async-timeout = {version = ">=4.0.0,<5.0.0", markers = "python_version < \"3.11\""} -langchain-core = ">=0.3.14,<0.4.0" +langchain-core = ">=0.3.15,<0.4.0" langchain-text-splitters = ">=0.3.0,<0.4.0" langsmith = ">=0.1.17,<0.2.0" numpy = [ @@ -1173,7 +1173,7 @@ tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<10" [[package]] name = "langchain-community" -version = "0.3.4" +version = "0.3.7" description = "Community contributed LangChain integrations." 
optional = false python-versions = ">=3.9,<4.0" @@ -1184,12 +1184,12 @@ develop = true aiohttp = "^3.8.3" dataclasses-json = ">= 0.5.7, < 0.7" httpx-sse = "^0.4.0" -langchain = "^0.3.6" -langchain-core = "^0.3.14" +langchain = "^0.3.7" +langchain-core = "^0.3.17" langsmith = "^0.1.125" numpy = [ - {version = ">=1,<2", markers = "python_version < \"3.12\""}, - {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""}, + {version = ">=1.22.4,<2", markers = "python_version < \"3.12\""}, + {version = ">=1.26.2,<2", markers = "python_version >= \"3.12\""}, ] pydantic-settings = "^2.4.0" PyYAML = ">=5.3" @@ -1203,7 +1203,7 @@ url = "../../community" [[package]] name = "langchain-core" -version = "0.3.15" +version = "0.3.19" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.9,<4.0" @@ -1227,8 +1227,8 @@ type = "directory" url = "../../core" [[package]] -name = "langchain-standard-tests" -version = "0.1.1" +name = "langchain-tests" +version = "0.3.1" description = "Standard tests for LangChain implementations" optional = false python-versions = ">=3.9,<4.0" @@ -1237,7 +1237,7 @@ develop = true [package.dependencies] httpx = "^0.27.0" -langchain-core = "^0.3.0" +langchain-core = "^0.3.15" pytest = ">=7,<9" syrupy = "^4" @@ -1780,7 +1780,6 @@ description = "Nvidia JIT LTO Library" optional = false python-versions = ">=3" files = [ - {file = "nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:4abe7fef64914ccfa909bc2ba39739670ecc9e820c83ccc7a6ed414122599b83"}, {file = "nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:06b3b9b25bf3f8af351d664978ca26a16d2c5127dbd53c0497e28d1fb9611d57"}, {file = "nvidia_nvjitlink_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:fd9020c501d27d135f983c6d3e244b197a7ccad769e34df53a42e276b0e25fa1"}, ] @@ -2984,6 +2983,11 @@ files = [ {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f60021ec1574e56632be2a36b946f8143bf4e5e6af4a06d85281adc22938e0dd"}, {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:394397841449853c2290a32050382edaec3da89e35b3e03d6cc966aebc6a8ae6"}, {file = "scikit_learn-1.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:57cc1786cfd6bd118220a92ede80270132aa353647684efa385a74244a41e3b1"}, + {file = "scikit_learn-1.5.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9a702e2de732bbb20d3bad29ebd77fc05a6b427dc49964300340e4c9328b3f5"}, + {file = "scikit_learn-1.5.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:b0768ad641981f5d3a198430a1d31c3e044ed2e8a6f22166b4d546a5116d7908"}, + {file = "scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:178ddd0a5cb0044464fc1bfc4cca5b1833bfc7bb022d70b05db8530da4bb3dd3"}, + {file = "scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7284ade780084d94505632241bf78c44ab3b6f1e8ccab3d2af58e0e950f9c12"}, + {file = "scikit_learn-1.5.2-cp313-cp313-win_amd64.whl", hash = "sha256:b7b0f9a0b1040830d38c39b91b3a44e1b643f4b36e36567b80b7c6bd2202a27f"}, {file = "scikit_learn-1.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:757c7d514ddb00ae249832fe87100d9c73c6ea91423802872d9e74970a0e40b9"}, {file = "scikit_learn-1.5.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:52788f48b5d8bca5c0736c175fa6bdaab2ef00a8f536cda698db61bd89c551c1"}, {file = 
"scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:643964678f4b5fbdc95cbf8aec638acc7aa70f5f79ee2cdad1eec3df4ba6ead8"}, @@ -3871,4 +3875,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4.0" -content-hash = "f1f436e0e520800571d8861bca1da02f1ae5dd67b449f680e46a2c3e5ee70933" +content-hash = "2d468da765374084b6ad008d1b3e6d4d12f3c1d696cea84f2245f49012691272" diff --git a/libs/partners/huggingface/pyproject.toml b/libs/partners/huggingface/pyproject.toml index 08d8c8f83818a2..c725ed90afe793 100644 --- a/libs/partners/huggingface/pyproject.toml +++ b/libs/partners/huggingface/pyproject.toml @@ -94,7 +94,7 @@ mypy = "^1.10" path = "../../core" develop = true -[tool.poetry.group.test.dependencies.langchain-standard-tests] +[tool.poetry.group.test.dependencies.langchain-tests] path = "../../standard-tests" develop = true diff --git a/libs/partners/huggingface/tests/integration_tests/test_standard.py b/libs/partners/huggingface/tests/integration_tests/test_standard.py index fd120f3f0f63af..34392e979f437c 100644 --- a/libs/partners/huggingface/tests/integration_tests/test_standard.py +++ b/libs/partners/huggingface/tests/integration_tests/test_standard.py @@ -53,6 +53,10 @@ def test_stop_sequence(self, model: BaseChatModel) -> None: def test_tool_calling(self, model: BaseChatModel) -> None: super().test_tool_calling(model) + @pytest.mark.xfail(reason=("Not implemented")) + async def test_tool_calling_async(self, model: BaseChatModel) -> None: + await super().test_tool_calling_async(model) + @pytest.mark.xfail(reason=("Not implemented")) def test_tool_calling_with_no_arguments(self, model: BaseChatModel) -> None: super().test_tool_calling_with_no_arguments(model) diff --git a/libs/partners/mistralai/poetry.lock b/libs/partners/mistralai/poetry.lock index dfa60b117b3811..f6cffb97889e7e 100644 --- a/libs/partners/mistralai/poetry.lock +++ b/libs/partners/mistralai/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -409,7 +409,7 @@ files = [ [[package]] name = "langchain-core" -version = "0.3.15" +version = "0.3.19" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.9,<4.0" @@ -433,8 +433,8 @@ type = "directory" url = "../../core" [[package]] -name = "langchain-standard-tests" -version = "0.1.1" +name = "langchain-tests" +version = "0.3.1" description = "Standard tests for LangChain implementations" optional = false python-versions = ">=3.9,<4.0" @@ -443,7 +443,7 @@ develop = true [package.dependencies] httpx = "^0.27.0" -langchain-core = "^0.3.0" +langchain-core = "^0.3.15" pytest = ">=7,<9" syrupy = "^4" @@ -1193,4 +1193,4 @@ watchmedo = ["PyYAML (>=3.10)"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4.0" -content-hash = "7501ae59586e8c4ed95ec24121170926ff9ab5580665a80a7b7ae2efcbff5f18" +content-hash = "4a86b5a8d678aa583d949272e47d16b9a1ecde22ce590d903bcb1a386dc4ecdf" diff --git a/libs/partners/mistralai/pyproject.toml b/libs/partners/mistralai/pyproject.toml index 357302642c7d6e..b0e4d83cceee84 100644 --- a/libs/partners/mistralai/pyproject.toml +++ b/libs/partners/mistralai/pyproject.toml @@ -75,7 +75,7 @@ mypy = "^1.10" path = "../../core" develop = true -[tool.poetry.group.test.dependencies.langchain-standard-tests] +[tool.poetry.group.test.dependencies.langchain-tests] path = "../../standard-tests" develop = true diff --git a/libs/partners/ollama/langchain_ollama/chat_models.py b/libs/partners/ollama/langchain_ollama/chat_models.py index 7f887280b000dc..d6ece0d66a854e 100644 --- a/libs/partners/ollama/langchain_ollama/chat_models.py +++ b/libs/partners/ollama/langchain_ollama/chat_models.py @@ -327,7 +327,7 @@ class Multiply(BaseModel): """Base url the model is hosted under.""" client_kwargs: Optional[dict] = {} - """Additional kwargs to pass to the httpx Client. + """Additional kwargs to pass to the httpx Client. 
For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html) """ @@ -475,26 +475,27 @@ async def _acreate_chat_stream( params[key] = kwargs[key] params["options"]["stop"] = stop - if "tools" in kwargs: - yield await self._async_client.chat( - model=params["model"], - messages=ollama_messages, - stream=False, - options=Options(**params["options"]), - keep_alive=params["keep_alive"], - format=params["format"], - tools=kwargs["tools"], - ) # type:ignore - else: - async for part in await self._async_client.chat( - model=params["model"], - messages=ollama_messages, - stream=True, - options=Options(**params["options"]), - keep_alive=params["keep_alive"], - format=params["format"], - ): # type:ignore + + tools = kwargs.get("tools", None) + stream = tools is None or len(tools) == 0 + + chat_params = { + "model": params["model"], + "messages": ollama_messages, + "stream": stream, + "options": Options(**params["options"]), + "keep_alive": params["keep_alive"], + "format": params["format"], + } + + if tools is not None: + chat_params["tools"] = tools + + if stream: + async for part in await self._async_client.chat(**chat_params): yield part + else: + yield await self._async_client.chat(**chat_params) def _create_chat_stream( self, @@ -513,25 +514,26 @@ def _create_chat_stream( params[key] = kwargs[key] params["options"]["stop"] = stop - if "tools" in kwargs: - yield self._client.chat( - model=params["model"], - messages=ollama_messages, - stream=False, - options=Options(**params["options"]), - keep_alive=params["keep_alive"], - format=params["format"], - tools=kwargs["tools"], - ) + + tools = kwargs.get("tools", None) + stream = tools is None or len(tools) == 0 + + chat_params = { + "model": params["model"], + "messages": ollama_messages, + "stream": stream, + "options": Options(**params["options"]), + "keep_alive": params["keep_alive"], + "format": params["format"], + } + + if tools is not None: + chat_params["tools"] = tools + + if stream: + yield from self._client.chat(**chat_params) else: - yield from self._client.chat( - model=params["model"], - messages=ollama_messages, - stream=True, - options=Options(**params["options"]), - keep_alive=params["keep_alive"], - format=params["format"], - ) + yield self._client.chat(**chat_params) def _chat_stream_with_aggregation( self, diff --git a/libs/partners/ollama/poetry.lock b/libs/partners/ollama/poetry.lock index cbc19380975422..e07a4ff15f36c8 100644 --- a/libs/partners/ollama/poetry.lock +++ b/libs/partners/ollama/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -309,7 +309,7 @@ files = [ [[package]] name = "langchain-core" -version = "0.3.15" +version = "0.3.19" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.9,<4.0" @@ -333,8 +333,8 @@ type = "directory" url = "../../core" [[package]] -name = "langchain-standard-tests" -version = "0.1.1" +name = "langchain-tests" +version = "0.3.1" description = "Standard tests for LangChain implementations" optional = false python-versions = ">=3.9,<4.0" @@ -343,7 +343,7 @@ develop = true [package.dependencies] httpx = "^0.27.0" -langchain-core = "^0.3.0" +langchain-core = "^0.3.15" pytest = ">=7,<9" syrupy = "^4" @@ -983,4 +983,4 @@ watchmedo = ["PyYAML (>=3.10)"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4.0" -content-hash = "ca6805780e6e50b39b5cf93779982517d384e6acb5445d75008b5fc4f469a9fc" +content-hash = "a5c369fbd4c7e6fea27200bec1537de641ac19b5ede12eaefec73d3461a440c7" diff --git a/libs/partners/ollama/pyproject.toml b/libs/partners/ollama/pyproject.toml index 0a995d7a4ee086..9ec652e57d3ea0 100644 --- a/libs/partners/ollama/pyproject.toml +++ b/libs/partners/ollama/pyproject.toml @@ -37,7 +37,7 @@ select = [ convention = "google" [tool.ruff.lint.per-file-ignores] -"tests/**" = ["D"] # ignore docstring checks for tests +"tests/**" = ["D"] # ignore docstring checks for tests [tool.coverage.run] omit = ["tests/*"] @@ -45,7 +45,7 @@ omit = ["tests/*"] [tool.pytest.ini_options] addopts = "--snapshot-warn-unused --strict-markers --strict-config --durations=5" markers = [ - "compile: mark placeholder test used to compile integration tests without running them", + "compile: mark placeholder test used to compile integration tests without running them", ] asyncio_mode = "auto" @@ -86,7 +86,7 @@ mypy = "^1.7.1" path = "../../core" develop = true -[tool.poetry.group.test.dependencies.langchain-standard-tests] +[tool.poetry.group.test.dependencies.langchain-tests] path = "../../standard-tests" develop = true diff --git a/libs/partners/openai/poetry.lock b/libs/partners/openai/poetry.lock index 1f724756d7ff08..17cd5f497af25e 100644 --- a/libs/partners/openai/poetry.lock +++ b/libs/partners/openai/poetry.lock @@ -493,7 +493,7 @@ files = [ [[package]] name = "langchain-core" -version = "0.3.17" +version = "0.3.19" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.9,<4.0" @@ -517,8 +517,8 @@ type = "directory" url = "../../core" [[package]] -name = "langchain-standard-tests" -version = "0.3.0" +name = "langchain-tests" +version = "0.3.1" description = "Standard tests for LangChain implementations" optional = false python-versions = ">=3.9,<4.0" @@ -1562,4 +1562,4 @@ watchmedo = ["PyYAML (>=3.10)"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4.0" -content-hash = "193b209048042991c70a06289c41e67d44cc104c970a70857beacc8a51f0459b" +content-hash = "79ff1e8bd2a97aefd574186299e51f9e2487ee51bcde1e199d6e786c3fdde8ea" diff --git a/libs/partners/openai/pyproject.toml b/libs/partners/openai/pyproject.toml index 8acfd163d11972..492cbc63eac4b1 100644 --- a/libs/partners/openai/pyproject.toml +++ b/libs/partners/openai/pyproject.toml @@ -1,5 +1,5 @@ [build-system] -requires = [ "poetry-core>=1.0.0",] +requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" [tool.poetry] @@ -28,20 +28,26 @@ openai = "^1.54.0" tiktoken = ">=0.7,<1" [tool.ruff.lint] -select = [ "E", "F", "I", "T201",] +select = ["E", "F", "I", "T201"] 
[tool.ruff.format] docstring-code-format = true skip-magic-trailing-comma = true [tool.coverage.run] -omit = [ "tests/*",] +omit = ["tests/*"] [tool.pytest.ini_options] addopts = "--snapshot-warn-unused --strict-markers --strict-config --durations=5 --cov=langchain_openai" -markers = [ "requires: mark tests as requiring a specific library", "compile: mark placeholder test used to compile integration tests without running them", "scheduled: mark tests to run in scheduled testing",] +markers = [ + "requires: mark tests as requiring a specific library", + "compile: mark placeholder test used to compile integration tests without running them", + "scheduled: mark tests to run in scheduled testing", +] asyncio_mode = "auto" -filterwarnings = [ "ignore::langchain_core._api.beta_decorator.LangChainBetaWarning",] +filterwarnings = [ + "ignore::langchain_core._api.beta_decorator.LangChainBetaWarning", +] [tool.poetry.group.test] optional = true @@ -100,7 +106,7 @@ types-tqdm = "^4.66.0.5" path = "../../core" develop = true -[tool.poetry.group.test.dependencies.langchain-standard-tests] +[tool.poetry.group.test.dependencies.langchain-tests] path = "../../standard-tests" develop = true diff --git a/libs/partners/xai/poetry.lock b/libs/partners/xai/poetry.lock index 0502c9065a3b8b..4d72a194467c2e 100644 --- a/libs/partners/xai/poetry.lock +++ b/libs/partners/xai/poetry.lock @@ -720,7 +720,7 @@ files = [ [[package]] name = "langchain-core" -version = "0.3.18" +version = "0.3.19" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.9,<4.0" @@ -762,8 +762,8 @@ type = "directory" url = "../openai" [[package]] -name = "langchain-standard-tests" -version = "0.3.0" +name = "langchain-tests" +version = "0.3.1" description = "Standard tests for LangChain implementations" optional = false python-versions = ">=3.9,<4.0" @@ -2071,4 +2071,4 @@ propcache = ">=0.2.0" [metadata] lock-version = "2.0" python-versions = ">=3.9,<4.0" -content-hash = "1f77714ec9420ce148ab90de835585ddf87ce68f2d28240f63fcea47c9bccf6c" +content-hash = "df184c08b8561a31c587e9fd643cd80fa9b9fee422dfe73e0243bafe6e0e6964" diff --git a/libs/partners/xai/pyproject.toml b/libs/partners/xai/pyproject.toml index a6dbc23286c5c8..150093830a0460 100644 --- a/libs/partners/xai/pyproject.toml +++ b/libs/partners/xai/pyproject.toml @@ -74,7 +74,7 @@ pytest-asyncio = "^0.21.1" docarray = "^0.32.1" langchain-openai = { path = "../openai", develop = true } langchain-core = { path = "../../core", develop = true } -langchain-standard-tests = { path = "../../standard-tests", develop = true } +langchain-tests = { path = "../../standard-tests", develop = true } [tool.poetry.group.codespell.dependencies] codespell = "^2.2.0" diff --git a/libs/standard-tests/Makefile b/libs/standard-tests/Makefile index da9f71396263ce..a48f80928b993c 100644 --- a/libs/standard-tests/Makefile +++ b/libs/standard-tests/Makefile @@ -30,13 +30,13 @@ lint_tests: PYTHON_FILES=tests lint_tests: MYPY_CACHE=.mypy_cache_test lint lint_diff lint_package lint_tests: - [ "$(PYTHON_FILES)" = "" ] || poetry run ruff $(PYTHON_FILES) + [ "$(PYTHON_FILES)" = "" ] || poetry run ruff check $(PYTHON_FILES) [ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES) --diff [ "$(PYTHON_FILES)" = "" ] || mkdir -p $(MYPY_CACHE) && poetry run mypy $(PYTHON_FILES) --cache-dir $(MYPY_CACHE) format format_diff: [ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES) - [ "$(PYTHON_FILES)" = "" ] || poetry run ruff --select I --fix 
$(PYTHON_FILES) + [ "$(PYTHON_FILES)" = "" ] || poetry run ruff check --select I --fix $(PYTHON_FILES) spell_check: poetry run codespell --toml pyproject.toml diff --git a/libs/standard-tests/README.md b/libs/standard-tests/README.md index e7fbcce731d4e8..0060203a1e2bea 100644 --- a/libs/standard-tests/README.md +++ b/libs/standard-tests/README.md @@ -1,23 +1,27 @@ -# langchain-standard-tests +# langchain-tests -This is an INTERNAL library for the LangChain project. It contains the base classes for +This is a testing library for LangChain integrations. It contains the base classes for a standard set of tests. ## Installation -This package will NOT be regularly published to pypi. It is intended to be installed -directly from github at test time. +We encourage pinning your version to a specific version in order to avoid breaking +your CI when we publish new tests. We recommend upgrading to the latest version +periodically to make sure you have the latest tests. + +Not pinning your version will ensure you always have the latest tests, but it may +also break your CI if we introduce tests that your integration doesn't pass. Pip: ```bash - pip install git+https://github.com/langchain-ai/langchain.git#subdirectory=libs/standard-tests + pip install -U langchain-tests ``` Poetry: ```bash - poetry add git+https://github.com/langchain-ai/langchain.git#subdirectory=libs/standard-tests + poetry add langchain-tests ``` ## Usage diff --git a/libs/standard-tests/langchain_standard_tests/integration_tests/__init__.py b/libs/standard-tests/langchain_standard_tests/integration_tests/__init__.py index ef9b6b05acfb86..ec26de72a4a7ec 100644 --- a/libs/standard-tests/langchain_standard_tests/integration_tests/__init__.py +++ b/libs/standard-tests/langchain_standard_tests/integration_tests/__init__.py @@ -10,6 +10,7 @@ "chat_models", "vectorstores", "embeddings", + "tools", ] for module in modules: @@ -17,14 +18,21 @@ f"langchain_standard_tests.integration_tests.{module}" ) -from langchain_standard_tests.integration_tests.chat_models import ( - ChatModelIntegrationTests, -) -from langchain_standard_tests.integration_tests.embeddings import ( - EmbeddingsIntegrationTests, -) +from .base_store import BaseStoreAsyncTests, BaseStoreSyncTests +from .cache import AsyncCacheTestSuite, SyncCacheTestSuite +from .chat_models import ChatModelIntegrationTests +from .embeddings import EmbeddingsIntegrationTests +from .tools import ToolsIntegrationTests +from .vectorstores import AsyncReadWriteTestSuite, ReadWriteTestSuite __all__ = [ "ChatModelIntegrationTests", "EmbeddingsIntegrationTests", + "ToolsIntegrationTests", + "BaseStoreAsyncTests", + "BaseStoreSyncTests", + "AsyncCacheTestSuite", + "SyncCacheTestSuite", + "AsyncReadWriteTestSuite", + "ReadWriteTestSuite", ] diff --git a/libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py b/libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py index 5c0c8c6628c041..9eea91aebbe44c 100644 --- a/libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py +++ b/libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py @@ -286,6 +286,27 @@ def test_tool_calling(self, model: BaseChatModel) -> None: assert isinstance(full, AIMessage) _validate_tool_call_message(full) + async def test_tool_calling_async(self, model: BaseChatModel) -> None: + if not self.has_tool_calling: + pytest.skip("Test requires tool calling.") + if self.tool_choice_value == "tool_name": + tool_choice: Optional[str] = 
"magic_function" + else: + tool_choice = self.tool_choice_value + model_with_tools = model.bind_tools([magic_function], tool_choice=tool_choice) + + # Test ainvoke + query = "What is the value of magic_function(3)? Use the tool." + result = await model_with_tools.ainvoke(query) + _validate_tool_call_message(result) + + # Test astream + full: Optional[BaseMessageChunk] = None + async for chunk in model_with_tools.astream(query): + full = chunk if full is None else full + chunk # type: ignore + assert isinstance(full, AIMessage) + _validate_tool_call_message(full) + def test_tool_calling_with_no_arguments(self, model: BaseChatModel) -> None: if not self.has_tool_calling: pytest.skip("Test requires tool calling.") diff --git a/libs/standard-tests/langchain_standard_tests/unit_tests/__init__.py b/libs/standard-tests/langchain_standard_tests/unit_tests/__init__.py index 14715260ea5398..df94c79ae61ccd 100644 --- a/libs/standard-tests/langchain_standard_tests/unit_tests/__init__.py +++ b/libs/standard-tests/langchain_standard_tests/unit_tests/__init__.py @@ -7,11 +7,14 @@ modules = [ "chat_models", "embeddings", + "tools", ] for module in modules: pytest.register_assert_rewrite(f"langchain_standard_tests.unit_tests.{module}") -from langchain_standard_tests.unit_tests.chat_models import ChatModelUnitTests +from .chat_models import ChatModelUnitTests +from .embeddings import EmbeddingsUnitTests +from .tools import ToolsUnitTests -__all__ = ["ChatModelUnitTests", "EmbeddingsUnitTests"] +__all__ = ["ChatModelUnitTests", "EmbeddingsUnitTests", "ToolsUnitTests"] diff --git a/libs/standard-tests/langchain_standard_tests/unit_tests/chat_models.py b/libs/standard-tests/langchain_standard_tests/unit_tests/chat_models.py index 1321eb6215188a..9bde7fbf3e1a53 100644 --- a/libs/standard-tests/langchain_standard_tests/unit_tests/chat_models.py +++ b/libs/standard-tests/langchain_standard_tests/unit_tests/chat_models.py @@ -79,8 +79,7 @@ def my_adder(a: int, b: int) -> int: class ChatModelTests(BaseStandardTests): @property @abstractmethod - def chat_model_class(self) -> Type[BaseChatModel]: - ... + def chat_model_class(self) -> Type[BaseChatModel]: ... @property def chat_model_params(self) -> dict: @@ -244,17 +243,19 @@ class ExpectedParams(BaseModelV1): ls_params = model._get_ls_params() try: - ExpectedParams(**ls_params) + ExpectedParams(**ls_params) # type: ignore except ValidationErrorV1 as e: pytest.fail(f"Validation error: {e}") # Test optional params model = self.chat_model_class( - max_tokens=10, stop=["test"], **self.chat_model_params + max_tokens=10, + stop=["test"], + **self.chat_model_params, # type: ignore ) ls_params = model._get_ls_params() try: - ExpectedParams(**ls_params) + ExpectedParams(**ls_params) # type: ignore except ValidationErrorV1 as e: pytest.fail(f"Validation error: {e}") diff --git a/libs/standard-tests/langchain_standard_tests/unit_tests/embeddings.py b/libs/standard-tests/langchain_standard_tests/unit_tests/embeddings.py index 4638af51745be4..39c6e941c5d7e1 100644 --- a/libs/standard-tests/langchain_standard_tests/unit_tests/embeddings.py +++ b/libs/standard-tests/langchain_standard_tests/unit_tests/embeddings.py @@ -13,8 +13,7 @@ class EmbeddingsTests(BaseStandardTests): @property @abstractmethod - def embeddings_class(self) -> Type[Embeddings]: - ... + def embeddings_class(self) -> Type[Embeddings]: ... 
@property def embedding_model_params(self) -> dict: diff --git a/libs/standard-tests/langchain_standard_tests/unit_tests/tools.py b/libs/standard-tests/langchain_standard_tests/unit_tests/tools.py index 61f54e23bec8ea..0decc51230fbf5 100644 --- a/libs/standard-tests/langchain_standard_tests/unit_tests/tools.py +++ b/libs/standard-tests/langchain_standard_tests/unit_tests/tools.py @@ -13,8 +13,7 @@ class ToolsTests(BaseStandardTests): @property @abstractmethod - def tool_constructor(self) -> Union[Type[BaseTool], Callable]: - ... + def tool_constructor(self) -> Union[Type[BaseTool], Callable]: ... @property def tool_constructor_params(self) -> dict: diff --git a/libs/standard-tests/poetry.lock b/libs/standard-tests/poetry.lock index 0c74b37a6cb130..1448688ea4b7de 100644 --- a/libs/standard-tests/poetry.lock +++ b/libs/standard-tests/poetry.lock @@ -213,13 +213,13 @@ files = [ [[package]] name = "httpcore" -version = "1.0.6" +version = "1.0.7" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"}, - {file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"}, + {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, + {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, ] [package.dependencies] @@ -309,7 +309,7 @@ files = [ [[package]] name = "langchain-core" -version = "0.3.15" +version = "0.3.19" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.9,<4.0" @@ -334,13 +334,13 @@ url = "../core" [[package]] name = "langsmith" -version = "0.1.139" +version = "0.1.143" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.139-py3-none-any.whl", hash = "sha256:2a4a541bfbd0a9727255df28a60048c85bc8c4c6a276975923785c3fd82dc879"}, - {file = "langsmith-0.1.139.tar.gz", hash = "sha256:2f9e4d32fef3ad7ef42c8506448cce3a31ad6b78bb4f3310db04ddaa1e9d744d"}, + {file = "langsmith-0.1.143-py3-none-any.whl", hash = "sha256:ba0d827269e9b03a90fababe41fa3e4e3f833300b95add10184f7e67167dde6f"}, + {file = "langsmith-0.1.143.tar.gz", hash = "sha256:4c5159e5cd84b3f8499433009e72d2076dd2daf6c044ac8a3611b30d0d0161c5"}, ] [package.dependencies] @@ -355,52 +355,55 @@ requests-toolbelt = ">=1.0.0,<2.0.0" [[package]] name = "mypy" -version = "0.991" +version = "1.13.0" description = "Optional static typing for Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "mypy-0.991-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7d17e0a9707d0772f4a7b878f04b4fd11f6f5bcb9b3813975a9b13c9332153ab"}, - {file = "mypy-0.991-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0714258640194d75677e86c786e80ccf294972cc76885d3ebbb560f11db0003d"}, - {file = "mypy-0.991-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0c8f3be99e8a8bd403caa8c03be619544bc2c77a7093685dcf308c6b109426c6"}, - {file = "mypy-0.991-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc9ec663ed6c8f15f4ae9d3c04c989b744436c16d26580eaa760ae9dd5d662eb"}, - {file = "mypy-0.991-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4307270436fd7694b41f913eb09210faff27ea4979ecbcd849e57d2da2f65305"}, - {file = "mypy-0.991-cp310-cp310-win_amd64.whl", hash = "sha256:901c2c269c616e6cb0998b33d4adbb4a6af0ac4ce5cd078afd7bc95830e62c1c"}, - {file = "mypy-0.991-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d13674f3fb73805ba0c45eb6c0c3053d218aa1f7abead6e446d474529aafc372"}, - {file = "mypy-0.991-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1c8cd4fb70e8584ca1ed5805cbc7c017a3d1a29fb450621089ffed3e99d1857f"}, - {file = "mypy-0.991-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:209ee89fbb0deed518605edddd234af80506aec932ad28d73c08f1400ef80a33"}, - {file = "mypy-0.991-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37bd02ebf9d10e05b00d71302d2c2e6ca333e6c2a8584a98c00e038db8121f05"}, - {file = "mypy-0.991-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:26efb2fcc6b67e4d5a55561f39176821d2adf88f2745ddc72751b7890f3194ad"}, - {file = "mypy-0.991-cp311-cp311-win_amd64.whl", hash = "sha256:3a700330b567114b673cf8ee7388e949f843b356a73b5ab22dd7cff4742a5297"}, - {file = "mypy-0.991-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1f7d1a520373e2272b10796c3ff721ea1a0712288cafaa95931e66aa15798813"}, - {file = "mypy-0.991-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:641411733b127c3e0dab94c45af15fea99e4468f99ac88b39efb1ad677da5711"}, - {file = "mypy-0.991-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3d80e36b7d7a9259b740be6d8d906221789b0d836201af4234093cae89ced0cd"}, - {file = "mypy-0.991-cp37-cp37m-win_amd64.whl", hash = "sha256:e62ebaad93be3ad1a828a11e90f0e76f15449371ffeecca4a0a0b9adc99abcef"}, - {file = "mypy-0.991-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b86ce2c1866a748c0f6faca5232059f881cda6dda2a893b9a8373353cfe3715a"}, - {file = "mypy-0.991-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ac6e503823143464538efda0e8e356d871557ef60ccd38f8824a4257acc18d93"}, - {file = "mypy-0.991-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:0cca5adf694af539aeaa6ac633a7afe9bbd760df9d31be55ab780b77ab5ae8bf"}, - {file = "mypy-0.991-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12c56bf73cdab116df96e4ff39610b92a348cc99a1307e1da3c3768bbb5b135"}, - {file = "mypy-0.991-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:652b651d42f155033a1967739788c436491b577b6a44e4c39fb340d0ee7f0d70"}, - {file = "mypy-0.991-cp38-cp38-win_amd64.whl", hash = "sha256:4175593dc25d9da12f7de8de873a33f9b2b8bdb4e827a7cae952e5b1a342e243"}, - {file = "mypy-0.991-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:98e781cd35c0acf33eb0295e8b9c55cdbef64fcb35f6d3aa2186f289bed6e80d"}, - {file = "mypy-0.991-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6d7464bac72a85cb3491c7e92b5b62f3dcccb8af26826257760a552a5e244aa5"}, - {file = "mypy-0.991-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c9166b3f81a10cdf9b49f2d594b21b31adadb3d5e9db9b834866c3258b695be3"}, - {file = "mypy-0.991-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8472f736a5bfb159a5e36740847808f6f5b659960115ff29c7cecec1741c648"}, - {file = "mypy-0.991-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e80e758243b97b618cdf22004beb09e8a2de1af481382e4d84bc52152d1c476"}, - {file = "mypy-0.991-cp39-cp39-win_amd64.whl", hash = "sha256:74e259b5c19f70d35fcc1ad3d56499065c601dfe94ff67ae48b85596b9ec1461"}, - {file = "mypy-0.991-py3-none-any.whl", hash = "sha256:de32edc9b0a7e67c2775e574cb061a537660e51210fbf6006b0b36ea695ae9bb"}, - {file = "mypy-0.991.tar.gz", hash = "sha256:3c0165ba8f354a6d9881809ef29f1a9318a236a6d81c690094c5df32107bde06"}, + {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, + {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, + {file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"}, + {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"}, + {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"}, + {file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"}, + {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"}, + {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"}, + {file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"}, + {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"}, + {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"}, + {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"}, + {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"}, + {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"}, + {file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"}, + {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"}, + {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"}, + {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"}, + {file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"}, + {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"}, + {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"}, + {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"}, ] [package.dependencies] -mypy-extensions = ">=0.4.3" +mypy-extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=3.10" +typing-extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] install-types = ["pip"] -python2 = ["typed-ast (>=1.4.0,<2)"] +mypyc = ["setuptools (>=50)"] reports = ["lxml"] [[package]] @@ -528,13 +531,13 @@ files = [ [[package]] name = "packaging" -version = "24.1" +version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = 
"sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, + {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, ] [[package]] @@ -815,28 +818,29 @@ requests = ">=2.0.1,<3.0.0" [[package]] name = "ruff" -version = "0.1.15" +version = "0.7.4" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:5fe8d54df166ecc24106db7dd6a68d44852d14eb0729ea4672bb4d96c320b7df"}, - {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6f0bfbb53c4b4de117ac4d6ddfd33aa5fc31beeaa21d23c45c6dd249faf9126f"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0d432aec35bfc0d800d4f70eba26e23a352386be3a6cf157083d18f6f5881c8"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9405fa9ac0e97f35aaddf185a1be194a589424b8713e3b97b762336ec79ff807"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c66ec24fe36841636e814b8f90f572a8c0cb0e54d8b5c2d0e300d28a0d7bffec"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6f8ad828f01e8dd32cc58bc28375150171d198491fc901f6f98d2a39ba8e3ff5"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86811954eec63e9ea162af0ffa9f8d09088bab51b7438e8b6488b9401863c25e"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd4025ac5e87d9b80e1f300207eb2fd099ff8200fa2320d7dc066a3f4622dc6b"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b17b93c02cdb6aeb696effecea1095ac93f3884a49a554a9afa76bb125c114c1"}, - {file = "ruff-0.1.15-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ddb87643be40f034e97e97f5bc2ef7ce39de20e34608f3f829db727a93fb82c5"}, - {file = "ruff-0.1.15-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:abf4822129ed3a5ce54383d5f0e964e7fef74a41e48eb1dfad404151efc130a2"}, - {file = "ruff-0.1.15-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6c629cf64bacfd136c07c78ac10a54578ec9d1bd2a9d395efbee0935868bf852"}, - {file = "ruff-0.1.15-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1bab866aafb53da39c2cadfb8e1c4550ac5340bb40300083eb8967ba25481447"}, - {file = "ruff-0.1.15-py3-none-win32.whl", hash = "sha256:2417e1cb6e2068389b07e6fa74c306b2810fe3ee3476d5b8a96616633f40d14f"}, - {file = "ruff-0.1.15-py3-none-win_amd64.whl", hash = "sha256:3837ac73d869efc4182d9036b1405ef4c73d9b1f88da2413875e34e0d6919587"}, - {file = "ruff-0.1.15-py3-none-win_arm64.whl", hash = "sha256:9a933dfb1c14ec7a33cceb1e49ec4a16b51ce3c20fd42663198746efc0427360"}, - {file = "ruff-0.1.15.tar.gz", hash = "sha256:f6dfa8c1b21c913c326919056c390966648b680966febcb796cc9d1aaab8564e"}, + {file = "ruff-0.7.4-py3-none-linux_armv6l.whl", hash = "sha256:a4919925e7684a3f18e18243cd6bea7cfb8e968a6eaa8437971f681b7ec51478"}, + {file = "ruff-0.7.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:cfb365c135b830778dda8c04fb7d4280ed0b984e1aec27f574445231e20d6c63"}, + {file = 
"ruff-0.7.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:63a569b36bc66fbadec5beaa539dd81e0527cb258b94e29e0531ce41bacc1f20"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d06218747d361d06fd2fdac734e7fa92df36df93035db3dc2ad7aa9852cb109"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e0cea28d0944f74ebc33e9f934238f15c758841f9f5edd180b5315c203293452"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80094ecd4793c68b2571b128f91754d60f692d64bc0d7272ec9197fdd09bf9ea"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:997512325c6620d1c4c2b15db49ef59543ef9cd0f4aa8065ec2ae5103cedc7e7"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00b4cf3a6b5fad6d1a66e7574d78956bbd09abfd6c8a997798f01f5da3d46a05"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7dbdc7d8274e1422722933d1edddfdc65b4336abf0b16dfcb9dedd6e6a517d06"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e92dfb5f00eaedb1501b2f906ccabfd67b2355bdf117fea9719fc99ac2145bc"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3bd726099f277d735dc38900b6a8d6cf070f80828877941983a57bca1cd92172"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:2e32829c429dd081ee5ba39aef436603e5b22335c3d3fff013cd585806a6486a"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:662a63b4971807623f6f90c1fb664613f67cc182dc4d991471c23c541fee62dd"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:876f5e09eaae3eb76814c1d3b68879891d6fde4824c015d48e7a7da4cf066a3a"}, + {file = "ruff-0.7.4-py3-none-win32.whl", hash = "sha256:75c53f54904be42dd52a548728a5b572344b50d9b2873d13a3f8c5e3b91f5cac"}, + {file = "ruff-0.7.4-py3-none-win_amd64.whl", hash = "sha256:745775c7b39f914238ed1f1b0bebed0b9155a17cd8bc0b08d3c87e4703b990d6"}, + {file = "ruff-0.7.4-py3-none-win_arm64.whl", hash = "sha256:11bff065102c3ae9d3ea4dc9ecdfe5a5171349cdd0787c1fc64761212fc9cf1f"}, + {file = "ruff-0.7.4.tar.gz", hash = "sha256:cd12e35031f5af6b9b93715d8c4f40360070b2041f81273d0527683d5708fce2"}, ] [[package]] @@ -881,13 +885,13 @@ test = ["pytest", "tornado (>=4.5)", "typeguard"] [[package]] name = "tomli" -version = "2.0.2" +version = "2.1.0" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" files = [ - {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, - {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, + {file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"}, + {file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"}, ] [[package]] @@ -921,4 +925,4 @@ zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4.0" -content-hash = "4f6715617fbad64e03a1675f2edb9f77912dc156a79c8cef58eb145392efe21a" +content-hash = "9fa695cd9ce51479cc586c89b14577cf53a166bf74b590cee6b2d8c30e3663a9" diff --git a/libs/standard-tests/pyproject.toml b/libs/standard-tests/pyproject.toml index 4caa1fb651b297..8b1098c0003070 100644 --- a/libs/standard-tests/pyproject.toml +++ b/libs/standard-tests/pyproject.toml 
@@ -3,8 +3,9 @@ requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" [tool.poetry] -name = "langchain-standard-tests" -version = "0.3.0" +name = "langchain-tests" +packages = [{ include = "langchain_standard_tests" }] +version = "0.3.1" description = "Standard tests for LangChain implementations" authors = ["Erick Friis <erick@langchain.dev>"] readme = "README.md" @@ -68,10 +69,10 @@ python = ">=3.12" codespell = "^2.2.0" [tool.poetry.group.lint.dependencies] -ruff = "^0.1.5" +ruff = ">=0.5" [tool.poetry.group.typing.dependencies] -mypy = "^0.991" +mypy = "^1" [tool.poetry.group.test.dependencies.langchain-core] path = "../core"