Skip to content

Commit

Permalink
json mode standard test (#25497)
Browse files Browse the repository at this point in the history
Co-authored-by: Chester Curme <[email protected]>
  • Loading branch information
baskaryan and ccurme authored Dec 17, 2024
1 parent 24bf242 commit e4d3ccf
Show file tree
Hide file tree
Showing 8 changed files with 127 additions and 2 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -29,3 +29,7 @@ def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(model, my_adder_tool)

@property
def supports_json_mode(self) -> bool:
    """Whether this chat model supports JSON mode in ``with_structured_output``.

    Returning True opts this test class in to the standard ``test_json_mode``
    integration test.
    """
    return True
8 changes: 8 additions & 0 deletions libs/partners/groq/tests/integration_tests/test_standard.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,10 @@ def test_tool_message_histories_list_content(
) -> None:
super().test_tool_message_histories_list_content(model, my_adder_tool)

@property
def supports_json_mode(self) -> bool:
    """Whether this chat model supports JSON mode in ``with_structured_output``.

    Returning True opts this test class in to the standard ``test_json_mode``
    integration test.
    """
    return True


class TestGroqLlama(BaseTestGroq):
@property
Expand All @@ -41,6 +45,10 @@ def tool_choice_value(self) -> Optional[str]:
"""Value to use for tool choice when used in tests."""
return "any"

@property
def supports_json_mode(self) -> bool:
    """Whether this chat model supports JSON mode in ``with_structured_output``.

    Disabled for this model: per the inline note, JSON mode is not supported
    when streaming, so the standard ``test_json_mode`` test (which also
    exercises ``.stream``) is skipped.
    """
    return False  # Not supported in streaming mode

@pytest.mark.xfail(
reason=("Fails with 'Failed to call a function. Please adjust your prompt.'")
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,10 @@ def chat_model_class(self) -> Type[BaseChatModel]:
def chat_model_params(self) -> dict:
return {"model": "mistral-large-latest", "temperature": 0}

@property
def supports_json_mode(self) -> bool:
    """Whether this chat model supports JSON mode in ``with_structured_output``.

    Returning True opts this test class in to the standard ``test_json_mode``
    integration test.
    """
    return True

@property
def tool_choice_value(self) -> Optional[str]:
"""Value to use for tool choice when used in tests."""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,10 @@ def chat_model_params(self) -> dict:
def supports_image_inputs(self) -> bool:
return True

@property
def supports_json_mode(self) -> bool:
    """Whether this chat model supports JSON mode in ``with_structured_output``.

    Returning True opts this test class in to the standard ``test_json_mode``
    integration test.
    """
    return True

@pytest.mark.xfail(
reason=(
"Fails with 'AssertionError'. Ollama does not support 'tool_choice' yet."
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,10 @@ def chat_model_params(self) -> dict:
def supports_image_inputs(self) -> bool:
return True

@property
def supports_json_mode(self) -> bool:
    """Whether this chat model supports JSON mode in ``with_structured_output``.

    Returning True opts this test class in to the standard ``test_json_mode``
    integration test.
    """
    return True

@pytest.mark.xfail(reason="Not yet supported.")
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,10 @@ def chat_model_params(self) -> dict:
def supports_image_inputs(self) -> bool:
return True

@property
def supports_json_mode(self) -> bool:
    """Whether this chat model supports JSON mode in ``with_structured_output``.

    Returning True opts this test class in to the standard ``test_json_mode``
    integration test.
    """
    return True

@property
def supported_usage_metadata_details(
self,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,7 @@ def tool_choice_value(self) -> Optional[str]:
output.
By default, this is determined by whether the chat model's
`with_structured_output` method is overridden. If the base implementation is
``with_structured_output`` method is overridden. If the base implementation is
intended to be used, this method should be overridden.
See: https://python.langchain.com/docs/concepts/structured_outputs/
Expand All @@ -191,6 +191,21 @@ def tool_choice_value(self) -> Optional[str]:
def has_structured_output(self) -> bool:
return True
.. dropdown:: supports_json_mode
Boolean property indicating whether the chat model supports JSON mode in
``with_structured_output``.
See: https://python.langchain.com/docs/concepts/structured_outputs/#json-mode
Example:
.. code-block:: python
@property
def supports_json_mode(self) -> bool:
return True
.. dropdown:: supports_image_inputs
Boolean property indicating whether the chat model supports image inputs.
Expand Down Expand Up @@ -1295,6 +1310,68 @@ class Joke(BaseModel):
joke_result = chat.invoke("Give me a joke about cats, include the punchline.")
assert isinstance(joke_result, Joke)

def test_json_mode(self, model: BaseChatModel) -> None:
    """Test structured output via `JSON mode. <https://python.langchain.com/docs/concepts/structured_outputs/#json-mode>`_

    This test is optional: models that do not support the JSON mode feature
    should skip it (see Configuration below).

    .. dropdown:: Configuration

        To disable this test, set ``supports_json_mode`` to False in your
        test class:

        .. code-block:: python

            class TestMyChatModelIntegration(ChatModelIntegrationTests):
                @property
                def supports_json_mode(self) -> bool:
                    return False

    .. dropdown:: Troubleshooting

        See example implementation of ``with_structured_output`` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output
    """  # noqa: E501
    if not self.supports_json_mode:
        pytest.skip("Test requires json mode support.")

    # Import pydantic 2 ("proper") symbols locally so the aliases don't
    # collide with any pydantic.v1 names used elsewhere in the module.
    from pydantic import BaseModel as PydanticV2BaseModel
    from pydantic import Field as PydanticV2Field

    class Joke(PydanticV2BaseModel):
        """Joke to tell user."""

        setup: str = PydanticV2Field(description="question to set up a joke")
        punchline: str = PydanticV2Field(description="answer to resolve the joke")

    expected_keys = {"setup", "punchline"}
    prompt = (
        "Tell me a joke about cats. Return the result as a JSON with 'setup' and "
        "'punchline' keys. Return nothing other than JSON."
    )

    # Case 1: schema given as a Pydantic class -> outputs parsed into Joke.
    # Type ignoring since the interface only officially supports pydantic 1
    # or pydantic.v1.BaseModel but not pydantic.BaseModel from pydantic 2.
    # We'll need to do a pass updating the type signatures.
    structured_llm = model.with_structured_output(Joke, method="json_mode")  # type: ignore[arg-type]
    invoke_result = structured_llm.invoke(prompt)
    assert isinstance(invoke_result, Joke)

    for chunk in structured_llm.stream(prompt):
        assert isinstance(chunk, Joke)

    # Case 2: schema given as a JSON schema dict -> outputs are plain dicts.
    structured_llm = model.with_structured_output(
        Joke.model_json_schema(), method="json_mode"
    )
    dict_result = structured_llm.invoke(prompt)
    assert isinstance(dict_result, dict)
    assert set(dict_result.keys()) == expected_keys

    for chunk in structured_llm.stream(prompt):
        assert isinstance(chunk, dict)
    # Post-loop re-assertion narrows the type of the final chunk for mypy.
    assert isinstance(chunk, dict)  # for mypy
    assert set(chunk.keys()) == expected_keys

def test_tool_message_histories_string_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
Expand Down
22 changes: 21 additions & 1 deletion libs/standard-tests/langchain_tests/unit_tests/chat_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,11 @@ def has_structured_output(self) -> bool:
is not BaseChatModel.with_structured_output
)

@property
def supports_json_mode(self) -> bool:
    """(bool) whether the chat model supports JSON mode.

    Defaults to False; integration test subclasses override this to return
    True to opt in to the JSON-mode standard test.
    """
    return False

@property
def supports_image_inputs(self) -> bool:
"""(bool) whether the chat model supports image inputs, defaults to
Expand Down Expand Up @@ -281,7 +286,7 @@ def tool_choice_value(self) -> Optional[str]:
output.
By default, this is determined by whether the chat model's
`with_structured_output` method is overridden. If the base implementation is
``with_structured_output`` method is overridden. If the base implementation is
intended to be used, this method should be overridden.
See: https://python.langchain.com/docs/concepts/structured_outputs/
Expand All @@ -294,6 +299,21 @@ def tool_choice_value(self) -> Optional[str]:
def has_structured_output(self) -> bool:
return True
.. dropdown:: supports_json_mode
Boolean property indicating whether the chat model supports JSON mode in
``with_structured_output``.
See: https://python.langchain.com/docs/concepts/structured_outputs/#json-mode
Example:
.. code-block:: python
@property
def supports_json_mode(self) -> bool:
return True
.. dropdown:: supports_image_inputs
Boolean property indicating whether the chat model supports image inputs.
Expand Down

0 comments on commit e4d3ccf

Please sign in to comment.