2 changes: 1 addition & 1 deletion pydantic_ai_slim/pydantic_ai/_agent_graph.py
@@ -380,7 +380,7 @@ async def _prepare_request_parameters(
output_schema = ctx.deps.output_schema

prompted_output_template = (
output_schema.template if isinstance(output_schema, _output.PromptedOutputSchema) else None
output_schema.template if isinstance(output_schema, _output.StructuredTextOutputSchema) else None
)

function_tools: list[ToolDefinition] = []
43 changes: 17 additions & 26 deletions pydantic_ai_slim/pydantic_ai/_output.py
@@ -265,6 +265,7 @@ def build( # noqa: C901
)

return NativeOutputSchema(
template=output.template,
processor=cls._build_processor(
flattened_outputs,
name=output.name,
@@ -439,27 +440,6 @@ def mode(self) -> OutputMode:
@dataclass(init=False)
class StructuredTextOutputSchema(OutputSchema[OutputDataT], ABC):
processor: BaseObjectOutputProcessor[OutputDataT]

def __init__(
self, *, processor: BaseObjectOutputProcessor[OutputDataT], allows_deferred_tools: bool, allows_image: bool
):
super().__init__(
text_processor=processor,
object_def=processor.object_def,
allows_deferred_tools=allows_deferred_tools,
allows_image=allows_image,
)
self.processor = processor


class NativeOutputSchema(StructuredTextOutputSchema[OutputDataT]):
@property
def mode(self) -> OutputMode:
return 'native'


@dataclass(init=False)
class PromptedOutputSchema(StructuredTextOutputSchema[OutputDataT]):
template: str | None

def __init__(
@@ -471,16 +451,14 @@ def __init__(
allows_image: bool,
):
super().__init__(
processor=processor,
text_processor=processor,
object_def=processor.object_def,
allows_deferred_tools=allows_deferred_tools,
allows_image=allows_image,
)
self.processor = processor
self.template = template

@property
def mode(self) -> OutputMode:
return 'prompted'

@classmethod
def build_instructions(cls, template: str, object_def: OutputObjectDefinition) -> str:
"""Build instructions from a template and an object definition."""
@@ -496,6 +474,19 @@ def build_instructions(cls, template: str, object_def: OutputObjectDefinition) -
return template.format(schema=json.dumps(schema))


class NativeOutputSchema(StructuredTextOutputSchema[OutputDataT]):
@property
def mode(self) -> OutputMode:
return 'native'


@dataclass(init=False)
class PromptedOutputSchema(StructuredTextOutputSchema[OutputDataT]):
@property
def mode(self) -> OutputMode:
return 'prompted'


@dataclass(init=False)
class ToolOutputSchema(OutputSchema[OutputDataT]):
def __init__(
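For reference, the formatting step at the end of `build_instructions` comes down to substituting the JSON-encoded schema into the '{schema}' placeholder. A minimal standalone sketch of just that step; both the template string and the schema dict here are illustrative stand-ins, and the name/description handling elided from the hunk above is skipped:

import json

# Illustrative only: mirrors the final `template.format(schema=json.dumps(schema))`
# step shown in the hunk above; the real classmethod also folds the output object's
# name and description into the schema before dumping it.
template = 'Always respond with a JSON object matching this schema:\n\n{schema}'
schema = {
    'type': 'object',
    'properties': {'name': {'type': 'string'}, 'population': {'type': 'integer'}},
    'required': ['name', 'population'],
    'title': 'City',
}
instructions = template.format(schema=json.dumps(schema))
print(instructions)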
13 changes: 8 additions & 5 deletions pydantic_ai_slim/pydantic_ai/models/__init__.py
@@ -21,7 +21,7 @@

from .. import _utils
from .._json_schema import JsonSchemaTransformer
from .._output import OutputObjectDefinition, PromptedOutputSchema
from .._output import OutputObjectDefinition, StructuredTextOutputSchema
from .._parts_manager import ModelResponsePartsManager
from .._run_context import RunContext
from ..builtin_tools import AbstractBuiltinTool
@@ -550,8 +550,8 @@ def tool_defs(self) -> dict[str, ToolDefinition]:

@cached_property
def prompted_output_instructions(self) -> str | None:
if self.output_mode == 'prompted' and self.prompted_output_template and self.output_object:
return PromptedOutputSchema.build_instructions(self.prompted_output_template, self.output_object)
if self.prompted_output_template and self.output_object:
return StructuredTextOutputSchema.build_instructions(self.prompted_output_template, self.output_object)
return None

__repr__ = _utils.dataclasses_no_defaults_repr
@@ -679,11 +679,14 @@ def prepare_request(
params = replace(params, output_tools=[])
if params.output_object and params.output_mode not in ('native', 'prompted'):
params = replace(params, output_object=None)
if params.prompted_output_template and params.output_mode != 'prompted':
if params.prompted_output_template and params.output_mode not in ('prompted', 'native'):
params = replace(params, prompted_output_template=None) # pragma: no cover

# Set default prompted output template
if params.output_mode == 'prompted' and not params.prompted_output_template:
if (
params.output_mode == 'prompted'
or (params.output_mode == 'native' and self.profile.native_output_requires_schema_in_instructions)
) and not params.prompted_output_template:
params = replace(params, prompted_output_template=self.profile.prompted_output_template)

# Check if output mode is supported
10 changes: 1 addition & 9 deletions pydantic_ai_slim/pydantic_ai/models/outlines.py
@@ -8,7 +8,7 @@
import io
from collections.abc import AsyncIterable, AsyncIterator, Sequence
from contextlib import asynccontextmanager
from dataclasses import dataclass, replace
from dataclasses import dataclass
from datetime import datetime, timezone
from typing import TYPE_CHECKING, Any, Literal, cast

@@ -525,14 +525,6 @@ async def _process_streamed_response(
_provider_name='outlines',
)

def customize_request_parameters(self, model_request_parameters: ModelRequestParameters) -> ModelRequestParameters:
"""Customize the model request parameters for the model."""
if model_request_parameters.output_mode in ('auto', 'native'):
# This way the JSON schema will be included in the instructions.
return replace(model_request_parameters, output_mode='prompted')
else:
return model_request_parameters


@dataclass
class OutlinesStreamedResponse(StreamedResponse):
8 changes: 8 additions & 0 deletions pydantic_ai_slim/pydantic_ai/output.py
@@ -164,6 +164,12 @@ class NativeOutput(Generic[OutputDataT]):
"""The description of the structured output that will be passed to the model. If not specified and only one output is provided, the docstring of the output type or function will be used."""
strict: bool | None
"""Whether to use strict mode for the output, if the model supports it."""
template: str | None
"""Template for the prompt passed to the model.
The '{schema}' placeholder will be replaced with the output JSON schema.
If not specified, the default template specified on the model's profile will be used only if `native_output_requires_schema_in_instructions` on the profile is True.
If explicitly provided, it's used regardless of `native_output_requires_schema_in_instructions`.
Comment on lines +170 to +171

Users mostly don't deal with profile fields directly, and should be able to assume that we've done our best to make them correct out of the box so that they don't have to worry about them, so I'd rather say:

Suggested change
- If not specified, the default template specified on the model's profile will be used only if `native_output_requires_schema_in_instructions` on the profile is True.
- If explicitly provided, it's used regardless of `native_output_requires_schema_in_instructions`.
+ If no template is specified but the model's profile indicates that it requires the schema to be sent as a prompt, the default template specified on the profile will be used.

"""

def __init__(
self,
@@ -172,11 +178,13 @@ def __init__(
name: str | None = None,
description: str | None = None,
strict: bool | None = None,
template: str | None = None,
):
self.outputs = outputs
self.name = name
self.description = description
self.strict = strict
self.template = template


@dataclass(init=False)
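For context, a minimal usage sketch of the new `template` parameter; the model string and template text are placeholders, and the same pattern is exercised with `TestModel` in the tests added at the bottom of this PR:

from pydantic import BaseModel

from pydantic_ai import Agent
from pydantic_ai.output import NativeOutput


class City(BaseModel):
    name: str
    population: int


agent = Agent(
    'openai:gpt-4o',  # placeholder model; any model supporting native structured output
    output_type=NativeOutput(
        City,
        # Per the docstring above, an explicitly provided template is used regardless
        # of the profile's `native_output_requires_schema_in_instructions` setting;
        # '{schema}' is replaced with City's JSON schema.
        template='Return a JSON object matching this schema:\n\n{schema}',
    ),
)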
2 changes: 2 additions & 0 deletions pydantic_ai_slim/pydantic_ai/profiles/__init__.py
@@ -51,6 +51,8 @@ class ModelProfile:
"""
)
"""The instructions template to use for prompted structured output. The '{schema}' placeholder will be replaced with the JSON schema for the output."""
native_output_requires_schema_in_instructions: bool = False
"""Whether to add prompted output template in native structured output mode"""
json_schema_transformer: type[JsonSchemaTransformer] | None = None
"""The transformer to use to make JSON schemas for tools and structured output compatible with the model."""

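A quick sketch of opting a model into this behaviour via its profile, mirroring the Outlines provider change below and the new tests at the end of this PR (`TestModel` is used purely for illustration):

from pydantic_ai.models.test import TestModel
from pydantic_ai.profiles import ModelProfile

# With the flag set, native structured output also gets the output JSON schema
# injected into the instructions via the prompted output template.
profile = ModelProfile(
    supports_json_schema_output=True,
    default_structured_output_mode='native',
    native_output_requires_schema_in_instructions=True,
)
model = TestModel(profile=profile)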
1 change: 1 addition & 0 deletions pydantic_ai_slim/pydantic_ai/providers/outlines.py
@@ -37,4 +37,5 @@ def model_profile(self, model_name: str) -> ModelProfile | None:
supports_json_schema_output=True,
supports_json_object_output=True,
default_structured_output_mode='native',
native_output_requires_schema_in_instructions=True,
)
2 changes: 2 additions & 0 deletions tests/models/test_outlines.py
@@ -250,6 +250,7 @@ def test_init(model_loading_function_name: str, args: Callable[[], tuple[Any]])
supports_json_schema_output=True,
supports_json_object_output=True,
default_structured_output_mode='native',
native_output_requires_schema_in_instructions=True,
thinking_tags=('<think>', '</think>'),
ignore_streamed_leading_whitespace=False,
supported_builtin_tools=frozenset(),
@@ -309,6 +310,7 @@ def test_model_loading_methods(model_loading_function_name: str, args: Callable[
supports_json_schema_output=True,
supports_json_object_output=True,
default_structured_output_mode='native',
native_output_requires_schema_in_instructions=True,
thinking_tags=('<think>', '</think>'),
ignore_streamed_leading_whitespace=False,
supported_builtin_tools=frozenset(),
1 change: 1 addition & 0 deletions tests/providers/test_outlines.py
@@ -31,6 +31,7 @@ def test_outlines_provider() -> None:
supports_json_schema_output=True,
supports_json_object_output=True,
default_structured_output_mode='native',
native_output_requires_schema_in_instructions=True,
thinking_tags=('<think>', '</think>'),
ignore_streamed_leading_whitespace=False,
)
106 changes: 106 additions & 0 deletions tests/test_native_output_schema.py
@@ -0,0 +1,106 @@
from pydantic import BaseModel

from pydantic_ai import Agent
from pydantic_ai.models.test import TestModel
from pydantic_ai.output import NativeOutput
from pydantic_ai.profiles import ModelProfile


class City(BaseModel):
name: str
population: int


async def test_native_output_schema_injection_from_profile():
"""
Test that `native_output_requires_schema_in_instructions=True` in the profile
causes the schema instructions to be injected, even when using `NativeOutput` implicitly or explicitly.
"""
profile = ModelProfile(
supports_json_schema_output=True,
default_structured_output_mode='native',
native_output_requires_schema_in_instructions=True,
prompted_output_template='SCHEMA: {schema}',
)
model = TestModel(profile=profile, custom_output_text='{ "name": "Paris", "population": 9000000 }')
agent = Agent(model, output_type=City)

await agent.run('Paris')

params = model.last_model_request_parameters
assert params
assert params.prompted_output_instructions is not None
assert 'SCHEMA:' in params.prompted_output_instructions
assert 'City' in params.prompted_output_instructions


async def test_native_output_custom_template_override():
"""
Test that providing a `template` in `NativeOutput` uses that template,
regardless of the profile setting (even if injection is disabled in profile).
"""
profile = ModelProfile(
supports_json_schema_output=True,
default_structured_output_mode='native',
native_output_requires_schema_in_instructions=False, # Disabled in profile
)
model = TestModel(profile=profile, custom_output_text='{ "name": "London", "population": 9000000 }')
agent = Agent(model)

# Use NativeOutput with explicit template
await agent.run(
'London',
output_type=NativeOutput(City, template='CUSTOM TEMPLATE: {schema}'),
)

params = model.last_model_request_parameters
assert params
assert params.prompted_output_instructions is not None
assert 'CUSTOM TEMPLATE:' in params.prompted_output_instructions
assert 'City' in params.prompted_output_instructions


async def test_native_output_custom_template_precedence():
"""
Test that providing a `template` in `NativeOutput` takes precedence over the profile default,
even if injection is enabled in the profile.
"""
profile = ModelProfile(
supports_json_schema_output=True,
default_structured_output_mode='native',
native_output_requires_schema_in_instructions=True,
prompted_output_template='DEFAULT SCHEMA: {schema}',
)
model = TestModel(profile=profile, custom_output_text='{ "name": "China", "population": 9000000 }')
agent = Agent(model)

await agent.run(
'China',
output_type=NativeOutput(City, template='OVERRIDE TEMPLATE: {schema}'),
)

params = model.last_model_request_parameters
assert params
assert params.prompted_output_instructions is not None
assert 'OVERRIDE TEMPLATE:' in params.prompted_output_instructions
assert 'DEFAULT SCHEMA:' not in params.prompted_output_instructions


async def test_native_output_no_injection_by_default():
"""
Test that without the profile setting and without a custom template,
no instructions are injected for NativeOutput (default behavior).
"""
profile = ModelProfile(
supports_json_schema_output=True,
default_structured_output_mode='native',
native_output_requires_schema_in_instructions=False,
)
model = TestModel(profile=profile, custom_output_text='{ "name": "Tokyo", "population": 9000000 }')
agent = Agent(model, output_type=City)

await agent.run('Tokyo')

params = model.last_model_request_parameters
assert params
assert params.prompted_output_instructions is None