diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
index 92c45a0c52..1718b1e32d 100644
--- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py
+++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
@@ -380,7 +380,7 @@ async def _prepare_request_parameters(
output_schema = ctx.deps.output_schema
prompted_output_template = (
- output_schema.template if isinstance(output_schema, _output.PromptedOutputSchema) else None
+ output_schema.template if isinstance(output_schema, _output.StructuredTextOutputSchema) else None
)
function_tools: list[ToolDefinition] = []
diff --git a/pydantic_ai_slim/pydantic_ai/_output.py b/pydantic_ai_slim/pydantic_ai/_output.py
index 58ae8c2a26..123678d7b1 100644
--- a/pydantic_ai_slim/pydantic_ai/_output.py
+++ b/pydantic_ai_slim/pydantic_ai/_output.py
@@ -265,6 +265,7 @@ def build( # noqa: C901
)
return NativeOutputSchema(
+ template=output.template,
processor=cls._build_processor(
flattened_outputs,
name=output.name,
@@ -439,27 +440,6 @@ def mode(self) -> OutputMode:
@dataclass(init=False)
class StructuredTextOutputSchema(OutputSchema[OutputDataT], ABC):
processor: BaseObjectOutputProcessor[OutputDataT]
-
- def __init__(
- self, *, processor: BaseObjectOutputProcessor[OutputDataT], allows_deferred_tools: bool, allows_image: bool
- ):
- super().__init__(
- text_processor=processor,
- object_def=processor.object_def,
- allows_deferred_tools=allows_deferred_tools,
- allows_image=allows_image,
- )
- self.processor = processor
-
-
-class NativeOutputSchema(StructuredTextOutputSchema[OutputDataT]):
- @property
- def mode(self) -> OutputMode:
- return 'native'
-
-
-@dataclass(init=False)
-class PromptedOutputSchema(StructuredTextOutputSchema[OutputDataT]):
template: str | None
def __init__(
@@ -471,16 +451,14 @@ def __init__(
allows_image: bool,
):
super().__init__(
- processor=processor,
+ text_processor=processor,
+ object_def=processor.object_def,
allows_deferred_tools=allows_deferred_tools,
allows_image=allows_image,
)
+ self.processor = processor
self.template = template
- @property
- def mode(self) -> OutputMode:
- return 'prompted'
-
@classmethod
def build_instructions(cls, template: str, object_def: OutputObjectDefinition) -> str:
"""Build instructions from a template and an object definition."""
@@ -496,6 +474,19 @@ def build_instructions(cls, template: str, object_def: OutputObjectDefinition) -
return template.format(schema=json.dumps(schema))
+class NativeOutputSchema(StructuredTextOutputSchema[OutputDataT]):
+ @property
+ def mode(self) -> OutputMode:
+ return 'native'
+
+
+@dataclass(init=False)
+class PromptedOutputSchema(StructuredTextOutputSchema[OutputDataT]):
+ @property
+ def mode(self) -> OutputMode:
+ return 'prompted'
+
+
@dataclass(init=False)
class ToolOutputSchema(OutputSchema[OutputDataT]):
def __init__(
diff --git a/pydantic_ai_slim/pydantic_ai/models/__init__.py b/pydantic_ai_slim/pydantic_ai/models/__init__.py
index 1fedefa625..32cfd8e457 100644
--- a/pydantic_ai_slim/pydantic_ai/models/__init__.py
+++ b/pydantic_ai_slim/pydantic_ai/models/__init__.py
@@ -21,7 +21,7 @@
from .. import _utils
from .._json_schema import JsonSchemaTransformer
-from .._output import OutputObjectDefinition, PromptedOutputSchema
+from .._output import OutputObjectDefinition, StructuredTextOutputSchema
from .._parts_manager import ModelResponsePartsManager
from .._run_context import RunContext
from ..builtin_tools import AbstractBuiltinTool
@@ -550,8 +550,8 @@ def tool_defs(self) -> dict[str, ToolDefinition]:
@cached_property
def prompted_output_instructions(self) -> str | None:
- if self.output_mode == 'prompted' and self.prompted_output_template and self.output_object:
- return PromptedOutputSchema.build_instructions(self.prompted_output_template, self.output_object)
+ if self.prompted_output_template and self.output_object:
+ return StructuredTextOutputSchema.build_instructions(self.prompted_output_template, self.output_object)
return None
__repr__ = _utils.dataclasses_no_defaults_repr
@@ -679,11 +679,15 @@ def prepare_request(
params = replace(params, output_tools=[])
if params.output_object and params.output_mode not in ('native', 'prompted'):
params = replace(params, output_object=None)
- if params.prompted_output_template and params.output_mode != 'prompted':
+ if params.prompted_output_template and params.output_mode not in ('prompted', 'native'):
params = replace(params, prompted_output_template=None) # pragma: no cover
# Set default prompted output template
- if params.output_mode == 'prompted' and not params.prompted_output_template:
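+ # Native mode also gets the default template when the profile requires the schema in the instructions.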
+ if (
+ params.output_mode == 'prompted'
+ or (params.output_mode == 'native' and self.profile.native_output_requires_schema_in_instructions)
+ ) and not params.prompted_output_template:
params = replace(params, prompted_output_template=self.profile.prompted_output_template)
# Check if output mode is supported
diff --git a/pydantic_ai_slim/pydantic_ai/models/outlines.py b/pydantic_ai_slim/pydantic_ai/models/outlines.py
index 4523183b1f..2e95f661c5 100644
--- a/pydantic_ai_slim/pydantic_ai/models/outlines.py
+++ b/pydantic_ai_slim/pydantic_ai/models/outlines.py
@@ -8,7 +8,7 @@
import io
from collections.abc import AsyncIterable, AsyncIterator, Sequence
from contextlib import asynccontextmanager
-from dataclasses import dataclass, replace
+from dataclasses import dataclass
from datetime import datetime, timezone
from typing import TYPE_CHECKING, Any, Literal, cast
@@ -525,14 +525,6 @@ async def _process_streamed_response(
_provider_name='outlines',
)
- def customize_request_parameters(self, model_request_parameters: ModelRequestParameters) -> ModelRequestParameters:
- """Customize the model request parameters for the model."""
- if model_request_parameters.output_mode in ('auto', 'native'):
- # This way the JSON schema will be included in the instructions.
- return replace(model_request_parameters, output_mode='prompted')
- else:
- return model_request_parameters
-
@dataclass
class OutlinesStreamedResponse(StreamedResponse):
diff --git a/pydantic_ai_slim/pydantic_ai/output.py b/pydantic_ai_slim/pydantic_ai/output.py
index cd5e5865a6..ae8d0fd39b 100644
--- a/pydantic_ai_slim/pydantic_ai/output.py
+++ b/pydantic_ai_slim/pydantic_ai/output.py
@@ -164,6 +164,11 @@ class NativeOutput(Generic[OutputDataT]):
"""The description of the structured output that will be passed to the model. If not specified and only one output is provided, the docstring of the output type or function will be used."""
strict: bool | None
"""Whether to use strict mode for the output, if the model supports it."""
+ template: str | None
+ """Template for the prompt passed to the model.
+ The '{schema}' placeholder will be replaced with the output JSON schema.
+ If no template is specified but the model's profile indicates that the schema must also be included in the instructions, the profile's default prompted output template will be used.
+ """
def __init__(
self,
@@ -172,11 +177,13 @@ def __init__(
name: str | None = None,
description: str | None = None,
strict: bool | None = None,
+ template: str | None = None,
):
self.outputs = outputs
self.name = name
self.description = description
self.strict = strict
+ self.template = template
@dataclass(init=False)
diff --git a/pydantic_ai_slim/pydantic_ai/profiles/__init__.py b/pydantic_ai_slim/pydantic_ai/profiles/__init__.py
index 1eae6293d6..9a3221752d 100644
--- a/pydantic_ai_slim/pydantic_ai/profiles/__init__.py
+++ b/pydantic_ai_slim/pydantic_ai/profiles/__init__.py
@@ -51,6 +51,8 @@ class ModelProfile:
"""
)
"""The instructions template to use for prompted structured output. The '{schema}' placeholder will be replaced with the JSON schema for the output."""
+ native_output_requires_schema_in_instructions: bool = False
+ """Whether to add prompted output template in native structured output mode"""
json_schema_transformer: type[JsonSchemaTransformer] | None = None
"""The transformer to use to make JSON schemas for tools and structured output compatible with the model."""
diff --git a/pydantic_ai_slim/pydantic_ai/providers/outlines.py b/pydantic_ai_slim/pydantic_ai/providers/outlines.py
index cd0fcad0f7..27e77902af 100644
--- a/pydantic_ai_slim/pydantic_ai/providers/outlines.py
+++ b/pydantic_ai_slim/pydantic_ai/providers/outlines.py
@@ -37,4 +37,5 @@ def model_profile(self, model_name: str) -> ModelProfile | None:
supports_json_schema_output=True,
supports_json_object_output=True,
default_structured_output_mode='native',
+ native_output_requires_schema_in_instructions=True,
)
diff --git a/tests/models/test_outlines.py b/tests/models/test_outlines.py
index b709488c1a..ba417e3a19 100644
--- a/tests/models/test_outlines.py
+++ b/tests/models/test_outlines.py
@@ -250,6 +250,7 @@ def test_init(model_loading_function_name: str, args: Callable[[], tuple[Any]])
supports_json_schema_output=True,
supports_json_object_output=True,
default_structured_output_mode='native',
+ native_output_requires_schema_in_instructions=True,
thinking_tags=('', ''),
ignore_streamed_leading_whitespace=False,
supported_builtin_tools=frozenset(),
@@ -309,6 +310,7 @@ def test_model_loading_methods(model_loading_function_name: str, args: Callable[
supports_json_schema_output=True,
supports_json_object_output=True,
default_structured_output_mode='native',
+ native_output_requires_schema_in_instructions=True,
thinking_tags=('', ''),
ignore_streamed_leading_whitespace=False,
supported_builtin_tools=frozenset(),
diff --git a/tests/providers/test_outlines.py b/tests/providers/test_outlines.py
index 1127d81620..32d8c95281 100644
--- a/tests/providers/test_outlines.py
+++ b/tests/providers/test_outlines.py
@@ -31,6 +31,7 @@ def test_outlines_provider() -> None:
supports_json_schema_output=True,
supports_json_object_output=True,
default_structured_output_mode='native',
+ native_output_requires_schema_in_instructions=True,
thinking_tags=('', ''),
ignore_streamed_leading_whitespace=False,
)
diff --git a/tests/test_native_output_schema.py b/tests/test_native_output_schema.py
new file mode 100644
index 0000000000..c35a6a46ab
--- /dev/null
+++ b/tests/test_native_output_schema.py
@@ -0,0 +1,111 @@
+import pytest
+from pydantic import BaseModel
+
+from pydantic_ai import Agent
+from pydantic_ai.models.test import TestModel
+from pydantic_ai.output import NativeOutput
+from pydantic_ai.profiles import ModelProfile
+
+pytestmark = pytest.mark.anyio
+
+
+class City(BaseModel):
+ name: str
+ population: int
+
+
+async def test_native_output_schema_injection_from_profile():
+ """
+ Test that `native_output_requires_schema_in_instructions=True` in the profile
+ causes the schema instructions to be injected when native output mode is selected implicitly via the profile default.
+ """
+ profile = ModelProfile(
+ supports_json_schema_output=True,
+ default_structured_output_mode='native',
+ native_output_requires_schema_in_instructions=True,
+ prompted_output_template='SCHEMA: {schema}',
+ )
+ model = TestModel(profile=profile, custom_output_text='{ "name": "Paris", "population": 9000000 }')
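+ # No explicit NativeOutput wrapper: native mode comes from the profile's default_structured_output_mode.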
+ agent = Agent(model, output_type=City)
+
+ await agent.run('Paris')
+
+ params = model.last_model_request_parameters
+ assert params
+ assert params.prompted_output_instructions is not None
+ assert 'SCHEMA:' in params.prompted_output_instructions
+ assert 'City' in params.prompted_output_instructions
+
+
+async def test_native_output_custom_template_override():
+ """
+ Test that a `template` provided to `NativeOutput` is used,
+ even when schema injection is disabled in the profile.
+ """
+ profile = ModelProfile(
+ supports_json_schema_output=True,
+ default_structured_output_mode='native',
+ native_output_requires_schema_in_instructions=False, # Disabled in profile
+ )
+ model = TestModel(profile=profile, custom_output_text='{ "name": "London", "population": 9000000 }')
+ agent = Agent(model)
+
+ # Use NativeOutput with explicit template
+ await agent.run(
+ 'London',
+ output_type=NativeOutput(City, template='CUSTOM TEMPLATE: {schema}'),
+ )
+
+ params = model.last_model_request_parameters
+ assert params
+ assert params.prompted_output_instructions is not None
+ assert 'CUSTOM TEMPLATE:' in params.prompted_output_instructions
+ assert 'City' in params.prompted_output_instructions
+
+
+async def test_native_output_custom_template_precedence():
+ """
+ Test that a `template` provided to `NativeOutput` takes precedence over the profile's default template,
+ even if injection is enabled in the profile.
+ """
+ profile = ModelProfile(
+ supports_json_schema_output=True,
+ default_structured_output_mode='native',
+ native_output_requires_schema_in_instructions=True,
+ prompted_output_template='DEFAULT SCHEMA: {schema}',
+ )
+ model = TestModel(profile=profile, custom_output_text='{ "name": "China", "population": 9000000 }')
+ agent = Agent(model)
+
+ await agent.run(
+ 'Beijing',
+ output_type=NativeOutput(City, template='OVERRIDE TEMPLATE: {schema}'),
+ )
+
+ params = model.last_model_request_parameters
+ assert params
+ assert params.prompted_output_instructions is not None
+ assert 'OVERRIDE TEMPLATE:' in params.prompted_output_instructions
+ assert 'DEFAULT SCHEMA:' not in params.prompted_output_instructions
+
+
+async def test_native_output_no_injection_by_default():
+ """
+ Test that with `native_output_requires_schema_in_instructions` disabled and no custom template,
+ no schema instructions are injected in native output mode (the default behavior).
+ """
+ profile = ModelProfile(
+ supports_json_schema_output=True,
+ default_structured_output_mode='native',
+ native_output_requires_schema_in_instructions=False,
+ )
+ model = TestModel(profile=profile, custom_output_text='{ "name": "Tokyo", "population": 9000000 }')
+ agent = Agent(model, output_type=City)
+
+ await agent.run('Tokyo')
+
+ params = model.last_model_request_parameters
+ assert params
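+ # With the flag disabled and no custom template, the schema is only passed natively, not via the instructions.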
+ assert params.prompted_output_instructions is None