add agenton user prompt composition

This commit is contained in:
盐粒 Yanli 2026-05-07 23:05:43 +08:00
parent f316d19be6
commit 8c6e9c3b95
15 changed files with 293 additions and 66 deletions

View File

@ -40,7 +40,17 @@ Use `CompositorBuilder` to mix serializable config nodes with live instances:
```python
compositor = (
CompositorBuilder(registry)
.add_config({"layers": [{"name": "prompt", "type": "plain.prompt", "config": {"prefix": "Hi"}}]})
.add_config(
{
"layers": [
{
"name": "prompt",
"type": "plain.prompt",
"config": {"prefix": "Hi", "user": "Answer with examples."},
}
]
}
)
.add_instance(name="profile", layer=ObjectLayer(profile))
.build()
)
@ -49,6 +59,19 @@ compositor = (
Use `.add_instance()` for layers that require Python objects or callables, such
as `ObjectLayer`, `ToolsLayer`, and dynamic tool layers.
## System prompts and user prompts
Layers expose three prompt surfaces:
- `prefix_prompts`: system prompt fragments collected in layer order.
- `suffix_prompts`: system prompt fragments collected in reverse layer order.
- `user_prompts`: user-message fragments collected in layer order.
`PromptLayer` accepts `prefix`, `user`, and `suffix` config fields. For
pydantic-ai, `PYDANTIC_AI_TRANSFORMERS` maps `compositor.prompts` to system
prompt functions and `compositor.user_prompts` to values suitable for
`Agent.run(user_prompt=...)`.
## Session snapshot and restore
`Compositor.snapshot_session(session)` serializes non-active sessions, including

View File

@ -86,6 +86,7 @@ async def main() -> None:
"type": "plain.prompt",
"config": {
"prefix": "Use config dicts for serializable layers.",
"user": "Explain how the composed agent should use its layers.",
"suffix": "Before finalizing, make the result easy to scan.",
},
},
@ -115,6 +116,10 @@ async def main() -> None:
for prompt in compositor.prompts:
print(f"- {prompt.value}")
print("\nUser prompts:")
for prompt in compositor.user_prompts:
print(f"- {prompt.value}")
print("\nTools:")
for tool in compositor.tools:
print(f"- {tool.value.__name__}{signature(tool.value)}")

View File

@ -51,6 +51,7 @@ async def main() -> None:
)
pydantic_ai_bridge = PydanticAIBridgeLayer[AgentProfile](
prefix=("Prefer concrete details.", profile_prompt, tone_prompt),
user="Use the tools for 'layer composition'.",
tool_entries=(write_tagline,),
)
@ -96,10 +97,7 @@ async def main() -> None:
for prompt in compositor.prompts:
_ = agent.system_prompt(prompt)
result = await agent.run(
"Use the tools for 'layer composition'.",
deps=pydantic_ai_bridge.run_deps,
)
result = await agent.run(compositor.user_prompts, deps=pydantic_ai_bridge.run_deps)
for line in _format_messages(result.all_messages()):
print(line)

View File

@ -12,9 +12,10 @@ not to the shared layer instances, so different sessions can enter the same
compositor without leaking generated ids or handles through ``self``.
Dependency mappings use layer-local dependency names as keys and compositor
layer names as values. Prompt aggregation depends on insertion order: prefix
prompts are collected from first to last layer, while suffix prompts are
collected in reverse.
layer names as values. System prompt aggregation depends on insertion order:
prefix prompts are collected from first to last layer, while suffix prompts are
collected in reverse. User prompts are collected from first to last layer so the
composed user message preserves graph order.
Serializable graph config uses registry type ids rather than import paths.
``CompositorBuilder`` resolves config nodes through ``LayerRegistry`` and can
@ -26,11 +27,11 @@ whose layer controls must match the compositor layer names and order. When
omitted, a fresh session is created. Reusing a suspended session resumes its
layer contexts; closed sessions must be replaced.
Optional prompt and tool transformers run after layer aggregation. The
compositor asks each layer to ``wrap_prompt`` and ``wrap_tool`` its native
values, so typed layer families can tag prompt/tool values without changing
their authoring contracts. When transformers are omitted, the compositor
returns those wrapped items unchanged.
Optional prompt, user prompt, and tool transformers run after layer aggregation.
The compositor asks each layer to apply ``wrap_prompt``, ``wrap_user_prompt``, and
``wrap_tool`` to its native values, so typed layer families can tag values without
changing their authoring contracts. When transformers are omitted, the
compositor returns those wrapped items unchanged.
"""
from collections import OrderedDict
@ -43,21 +44,31 @@ from pydantic import BaseModel, ConfigDict, Field, JsonValue
from typing_extensions import Self, TypeVar
from agenton.layers.base import Layer, LayerControl, LifecycleState
from agenton.layers.types import AllPromptTypes, AllToolTypes
from agenton.layers.types import AllPromptTypes, AllToolTypes, AllUserPromptTypes
PromptT = TypeVar("PromptT", default=AllPromptTypes)
ToolT = TypeVar("ToolT", default=AllToolTypes)
LayerPromptT = TypeVar("LayerPromptT", default=AllPromptTypes)
LayerToolT = TypeVar("LayerToolT", default=AllToolTypes)
UserPromptT = TypeVar("UserPromptT", default=AllUserPromptTypes)
LayerUserPromptT = TypeVar("LayerUserPromptT", default=AllUserPromptTypes)
type CompositorTransformer[InputT, OutputT] = Callable[[Sequence[InputT]], Sequence[OutputT]]
class CompositorTransformerKwargs[PromptT, ToolT, LayerPromptT, LayerToolT](TypedDict):
"""Keyword arguments that install prompt and tool transformers together."""
class CompositorTransformerKwargs[
    PromptT,
    ToolT,
    LayerPromptT,
    LayerToolT,
    UserPromptT,
    LayerUserPromptT,
](TypedDict):
    """Keyword arguments that install prompt, user prompt, and tool transformers."""

    # Maps wrapped layer prompt items to the compositor's exposed prompt type.
    prompt_transformer: CompositorTransformer[LayerPromptT, PromptT]
    # Maps wrapped layer user prompt items to the exposed user prompt type.
    user_prompt_transformer: CompositorTransformer[LayerUserPromptT, UserPromptT]
    # Maps wrapped layer tool items to the compositor's exposed tool type.
    tool_transformer: CompositorTransformer[LayerToolT, ToolT]
@ -114,7 +125,7 @@ class LayerDescriptor:
"""Registry descriptor inferred from a layer class."""
type_id: str
layer_type: type[Layer[Any, Any, Any, Any, Any, Any]]
layer_type: type[Layer[Any, Any, Any, Any, Any, Any, Any]]
config_type: type[BaseModel]
runtime_state_type: type[BaseModel]
runtime_handles_type: type[BaseModel]
@ -137,7 +148,7 @@ class LayerRegistry:
def register_layer(
self,
layer_type: type[Layer[Any, Any, Any, Any, Any, Any]],
layer_type: type[Layer[Any, Any, Any, Any, Any, Any, Any]],
*,
type_id: str | None = None,
) -> None:
@ -233,7 +244,7 @@ class CompositorSessionSnapshot(BaseModel):
@dataclass(frozen=True, slots=True)
class _LayerBuildEntry:
name: str
layer: Layer[Any, Any, Any, Any, Any, Any]
layer: Layer[Any, Any, Any, Any, Any, Any, Any]
deps: Mapping[str, str]
@ -283,21 +294,22 @@ class CompositorBuilder:
self,
*,
name: str,
layer: Layer[Any, Any, Any, Any, Any, Any],
layer: Layer[Any, Any, Any, Any, Any, Any, Any],
deps: Mapping[str, str] | None = None,
) -> Self:
"""Add a live layer instance, useful for Python objects and callables."""
self._entries.append(_LayerBuildEntry(name=name, layer=layer, deps=dict(deps or {})))
return self
def build[PromptT, ToolT, LayerPromptT, LayerToolT](
def build[PromptT, ToolT, LayerPromptT, LayerToolT, UserPromptT, LayerUserPromptT](
self,
*,
prompt_transformer: CompositorTransformer[LayerPromptT, PromptT] | None = None,
user_prompt_transformer: CompositorTransformer[LayerUserPromptT, UserPromptT] | None = None,
tool_transformer: CompositorTransformer[LayerToolT, ToolT] | None = None,
) -> "Compositor[PromptT, ToolT, LayerPromptT, LayerToolT]":
) -> "Compositor[PromptT, ToolT, LayerPromptT, LayerToolT, UserPromptT, LayerUserPromptT]":
"""Validate names/dependencies, bind deps, and return a compositor."""
layers: OrderedDict[str, Layer[Any, Any, Any, Any, Any, Any]] = OrderedDict()
layers: OrderedDict[str, Layer[Any, Any, Any, Any, Any, Any, Any]] = OrderedDict()
deps_name_mapping: dict[str, Mapping[str, str]] = {}
for entry in self._entries:
if entry.name in layers:
@ -321,24 +333,28 @@ class CompositorBuilder:
layers=layers,
deps_name_mapping=deps_name_mapping,
prompt_transformer=prompt_transformer,
user_prompt_transformer=user_prompt_transformer,
tool_transformer=tool_transformer,
)
@dataclass(kw_only=True)
class Compositor(Generic[PromptT, ToolT, LayerPromptT, LayerToolT]):
class Compositor(Generic[PromptT, ToolT, LayerPromptT, LayerToolT, UserPromptT, LayerUserPromptT]):
"""Framework-neutral ordered layer graph with lifecycle and aggregation.
``prompt_transformer`` and ``tool_transformer`` are post-aggregation hooks:
they run whenever ``prompts`` or ``tools`` is read, after layer
``prompt_transformer``, ``user_prompt_transformer``, and
``tool_transformer`` are post-aggregation hooks: they run whenever
``prompts``, ``user_prompts``, or ``tools`` is read, after layer
contributions have been collected in compositor order. Use two type
arguments for identity aggregation, or all four when layer item types differ
from exposed item types.
arguments for identity aggregation, four when prompt/tool layer item types
differ from exposed item types, or all six when user prompt item types also
differ.
"""
layers: OrderedDict[str, Layer[Any, Any, Any, Any, Any, Any]]
layers: OrderedDict[str, Layer[Any, Any, Any, Any, Any, Any, Any]]
deps_name_mapping: Mapping[str, Mapping[str, str]] = field(default_factory=dict)
prompt_transformer: CompositorTransformer[LayerPromptT, PromptT] | None = None
user_prompt_transformer: CompositorTransformer[LayerUserPromptT, UserPromptT] | None = None
tool_transformer: CompositorTransformer[LayerToolT, ToolT] | None = None
_deps_bound: bool = field(default=False, init=False)
@ -352,11 +368,13 @@ class Compositor(Generic[PromptT, ToolT, LayerPromptT, LayerToolT]):
*,
registry: LayerRegistry,
prompt_transformer: CompositorTransformer[LayerPromptT, PromptT] | None = None,
user_prompt_transformer: CompositorTransformer[LayerUserPromptT, UserPromptT] | None = None,
tool_transformer: CompositorTransformer[LayerToolT, ToolT] | None = None,
) -> "Compositor[PromptT, ToolT, LayerPromptT, LayerToolT]":
) -> "Compositor[PromptT, ToolT, LayerPromptT, LayerToolT, UserPromptT, LayerUserPromptT]":
"""Create a compositor from registry-backed serializable config."""
return CompositorBuilder(registry).add_config(conf).build(
prompt_transformer=prompt_transformer,
user_prompt_transformer=user_prompt_transformer,
tool_transformer=tool_transformer,
)
@ -513,6 +531,18 @@ class Compositor(Generic[PromptT, ToolT, LayerPromptT, LayerToolT]):
return cast(list[PromptT], result)
return list(self.prompt_transformer(result))
@property
def user_prompts(self) -> list[UserPromptT]:
    """Aggregate wrapped user prompt items from all layers in insertion order.

    Each layer's native user prompts are tagged via ``wrap_user_prompt``;
    the optional ``user_prompt_transformer`` then maps the wrapped items to
    the exposed item type. Without a transformer, wrapped items pass through.
    """
    wrapped: list[LayerUserPromptT] = [
        cast(LayerUserPromptT, layer.wrap_user_prompt(item))
        for layer in self.layers.values()
        for item in layer.user_prompts
    ]
    transformer = self.user_prompt_transformer
    if transformer is None:
        return cast(list[UserPromptT], wrapped)
    return list(transformer(wrapped))
@property
def tools(self) -> list[ToolT]:
result: list[LayerToolT] = []

View File

@ -19,21 +19,27 @@ from agenton.layers.base import (
from agenton.layers.types import (
AllPromptTypes,
AllToolTypes,
AllUserPromptTypes,
PlainLayer,
PlainPrompt,
PlainPromptType,
PlainTool,
PlainToolType,
PlainUserPrompt,
PlainUserPromptType,
PydanticAILayer,
PydanticAIPrompt,
PydanticAIPromptType,
PydanticAITool,
PydanticAIToolType,
PydanticAIUserPrompt,
PydanticAIUserPromptType,
)
__all__ = [
"AllPromptTypes",
"AllToolTypes",
"AllUserPromptTypes",
"Layer",
"LayerDeps",
"LayerControl",
@ -46,11 +52,15 @@ __all__ = [
"PlainLayer",
"PlainPrompt",
"PlainPromptType",
"PlainUserPrompt",
"PlainUserPromptType",
"PlainTool",
"PlainToolType",
"PydanticAILayer",
"PydanticAIPrompt",
"PydanticAIPromptType",
"PydanticAIUserPrompt",
"PydanticAIUserPromptType",
"PydanticAITool",
"PydanticAIToolType",
]

View File

@ -24,11 +24,12 @@ other snapshot data belong in ``LayerControl.runtime_state``; live clients,
connections, and process handles belong in ``LayerControl.runtime_handles``.
Neither category should be stored on ``self`` when it is session-local.
``Layer`` is framework-neutral over prompt and tool item types. The native
``prefix_prompts``, ``suffix_prompts``, and ``tools`` properties are the layer
authoring surface. ``wrap_prompt`` and ``wrap_tool`` are the compositor
aggregation surface; typed families such as ``agenton.layers.types.PlainLayer``
implement them to tag native values without changing layer implementations.
``Layer`` is framework-neutral over system prompt, user prompt, and tool item
types. The native ``prefix_prompts``, ``suffix_prompts``, ``user_prompts``, and
``tools`` properties are the layer authoring surface. ``wrap_prompt``,
``wrap_user_prompt``, and ``wrap_tool`` are the compositor aggregation surface;
typed families such as ``agenton.layers.types.PlainLayer`` implement them to tag
native values without changing layer implementations.
"""
from abc import ABC, abstractmethod
@ -45,6 +46,7 @@ from typing_extensions import Self, TypeVar
_DepsT = TypeVar("_DepsT", bound="LayerDeps")
_PromptT = TypeVar("_PromptT")
_UserPromptT = TypeVar("_UserPromptT")
_ToolT = TypeVar("_ToolT")
_ConfigT = TypeVar("_ConfigT", bound=BaseModel, default="EmptyLayerConfig")
_RuntimeStateT = TypeVar("_RuntimeStateT", bound=BaseModel, default="EmptyRuntimeState")
@ -59,7 +61,7 @@ class LayerDeps:
are always assigned as attributes; missing optional values become ``None``.
"""
def __init__(self, **deps: "Layer[Any, Any, Any, Any, Any, Any] | None") -> None:
def __init__(self, **deps: "Layer[Any, Any, Any, Any, Any, Any, Any] | None") -> None:
dep_specs = _get_dep_specs(type(self))
missing_names = {name for name, spec in dep_specs.items() if not spec.optional} - deps.keys()
if missing_names:
@ -169,13 +171,13 @@ class LayerControl(Generic[_RuntimeStateT, _RuntimeHandlesT]):
class LayerDepSpec:
"""Runtime dependency specification derived from a deps annotation."""
layer_type: type["Layer[Any, Any, Any, Any, Any, Any]"]
layer_type: type["Layer[Any, Any, Any, Any, Any, Any, Any]"]
optional: bool = False
class Layer(
ABC,
Generic[_DepsT, _PromptT, _ToolT, _ConfigT, _RuntimeStateT, _RuntimeHandlesT],
Generic[_DepsT, _PromptT, _UserPromptT, _ToolT, _ConfigT, _RuntimeStateT, _RuntimeHandlesT],
):
"""Framework-neutral base class for prompt/tool layers.
@ -211,17 +213,17 @@ class Layer(
if not isinstance(deps_type, type) or not issubclass(deps_type, LayerDeps):
raise TypeError(f"{cls.__name__}.deps_type must be a LayerDeps subclass.")
_get_dep_specs(deps_type)
_init_schema_type(cls, "config_type", _infer_schema_type(cls, 3, "config_type"), EmptyLayerConfig)
_init_schema_type(cls, "config_type", _infer_schema_type(cls, 4, "config_type"), EmptyLayerConfig)
_init_schema_type(
cls,
"runtime_state_type",
_infer_schema_type(cls, 4, "runtime_state_type"),
_infer_schema_type(cls, 5, "runtime_state_type"),
EmptyRuntimeState,
)
_init_schema_type(
cls,
"runtime_handles_type",
_infer_schema_type(cls, 5, "runtime_handles_type"),
_infer_schema_type(cls, 6, "runtime_handles_type"),
EmptyRuntimeHandles,
)
@ -260,14 +262,14 @@ class Layer(
runtime_handles=cast(_RuntimeHandlesT, self.runtime_handles_type.model_validate({})),
)
def bind_deps(self, deps: Mapping[str, "Layer[Any, Any, Any, Any, Any, Any] | None"]) -> None:
def bind_deps(self, deps: Mapping[str, "Layer[Any, Any, Any, Any, Any, Any, Any] | None"]) -> None:
"""Bind this layer's declared dependencies from a name-to-layer mapping.
The mapping may include more layers than the declared dependency fields.
Only names declared by ``deps_type`` are selected and validated. Missing
optional deps are bound as ``None``.
"""
resolved_deps: dict[str, Layer[Any, Any, Any, Any, Any, Any] | None] = {}
resolved_deps: dict[str, Layer[Any, Any, Any, Any, Any, Any, Any] | None] = {}
for name, spec in _get_dep_specs(self.deps_type).items():
if name not in deps:
if spec.optional:
@ -338,6 +340,10 @@ class Layer(
def suffix_prompts(self) -> Sequence[_PromptT]:
return []
@property
def user_prompts(self) -> Sequence[_UserPromptT]:
    """Native user-message fragments this layer contributes; empty by default."""
    return []
@property
def tools(self) -> Sequence[_ToolT]:
return []
@ -347,6 +353,11 @@ class Layer(
"""Wrap a native prompt item for compositor aggregation."""
raise NotImplementedError
@abstractmethod
def wrap_user_prompt(self, prompt: _UserPromptT) -> object:
"""Wrap a native user prompt item for compositor aggregation."""
raise NotImplementedError
@abstractmethod
def wrap_tool(self, tool: _ToolT) -> object:
"""Wrap a native tool item for compositor aggregation."""
@ -382,14 +393,14 @@ def _as_dep_spec(annotation: object) -> LayerDepSpec | None:
return LayerDepSpec(layer_type=layer_type)
def _as_layer_type(annotation: object) -> type[Layer[Any, Any, Any, Any, Any, Any]] | None:
def _as_layer_type(annotation: object) -> type[Layer[Any, Any, Any, Any, Any, Any, Any]] | None:
runtime_type = get_origin(annotation) or annotation
if isinstance(runtime_type, type) and issubclass(runtime_type, Layer):
return cast(type[Layer[Any, Any, Any, Any, Any, Any]], runtime_type)
return cast(type[Layer[Any, Any, Any, Any, Any, Any, Any]], runtime_type)
return None
def _infer_deps_type(layer_type: type[Layer[Any, Any, Any, Any, Any, Any]]) -> type[LayerDeps] | None:
def _infer_deps_type(layer_type: type[Layer[Any, Any, Any, Any, Any, Any, Any]]) -> type[LayerDeps] | None:
inferred = _infer_layer_generic_arg(layer_type, 0, {})
if inferred is None:
return None
@ -397,7 +408,7 @@ def _infer_deps_type(layer_type: type[Layer[Any, Any, Any, Any, Any, Any]]) -> t
def _infer_schema_type(
layer_type: type[Layer[Any, Any, Any, Any, Any, Any]],
layer_type: type[Layer[Any, Any, Any, Any, Any, Any, Any]],
index: int,
attr_name: str,
) -> type[BaseModel] | None:
@ -411,7 +422,7 @@ def _infer_schema_type(
def _infer_schema_generic_arg(
layer_type: type[Layer[Any, Any, Any, Any, Any, Any]],
layer_type: type[Layer[Any, Any, Any, Any, Any, Any, Any]],
attr_name: str,
substitutions: Mapping[object, object],
) -> object | None:
@ -441,7 +452,7 @@ def _infer_schema_generic_arg(
def _infer_layer_generic_arg(
layer_type: type[Layer[Any, Any, Any, Any, Any, Any]],
layer_type: type[Layer[Any, Any, Any, Any, Any, Any, Any]],
index: int,
substitutions: Mapping[object, object],
) -> object | None:
@ -470,7 +481,7 @@ def _infer_layer_generic_arg(
def _init_schema_type(
layer_type: type[Layer[Any, Any, Any, Any, Any, Any]],
layer_type: type[Layer[Any, Any, Any, Any, Any, Any, Any]],
attr_name: str,
inferred_schema_type: type[BaseModel] | None,
default_schema_type: type[BaseModel],
@ -531,7 +542,7 @@ def _as_model_type(value: object) -> type[BaseModel] | None:
return None
def _is_generic_layer_template(layer_type: type[Layer[Any, Any, Any, Any, Any, Any]]) -> bool:
def _is_generic_layer_template(layer_type: type[Layer[Any, Any, Any, Any, Any, Any, Any]]) -> bool:
return bool(getattr(layer_type, "__type_params__", ())) or bool(
getattr(layer_type, "__parameters__", ())
)

View File

@ -1,11 +1,12 @@
"""Typed layer family definitions.
``Layer`` itself is framework-neutral. This module defines typed layer families
that bind its prompt/tool generic slots to concrete contracts, such as ordinary
string prompts with plain callable tools or pydantic-ai prompt/tool shapes. The
families keep the trailing schema generic slots open so concrete layers can have
``config_type``, ``runtime_state_type``, and ``runtime_handles_type`` inferred
from type arguments instead of repeated class attributes.
that bind its system prompt, user prompt, and tool generic slots to concrete
contracts, such as ordinary strings with plain callable tools or pydantic-ai
prompt/tool shapes. The families keep the trailing schema generic slots open so
concrete layers can have ``config_type``, ``runtime_state_type``, and
``runtime_handles_type`` inferred from type arguments instead of repeated class
attributes.
Tagged aggregate aliases cover code paths that can accept any supported
prompt/tool family without changing the plain and pydantic-ai layer contracts.
Pydantic-ai names are imported for static analysis only, so ``agenton`` can be
@ -23,6 +24,7 @@ from typing_extensions import TypeVar, final, override
if TYPE_CHECKING:
from pydantic_ai import Tool
from pydantic_ai.messages import UserContent
from pydantic_ai.tools import SystemPromptFunc
from pydantic import BaseModel
@ -30,10 +32,12 @@ from pydantic import BaseModel
from agenton.layers.base import EmptyLayerConfig, EmptyRuntimeHandles, EmptyRuntimeState, Layer, LayerDeps
type PlainPrompt = str
type PlainUserPrompt = str
type PlainTool = Callable[..., Any]
type PydanticAIPrompt[AgentDepsT] = SystemPromptFunc[AgentDepsT]
type PydanticAIUserPrompt = UserContent
type PydanticAITool[AgentDepsT] = Tool[AgentDepsT]
@ -53,6 +57,14 @@ class PlainToolType:
kind: Literal["plain"] = field(default="plain", init=False)
@dataclass(frozen=True, slots=True)
class PlainUserPromptType:
    """Tagged plain user prompt item for aggregate user prompt transformations."""

    # The native plain user prompt value (a string fragment).
    value: PlainUserPrompt
    # Discriminator used by aggregate transformers to pick the unwrapping path.
    kind: Literal["plain"] = field(default="plain", init=False)
@dataclass(frozen=True, slots=True)
class PydanticAIPromptType[AgentDepsT]:
"""Tagged pydantic-ai prompt item for aggregate prompt transformations."""
@ -61,6 +73,14 @@ class PydanticAIPromptType[AgentDepsT]:
kind: Literal["pydantic_ai"] = field(default="pydantic_ai", init=False)
@dataclass(frozen=True, slots=True)
class PydanticAIUserPromptType:
    """Tagged pydantic-ai user prompt item for aggregate user prompts."""

    # The native pydantic-ai user prompt payload (a ``UserContent`` value).
    value: PydanticAIUserPrompt
    # Discriminator used by aggregate transformers to pick the unwrapping path.
    kind: Literal["pydantic_ai"] = field(default="pydantic_ai", init=False)
@dataclass(frozen=True, slots=True)
class PydanticAIToolType[AgentDepsT]:
"""Tagged pydantic-ai tool item for aggregate tool transformations."""
@ -70,6 +90,7 @@ class PydanticAIToolType[AgentDepsT]:
type AllPromptTypes = PlainPromptType | PydanticAIPromptType[Any]
type AllUserPromptTypes = PlainUserPromptType | PydanticAIUserPromptType
type AllToolTypes = PlainToolType | PydanticAIToolType[Any]
@ -82,7 +103,15 @@ _AgentDepsT = TypeVar("_AgentDepsT")
class PlainLayer(
Generic[_DepsT, _ConfigT, _RuntimeStateT, _RuntimeHandlesT],
Layer[_DepsT, PlainPrompt, PlainTool, _ConfigT, _RuntimeStateT, _RuntimeHandlesT],
Layer[
_DepsT,
PlainPrompt,
PlainUserPrompt,
PlainTool,
_ConfigT,
_RuntimeStateT,
_RuntimeHandlesT,
],
):
"""Layer base for ordinary string prompts and plain-callable tools."""
@ -91,6 +120,11 @@ class PlainLayer(
def wrap_prompt(self, prompt: PlainPrompt) -> PlainPromptType:
return PlainPromptType(prompt)
@final
@override
def wrap_user_prompt(self, prompt: PlainUserPrompt) -> PlainUserPromptType:
    """Tag a native plain user prompt for compositor aggregation."""
    return PlainUserPromptType(prompt)
@final
@override
def wrap_tool(self, tool: PlainTool) -> PlainToolType:
@ -102,6 +136,7 @@ class PydanticAILayer(
Layer[
_DepsT,
PydanticAIPrompt[_AgentDepsT],
PydanticAIUserPrompt,
PydanticAITool[_AgentDepsT],
_ConfigT,
_RuntimeStateT,
@ -118,6 +153,11 @@ class PydanticAILayer(
) -> PydanticAIPromptType[_AgentDepsT]:
return PydanticAIPromptType(prompt)
@final
@override
def wrap_user_prompt(self, prompt: PydanticAIUserPrompt) -> PydanticAIUserPromptType:
    """Tag a native pydantic-ai user prompt for compositor aggregation."""
    return PydanticAIUserPromptType(prompt)
@final
@override
def wrap_tool(self, tool: PydanticAITool[_AgentDepsT]) -> PydanticAIToolType[_AgentDepsT]:
@ -126,15 +166,20 @@ class PydanticAILayer(
__all__ = [
"AllPromptTypes",
"AllUserPromptTypes",
"AllToolTypes",
"PlainLayer",
"PlainPrompt",
"PlainPromptType",
"PlainUserPrompt",
"PlainUserPromptType",
"PlainTool",
"PlainToolType",
"PydanticAILayer",
"PydanticAIPrompt",
"PydanticAIPromptType",
"PydanticAIUserPrompt",
"PydanticAIUserPromptType",
"PydanticAITool",
"PydanticAIToolType",
]

View File

@ -20,6 +20,7 @@ class PromptLayerConfig(BaseModel):
"""Serializable config schema for ``PromptLayer``."""
prefix: list[str] | str = Field(default_factory=list)
user: list[str] | str = Field(default_factory=list)
suffix: list[str] | str = Field(default_factory=list)
model_config = ConfigDict(extra="forbid")
@ -38,18 +39,19 @@ class ObjectLayer[ObjectT](PlainLayer[NoLayerDeps]):
@dataclass
class PromptLayer(PlainLayer[NoLayerDeps, PromptLayerConfig]):
"""Layer that contributes configured prefix and suffix prompt fragments."""
"""Layer that contributes configured system and user prompt fragments."""
type_id = "plain.prompt"
prefix: list[str] | str = field(default_factory=list)
user: list[str] | str = field(default_factory=list)
suffix: list[str] | str = field(default_factory=list)
@classmethod
def from_config(cls, config: BaseModel):
    """Create a prompt layer from validated prompt config.

    Re-validates through ``PromptLayerConfig`` so dict-like inputs are
    coerced before the ``prefix``/``user``/``suffix`` fields are read.
    """
    validated_config = PromptLayerConfig.model_validate(config)
    return cls(prefix=validated_config.prefix, user=validated_config.user, suffix=validated_config.suffix)
@property
def prefix_prompts(self) -> list[str]:
@ -63,6 +65,12 @@ class PromptLayer(PlainLayer[NoLayerDeps, PromptLayerConfig]):
return [self.suffix]
return self.suffix
@property
def user_prompts(self) -> list[str]:
    """Configured user prompt fragments; a bare string becomes a one-item list."""
    fragments = self.user
    return [fragments] if isinstance(fragments, str) else fragments
@dataclass
class ToolsLayer(PlainLayer[NoLayerDeps]):

View File

@ -5,19 +5,21 @@ This module keeps pydantic-ai's callable shapes intact through
one explicit graph node that provides the object used as
``RunContext[ObjectT].deps`` in pydantic-ai prompt and tool callables.
Bridge construction accepts pydantic-ai's ergonomic input forms and normalizes
them at the layer boundary: string prompts become zero-arg system prompt
functions, and bare tool functions become ``Tool`` instances.
them at the layer boundary: string system prompts become zero-arg system prompt
functions, user prompts stay as pydantic-ai ``UserContent`` values, and bare
tool functions become ``Tool`` instances.
"""
from collections.abc import Sequence
from dataclasses import dataclass
from pydantic_ai import Tool
from pydantic_ai.messages import UserContent
from pydantic_ai.tools import ToolFuncEither
from typing_extensions import override
from agenton.layers.base import LayerDeps
from agenton.layers.types import PydanticAILayer, PydanticAIPrompt, PydanticAITool
from agenton.layers.types import PydanticAILayer, PydanticAIPrompt, PydanticAITool, PydanticAIUserPrompt
from agenton_collections.layers.plain.basic import ObjectLayer
@ -34,6 +36,7 @@ class PydanticAIBridgeLayer[ObjectT](
"""Bridge layer for pydantic-ai prompts and tools using one object deps."""
prefix: str | PydanticAIPrompt[ObjectT] | Sequence[str | PydanticAIPrompt[ObjectT]] = ()
user: UserContent | Sequence[UserContent] = ()
suffix: str | PydanticAIPrompt[ObjectT] | Sequence[str | PydanticAIPrompt[ObjectT]] = ()
tool_entries: Sequence[PydanticAITool[ObjectT] | ToolFuncEither[ObjectT, ...]] = ()
@ -52,6 +55,11 @@ class PydanticAIBridgeLayer[ObjectT](
def suffix_prompts(self) -> list[PydanticAIPrompt[ObjectT]]:
return _normalize_prompts(self.suffix)
@property
@override
def user_prompts(self) -> list[PydanticAIUserPrompt]:
    """Configured user prompts, normalized to a list of user content values."""
    return _normalize_user_prompts(self.user)
@property
@override
def tools(self) -> list[PydanticAITool[ObjectT]]:
@ -76,6 +84,16 @@ def _normalize_prompt[ObjectT](
return prompt
def _normalize_user_prompts(
prompts: UserContent | Sequence[UserContent],
) -> list[PydanticAIUserPrompt]:
if isinstance(prompts, str):
return [prompts]
if isinstance(prompts, Sequence):
return list(prompts)
return [prompts]
def _normalize_tool[ObjectT](
tool_entry: PydanticAITool[ObjectT] | ToolFuncEither[ObjectT, ...],
) -> PydanticAITool[ObjectT]:

View File

@ -1,7 +1,8 @@
"""Pydantic AI compositor transformer presets.
This module owns the pydantic-ai runtime dependency for transforming tagged
agenton prompt/tool items into pydantic-ai-compatible items.
agenton system prompt, user prompt, and tool items into pydantic-ai-compatible
items.
"""
from collections.abc import Sequence
@ -13,8 +14,10 @@ from agenton.compositor import CompositorTransformerKwargs
from agenton.layers.types import (
AllPromptTypes,
AllToolTypes,
AllUserPromptTypes,
PydanticAIPrompt,
PydanticAITool,
PydanticAIUserPrompt,
)
type PydanticAICompositorTransformerKwargs = CompositorTransformerKwargs[
@ -22,6 +25,8 @@ type PydanticAICompositorTransformerKwargs = CompositorTransformerKwargs[
PydanticAITool[object],
AllPromptTypes,
AllToolTypes,
PydanticAIUserPrompt,
AllUserPromptTypes,
]
@ -39,6 +44,20 @@ def _pydantic_ai_prompt_transformer(
return result
def _pydantic_ai_user_prompt_transformer(
prompts: Sequence[AllUserPromptTypes],
) -> list[PydanticAIUserPrompt]:
result: list[PydanticAIUserPrompt] = []
for prompt in prompts:
if prompt.kind == "plain":
result.append(prompt.value)
elif prompt.kind == "pydantic_ai":
result.append(prompt.value)
else:
raise NotImplementedError(f"Unsupported user prompt type: {type(prompt).__qualname__}.")
return result
def _pydantic_ai_tool_transformer(
tools: Sequence[AllToolTypes],
) -> list[PydanticAITool[object]]:
@ -55,6 +74,7 @@ def _pydantic_ai_tool_transformer(
PYDANTIC_AI_TRANSFORMERS: Final[PydanticAICompositorTransformerKwargs] = {
"prompt_transformer": _pydantic_ai_prompt_transformer,
"user_prompt_transformer": _pydantic_ai_user_prompt_transformer,
"tool_transformer": _pydantic_ai_tool_transformer,
}

View File

@ -54,12 +54,13 @@ def test_builder_creates_config_layers_with_typed_validation() -> None:
.add_config_layer(
name="prompt",
type="plain.prompt",
config={"prefix": "hello", "suffix": ["bye"]},
config={"prefix": "hello", "user": "ask politely", "suffix": ["bye"]},
)
.build()
)
assert [prompt.value for prompt in compositor.prompts] == ["hello", "bye"]
assert [prompt.value for prompt in compositor.user_prompts] == ["ask politely"]
try:
CompositorBuilder(registry).add_config_layer(

View File

@ -6,15 +6,17 @@ from inspect import Parameter, signature
from typing_extensions import override
from agenton.compositor import Compositor, CompositorTransformerKwargs
from agenton.layers import NoLayerDeps, PlainLayer, PlainPromptType, PlainToolType
from agenton.layers import NoLayerDeps, PlainLayer, PlainPromptType, PlainToolType, PlainUserPromptType
type ToolCallable = Callable[..., object]
type WrappedPrompt = tuple[str, str]
type WrappedUserPrompt = tuple[str, str]
@dataclass(slots=True)
class PromptAndToolLayer(PlainLayer[NoLayerDeps]):
prefix: list[str]
user: list[str]
suffix: list[str]
tool_entries: list[ToolCallable]
@ -28,6 +30,11 @@ class PromptAndToolLayer(PlainLayer[NoLayerDeps]):
def suffix_prompts(self) -> list[str]:
return self.suffix
@property
@override
def user_prompts(self) -> list[str]:
    """Native user prompt fragments supplied at construction."""
    return self.user
@property
@override
def tools(self) -> list[ToolCallable]:
def wrap_prompts(prompts: Sequence[PlainPromptType]) -> list[WrappedPrompt]:
    """Tag each aggregated prompt value with a "wrapped" marker tuple."""
    wrapped: list[WrappedPrompt] = []
    for item in prompts:
        wrapped.append(("wrapped", item.value))
    return wrapped
def wrap_user_prompts(prompts: Sequence[PlainUserPromptType]) -> list[WrappedUserPrompt]:
    """Tag each aggregated user prompt value with a "wrapped-user" marker tuple."""
    wrapped: list[WrappedUserPrompt] = []
    for item in prompts:
        wrapped.append(("wrapped-user", item.value))
    return wrapped
def describe_tools(tools: Sequence[PlainToolType]) -> list[str]:
    """Return the ``__name__`` of each tagged tool's wrapped callable."""
    names: list[str] = []
    for entry in tools:
        names.append(entry.value.__name__)
    return names
@ -79,6 +90,7 @@ def test_compositor_transforms_prompts_to_another_type_after_layer_ordering() ->
"first",
PromptAndToolLayer(
prefix=["first-prefix"],
user=[],
suffix=["first-suffix"],
tool_entries=[],
),
@ -87,6 +99,7 @@ def test_compositor_transforms_prompts_to_another_type_after_layer_ordering() ->
"second",
PromptAndToolLayer(
prefix=["second-prefix"],
user=[],
suffix=["second-suffix"],
tool_entries=[],
),
@ -110,7 +123,7 @@ def test_compositor_transforms_tools_to_another_type_after_layer_aggregation() -
[
(
"tools",
PromptAndToolLayer(prefix=[], suffix=[], tool_entries=[base_tool, wrapped_tool]),
PromptAndToolLayer(prefix=[], user=[], suffix=[], tool_entries=[base_tool, wrapped_tool]),
)
]
),
@ -118,3 +131,33 @@ def test_compositor_transforms_tools_to_another_type_after_layer_aggregation() -
)
assert compositor.tools == ["base_tool", "wrapped_tool"]
def test_compositor_transforms_user_prompts_after_layer_ordering() -> None:
    """User prompts are collected first-to-last, then run through the transformer."""
    first_layer = PromptAndToolLayer(prefix=[], user=["first-user"], suffix=[], tool_entries=[])
    second_layer = PromptAndToolLayer(prefix=[], user=["second-user"], suffix=[], tool_entries=[])
    compositor: Compositor[
        PlainPromptType,
        PlainToolType,
        PlainPromptType,
        PlainToolType,
        WrappedUserPrompt,
        PlainUserPromptType,
    ] = Compositor(
        layers=OrderedDict([("first", first_layer), ("second", second_layer)]),
        user_prompt_transformer=wrap_user_prompts,
    )
    expected = [
        ("wrapped-user", "first-user"),
        ("wrapped-user", "second-user"),
    ]
    assert compositor.user_prompts == expected

View File

@ -27,16 +27,19 @@ def raw_tool(ctx: RunContext[Profile], topic: str) -> str:
def test_pydantic_ai_bridge_layer_accepts_mixed_string_and_function_prompts() -> None:
    """Strings become zero-arg prompt functions; prompt callables pass through as-is."""
    layer = PydanticAIBridgeLayer[Profile](
        prefix=("plain prefix", profile_prompt),
        user=("first user", "second user"),
        suffix="plain suffix",
    )
    normalized_prefix = cast(Callable[[], str], layer.prefix_prompts[0])
    assert normalized_prefix() == "plain prefix"
    assert layer.prefix_prompts[1] is profile_prompt
    assert layer.user_prompts == ["first user", "second user"]
    normalized_suffix = cast(Callable[[], str], layer.suffix_prompts[0])
    assert normalized_suffix() == "plain suffix"

View File

@ -6,8 +6,10 @@ from pydantic_ai import Tool
from agenton.layers.types import (
PlainPromptType,
PlainToolType,
PlainUserPromptType,
PydanticAIPromptType,
PydanticAIToolType,
PydanticAIUserPromptType,
)
from agenton_collections.transformers.pydantic_ai import PYDANTIC_AI_TRANSFORMERS
@ -46,6 +48,14 @@ def test_pydantic_ai_transformers_accept_mixed_tagged_prompt_types() -> None:
assert result[1] is dynamic_prompt
def test_pydantic_ai_transformers_accept_tagged_user_prompt_types() -> None:
    """The user prompt transformer unwraps both plain and pydantic-ai tagged items."""
    tagged = [PlainUserPromptType("plain user"), PydanticAIUserPromptType("pydantic user")]
    transformed = PYDANTIC_AI_TRANSFORMERS["user_prompt_transformer"](tagged)
    assert transformed == ["plain user", "pydantic user"]
def test_pydantic_ai_transformers_wrap_tagged_plain_tools() -> None:
result = PYDANTIC_AI_TRANSFORMERS["tool_transformer"]([PlainToolType(plain_tool)])

View File

@ -26,6 +26,7 @@ def test_agenton_basics_example_smoke() -> None:
assert result.returncode == 0, result.stderr
assert "Prompts:" in result.stdout
assert "User prompts:" in result.stdout
assert "Tools:" in result.stdout
assert "Lifecycle: ['create', 'suspend', 'resume', 'delete']" in result.stdout
@ -35,6 +36,7 @@ def test_agenton_pydantic_ai_example_smoke() -> None:
assert result.returncode == 0, result.stderr
assert "SystemPromptPart: Prefer concrete details." in result.stdout
assert "UserPromptPart: [\"Use the tools for 'layer composition'.\"]" in result.stdout
assert "ToolCallPart: count_words(" in result.stdout
assert "ToolCallPart: write_tagline(" in result.stdout
assert "TextPart:" in result.stdout