diff --git a/examples/langgraph-checkpointer/agent.py b/examples/langgraph-checkpointer/agent.py index 8ea98ae8..8f29e310 100644 --- a/examples/langgraph-checkpointer/agent.py +++ b/examples/langgraph-checkpointer/agent.py @@ -50,7 +50,7 @@ def assistant(state: MessagesState): ) builder.add_edge('tools', 'assistant') -memory = DaprCheckpointer(store_name='statestore', key_prefix='dapr') +memory = DaprCheckpointer(state_store_name='statestore', key_prefix='dapr') react_graph_memory = builder.compile(checkpointer=memory) config = {'configurable': {'thread_id': '1'}} diff --git a/examples/langgraph-checkpointer/components/agent-registry.yaml b/examples/langgraph-checkpointer/components/agent-registry.yaml new file mode 100644 index 00000000..b6c1fe4d --- /dev/null +++ b/examples/langgraph-checkpointer/components/agent-registry.yaml @@ -0,0 +1,14 @@ +apiVersion: dapr.io/v1alpha1 +kind: Component +metadata: + name: agent-registry +spec: + type: state.redis + version: v1 + metadata: + - name: redisHost + value: localhost:6379 + - name: redisPassword + value: "" + - name: keyPrefix + value: none \ No newline at end of file diff --git a/examples/langgraph-checkpointer/dapr-llm.yaml b/examples/langgraph-checkpointer/dapr-llm.yaml new file mode 100644 index 00000000..9bc53240 --- /dev/null +++ b/examples/langgraph-checkpointer/dapr-llm.yaml @@ -0,0 +1,12 @@ +# https://docs.dapr.io/developing-applications/local-development/multi-app-dapr-run/multi-app-template/#template-properties +version: 1 +common: + resourcesPath: ./components + logLevel: info + appLogDestination: console + daprdLogDestination: console + +apps: +- appID: langgraph + appDirPath: ./ + command: ["python3", "agent.py"] \ No newline at end of file diff --git a/examples/strands-agent/README.md b/examples/strands-agent/README.md new file mode 100644 index 00000000..6e271ccc --- /dev/null +++ b/examples/strands-agent/README.md @@ -0,0 +1,168 @@ +# Dapr For Agents - Strands Agent with Persistent Session Storage + +Supporting Dapr-backed session persistence for Strands Agent SDK with distributed state storage. + +## Overview + +This example demonstrates how to use a **real Strands Agent** from the Strands Agent SDK together with `DaprSessionManager` for distributed session persistence. The example shows: + +- Creating a Strands Agent with the official Strands SDK +- Using DaprSessionManager for distributed session storage across restarts +- Tool integration (weather checking example) +- Conversation history persistence and restoration +- Seamless LLM integration through Strands model providers + +**Note:** This uses the actual [Strands Agent SDK](https://strandsagents.com/), not just session types. + +## Pre-requisites + +- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started) +- [Install Python 3.10+](https://www.python.org/downloads/) +- OpenAI API key (or configure a different model provider) + +## Install Dependencies + +```sh +pip3 install -r requirements.txt +``` + +Set your API key: +```sh +export OPENAI_API_KEY=your-key-here +``` + +## Run the Example + +Run the following command in a terminal/command prompt: + +```sh +dapr run --app-id strands-agent --resources-path ./components -- python3 agent.py +``` + +### What to Expect + +The example will: + +1. Create a Strands Agent with: + - GPT-4o model via OpenAIModel provider + - A `get_weather` tool function + - System prompt and agent metadata + - DaprSessionManager for session persistence +2. Process user queries: + - "What's the weather in San Francisco?" 
→ Agent uses get_weather tool + - "How about New York?" → Agent continues conversation with context +3. Persist all conversation state to Dapr state store +4. On subsequent runs, automatically restore full conversation history + +Run the example again to see the conversation resume from where it left off! + +### Example Output + +**First run:** +``` +📂 Using session: assistant-session-1 +✅ Created Strands Agent: weather-assistant + Model: OpenAIModel(model='gpt-4o') + Tools: ['get_weather'] + Session Manager: DaprSessionManager + +🆕 Starting fresh conversation + +👤 USER: What's the weather in San Francisco? +🤖 ASSISTANT: The weather in San Francisco is sunny and 72°F + +👤 USER: How about New York? +🤖 ASSISTANT: Let me check that for you. The weather in New York is sunny and 72°F + +✅ Conversation complete! +🔄 Run again to resume the conversation with full history from Dapr state store. +``` + +**Second run (conversation resumes):** +``` +📂 Using session: assistant-session-1 +✅ Created Strands Agent: weather-assistant + Model: OpenAIModel(model='gpt-4o') + Tools: ['get_weather'] + Session Manager: DaprSessionManager + +💬 Resuming conversation with 4 previous messages +──────────────────────────────────────────────────────────── +👤 USER: What's the weather in San Francisco? +🤖 ASSISTANT: The weather in San Francisco is sunny and 72°F +👤 USER: How about New York? +🤖 ASSISTANT: Let me check that for you. The weather in New York is sunny and 72°F +──────────────────────────────────────────────────────────── + +👤 USER: What's the weather in San Francisco? +🤖 ASSISTANT: I just checked that - it's still sunny and 72°F in San Francisco! +``` + +## Key Features + +### Real Strands Agent +- Uses the official Strands Agent SDK from strandsagents.com +- Full agent capabilities: tools, system prompts, state management +- Multiple LLM provider support (Anthropic, OpenAI, Bedrock, etc.) + +### Distributed Session Persistence +- DaprSessionManager stores all conversation state in Dapr state stores +- Supports any Dapr state store: Redis, PostgreSQL, MongoDB, Cosmos DB, etc. +- Automatic conversation restoration across application restarts +- Full message history maintained + +### Tool Integration +- Define Python functions as tools +- Agent automatically calls tools when needed +- Tool results integrated into conversation flow + +### LLM Provider Flexibility +- Easy to swap model providers (AnthropicModel, OpenAIModel, etc.) +- Configure model parameters (temperature, max tokens, etc.) +- Strands handles all LLM interactions + +### State Persistence +- Automatic state synchronization with Dapr +- Support for TTL and consistency levels +- Compatible with any Dapr state store (Redis, PostgreSQL, Cosmos DB, etc.) + +## Customization + +You can customize the session manager with: + +```python +session_manager = DaprSessionManager( + session_id='my-session', + state_store_name='statestore', + dapr_client=dapr_client, + ttl=3600, # Optional: TTL in seconds + consistency='strong', # Optional: 'eventual' or 'strong' +) +``` + +## Configuration + +### State Store + +The example uses a Redis state store component. You can modify [components/statestore.yaml](./components/statestore.yaml) to use a different state store backend supported by Dapr. + +### Conversation Provider + +The example uses the `echo` conversation component by default (which echoes back your input). To use a real LLM: + +1. Set up a conversation component (e.g., OpenAI, Anthropic) in `components/conversation.yaml` +2. 
Update the `conversation_provider` variable in `agent.py` to match your component name +3. Set required API keys as environment variables + +Example for OpenAI: +```bash +export OPENAI_API_KEY="your-api-key" +``` + +See [examples/conversation](../conversation/) for more conversation component examples. + +## Learn More + +- [Dapr State Management](https://docs.dapr.io/developing-applications/building-blocks/state-management/) +- [Strands Framework](https://github.com/microsoft/strands) +- [Dapr Python SDK](https://github.com/dapr/python-sdk) diff --git a/examples/strands-agent/agent.py b/examples/strands-agent/agent.py new file mode 100644 index 00000000..5203225f --- /dev/null +++ b/examples/strands-agent/agent.py @@ -0,0 +1,105 @@ +""" +Example demonstrating a Strands Agent with DaprSessionManager for persistent session storage. + +This example shows how to: +- Create a Strands Agent with the Strands Agent SDK +- Use DaprSessionManager for distributed session persistence +- Leverage LLM providers through Strands +- Maintain conversation history across restarts +""" + +import os + +from dapr.ext.strands import DaprSessionManager +from strands import Agent, tool +from strands.models import OpenAIModel + +from dapr.clients import DaprClient + + +@tool +def get_weather(location: str) -> str: + """Get the current weather for a location. + + Args: + location: The city and state, e.g. "San Francisco, CA" + + Returns: + A description of the current weather + """ + return f'The weather in {location} is sunny and 72°F' + + +def run_agent_conversation(): + """Run a Strands Agent with Dapr session persistence.""" + + openai_api_key = os.getenv('OPENAI_API_KEY') + if not openai_api_key: + print('❌ Error: OPENAI_API_KEY environment variable not set') + print('💡 Set it with: export OPENAI_API_KEY=your-key-here') + return + + session_id = 'assistant-session-1' + agent_id = 'weather-assistant' + + with DaprClient() as dapr_client: + session_manager = DaprSessionManager( + session_id=session_id, state_store_name='statestore', dapr_client=dapr_client + ) + + agent = Agent( + model=OpenAIModel(model_id='gpt-4o'), + system_prompt=( + 'You are a helpful weather assistant. ' + 'You can check the weather for any location. ' + 'Be concise and friendly in your responses.' 
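+                # Note: the adjacent string literals above are implicitly concatenated
+                # into a single system prompt string; adjust them to tune the assistant.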
+ ), + tools=[get_weather], + agent_id=agent_id, + name='Weather Assistant', + description='An AI assistant that helps users check the weather', + state={ + 'role': 'Weather Assistant', + 'goal': 'Help users get weather information', + 'instructions': ['Be concise', 'Be friendly', 'Always use the get_weather tool'], + 'max_iterations': 5, + }, + session_manager=session_manager, + ) + + queries = [ + "What's the weather in San Francisco?", + 'How about New York?', + ] + + for query in queries: + print(f'👤 USER: {query}') + + try: + import asyncio + + response = asyncio.run(agent.invoke_async(query)) + + if hasattr(response, 'content'): + content = response.content + elif hasattr(response, 'text'): + content = response.text + else: + content = str(response) + + print(f'🤖 ASSISTANT: {content}') + + except Exception as e: + print(f'❌ Error: {e}') + print('💡 Tip: Make sure you have OPENAI_API_KEY set in your environment.') + print(' Or switch to a different model provider (Anthropic, Bedrock, etc.)') + break + + print() + + print('✅ Conversation complete!') + print('🔄 Run again to resume the conversation with full history from Dapr state store.') + + +if __name__ == '__main__': + run_agent_conversation() diff --git a/examples/strands-agent/components/agent-registry.yaml b/examples/strands-agent/components/agent-registry.yaml new file mode 100644 index 00000000..b6c1fe4d --- /dev/null +++ b/examples/strands-agent/components/agent-registry.yaml @@ -0,0 +1,14 @@ +apiVersion: dapr.io/v1alpha1 +kind: Component +metadata: + name: agent-registry +spec: + type: state.redis + version: v1 + metadata: + - name: redisHost + value: localhost:6379 + - name: redisPassword + value: "" + - name: keyPrefix + value: none \ No newline at end of file diff --git a/examples/strands-agent/components/conversation.yaml b/examples/strands-agent/components/conversation.yaml new file mode 100644 index 00000000..674ba38e --- /dev/null +++ b/examples/strands-agent/components/conversation.yaml @@ -0,0 +1,8 @@ +apiVersion: dapr.io/v1alpha1 +kind: Component +metadata: + name: echo +spec: + type: conversation.echo + version: v1 + metadata: [] diff --git a/examples/strands-agent/components/statestore.yaml b/examples/strands-agent/components/statestore.yaml new file mode 100644 index 00000000..2f676bff --- /dev/null +++ b/examples/strands-agent/components/statestore.yaml @@ -0,0 +1,14 @@ +apiVersion: dapr.io/v1alpha1 +kind: Component +metadata: + name: statestore +spec: + type: state.redis + version: v1 + metadata: + - name: redisHost + value: localhost:6379 + - name: redisPassword + value: "" + - name: actorStateStore + value: "true" diff --git a/examples/strands-agent/dapr-llm.yaml b/examples/strands-agent/dapr-llm.yaml new file mode 100644 index 00000000..d2916cf7 --- /dev/null +++ b/examples/strands-agent/dapr-llm.yaml @@ -0,0 +1,12 @@ +# https://docs.dapr.io/developing-applications/local-development/multi-app-dapr-run/multi-app-template/#template-properties +version: 1 +common: + resourcesPath: ./components + logLevel: info + appLogDestination: console + daprdLogDestination: console + +apps: +- appID: strands-agent + appDirPath: ./ + command: ["python3", "agent.py"] \ No newline at end of file diff --git a/examples/strands-agent/requirements.txt b/examples/strands-agent/requirements.txt new file mode 100644 index 00000000..e98a79e6 --- /dev/null +++ b/examples/strands-agent/requirements.txt @@ -0,0 +1,3 @@ +dapr>=1.15.0 +dapr-ext-strands>=0.1.0 +strands-agents>=1.24.0 diff --git a/ext/dapr-ext-agent_core/LICENSE 
b/ext/dapr-ext-agent_core/LICENSE new file mode 100644 index 00000000..be033a7f --- /dev/null +++ b/ext/dapr-ext-agent_core/LICENSE @@ -0,0 +1,203 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021 The Dapr Authors. + + and others that have contributed code to the public domain. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/ext/dapr-ext-agent_core/README.rst b/ext/dapr-ext-agent_core/README.rst new file mode 100644 index 00000000..16f73bb1 --- /dev/null +++ b/ext/dapr-ext-agent_core/README.rst @@ -0,0 +1,22 @@ +dapr-ext-agent_core extension +======================= + +|pypi| + +.. |pypi| image:: https://badge.fury.io/py/dapr-ext-agent-core.svg + :target: https://pypi.org/project/dapr-ext-agent-core/ + +This is the Dapr Agent Core extension for Dapr for Agents in the Dapr Python SDK. + +Installation +------------ + +:: + + pip install dapr-ext-agent_core + +References +---------- + +* `Dapr `_ +* `Dapr Python-SDK `_ diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/__init__.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/__init__.py new file mode 100644 index 00000000..87d6d7f1 --- /dev/null +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/__init__.py @@ -0,0 +1,26 @@ +from .introspection import detect_framework, find_agent_in_stack +from .metadata import AgentRegistryAdapter +from .types import ( + AgentMetadata, + AgentMetadataSchema, + LLMMetadata, + MemoryMetadata, + PubSubMetadata, + RegistryMetadata, + SupportedFrameworks, + ToolMetadata, +) + +__all__ = [ + 'SupportedFrameworks', + 'AgentMetadataSchema', + 'AgentMetadata', + 'LLMMetadata', + 'PubSubMetadata', + 'ToolMetadata', + 'RegistryMetadata', + 'MemoryMetadata', + 'AgentRegistryAdapter', + 'find_agent_in_stack', + 'detect_framework', +] diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py new file mode 100644 index 00000000..dc04e029 --- /dev/null +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py @@ -0,0 +1,118 @@ +""" +Copyright 2026 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import gc +import inspect +import logging +from typing import Any, Optional + +logger = logging.getLogger(__name__) + + +def find_agent_in_stack() -> Optional[Any]: + """ + Walk up the call stack to find an agent/graph object. + + Currently supports: + - LangGraph: CompiledStateGraph or SyncPregelLoop + - Strands: DaprSessionManager + - Dapr Agents: Any object from dapr_agents module + + Returns: + The agent/graph object if found, None otherwise. 
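+
+    Example (illustrative sketch; assumes one of the supported agent objects is
+    alive somewhere on the current call stack when this is invoked):
+
+        agent = find_agent_in_stack()
+        if agent is not None:
+            framework = detect_framework(agent)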
+ """ + # First, try to find in the stack + for frame_info in inspect.stack(): + frame_locals = frame_info.frame.f_locals + + # Look for 'self' in frame locals + if 'self' in frame_locals: + obj = frame_locals['self'] + obj_type = type(obj).__name__ + obj_module = type(obj).__module__ + + # LangGraph support - CompiledStateGraph + if obj_type == 'CompiledStateGraph': + return obj + + # Dapr Agents support - any agent from dapr_agents module + # Use GC to find the actual derived agent that may reference this object + if 'dapr_agents' in obj_module: + # If this is a base class (DaprInfra, AgentBase), use GC to find derived agent + if obj_type in ('DaprInfra', 'AgentBase'): + referrers = gc.get_referrers(obj) + for ref in referrers: + ref_type = type(ref).__name__ + ref_module = type(ref).__module__ + # Look for derived agent classes that own this base object + if 'dapr_agents' in ref_module and ref_type not in ( + 'DaprInfra', + 'AgentBase', + ): + return ref + # Return the object directly if it's already a derived class + return obj + + # Strands support - DaprSessionManager + # Use gc to find the Agent that owns this session manager (similar to LangGraph checkpointer) + if obj_type == 'DaprSessionManager': + referrers = gc.get_referrers(obj) + for ref in referrers: + ref_type = type(ref).__name__ + ref_module = type(ref).__module__ + # Look for Strands Agent that owns this session manager + if ref_type == 'Agent' and 'strands' in ref_module: + return ref + # Don't register bare DaprSessionManager - only register when Agent exists + return None + + # If we found a checkpointer, use gc to find the graph that references it + if obj_type == 'DaprCheckpointer': + # Use garbage collector to find objects referencing this checkpointer + referrers = gc.get_referrers(obj) + for ref in referrers: + ref_type = type(ref).__name__ + if ref_type == 'CompiledStateGraph': + return ref + + return None + + +def detect_framework(agent: Any) -> Optional[str]: + """ + Detect the framework type from an agent object. + + Args: + agent: The agent object to inspect. + + Returns: + Framework name string if detected, None otherwise. 
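+
+    Example (illustrative; ``compiled_graph`` and ``strands_agent`` are placeholder
+    objects standing in for a LangGraph CompiledStateGraph and a strands.Agent):
+
+        detect_framework(compiled_graph)   # -> 'langgraph'
+        detect_framework(strands_agent)    # -> 'strands'
+        detect_framework(object())         # -> None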
+ """ + agent_type = type(agent).__name__ + agent_module = type(agent).__module__ + + # LangGraph + if agent_type == 'CompiledStateGraph' or 'langgraph' in agent_module: + return 'langgraph' + + # Dapr Agents + if 'dapr_agents' in agent_module: + return 'dapr_agents' + + # Strands - detect both Agent class and DaprSessionManager + if agent_type == 'Agent' and 'strands' in agent_module: + return 'strands' + if agent_type == 'DaprSessionManager': + return 'strands' + + return None diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/__init__.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/__init__.py new file mode 100644 index 00000000..18d4498b --- /dev/null +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/__init__.py @@ -0,0 +1,11 @@ +from .base import BaseAgentMapper +from .dapr_agents import DaprAgentsMapper +from .langgraph import LangGraphMapper +from .strands import StrandsMapper + +__all__ = [ + 'BaseAgentMapper', + 'DaprAgentsMapper', + 'LangGraphMapper', + 'StrandsMapper', +] diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/base.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/base.py new file mode 100644 index 00000000..a4ff81f7 --- /dev/null +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/base.py @@ -0,0 +1,70 @@ +""" +Copyright 2026 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +from abc import ABC, abstractmethod +from typing import Any + +from dapr.ext.agent_core.types import AgentMetadataSchema + + +class BaseAgentMapper(ABC): + """Abstract base class for agent metadata mappers. + + Provides common functionality for extracting metadata from different + agent frameworks (Strands, LangGraph, Dapr Agents). + """ + + @staticmethod + def _extract_provider(module_name: str) -> str: + """Extract provider name from module path. + + Args: + module_name: Python module name (e.g., 'langchain_openai.chat_models') + + Returns: + Provider identifier (e.g., 'openai', 'anthropic', 'azure_openai') + """ + module_lower = module_name.lower() + + # Check more specific providers first + if 'vertexai' in module_lower: + return 'vertexai' + elif 'bedrock' in module_lower: + return 'bedrock' + elif 'azure' in module_lower: + return 'azure_openai' + elif 'openai' in module_lower: + return 'openai' + elif 'anthropic' in module_lower: + return 'anthropic' + elif 'ollama' in module_lower: + return 'ollama' + elif 'google' in module_lower or 'gemini' in module_lower: + return 'google' + elif 'cohere' in module_lower: + return 'cohere' + + return 'unknown' + + @abstractmethod + def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSchema: + """Map agent to standardized metadata schema. 
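+
+        Implemented per framework by DaprAgentsMapper, LangGraphMapper, and
+        StrandsMapper in dapr.ext.agent_core.mapping.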
+ + Args: + agent: Framework-specific agent instance + schema_version: Schema version to use + + Returns: + AgentMetadataSchema with extracted metadata + """ + pass diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/dapr_agents.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/dapr_agents.py new file mode 100644 index 00000000..d3dbd541 --- /dev/null +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/dapr_agents.py @@ -0,0 +1,101 @@ +import gc +import json +import logging +from datetime import datetime, timezone +from typing import Any + +from dapr.ext.agent_core.mapping.base import BaseAgentMapper +from dapr.ext.agent_core.types import ( + AgentMetadata, + AgentMetadataSchema, + LLMMetadata, + MemoryMetadata, + PubSubMetadata, + RegistryMetadata, + ToolMetadata, +) + +logger = logging.getLogger(__name__) + + +class DaprAgentsMapper(BaseAgentMapper): + def __init__(self) -> None: + pass + + def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSchema: + # If we received a base class (DaprInfra, AgentBase), try to find the actual derived agent via GC + agent_type = type(agent).__name__ + if agent_type in ('DaprInfra', 'AgentBase'): + referrers = gc.get_referrers(agent) + for ref in referrers: + ref_type = type(ref).__name__ + ref_module = type(ref).__module__ + # Look for derived agent classes in dapr_agents module + if 'dapr_agents' in ref_module and ref_type not in ('DaprInfra', 'AgentBase'): + agent = ref + break + + profile = getattr(agent, 'profile', None) + memory = getattr(agent, 'memory', None) + pubsub = getattr(agent, 'pubsub', None) + llm = getattr(agent, 'llm', None) + registry = getattr(agent, '_registry', None) + execution = getattr(agent, 'execution', None) + + return AgentMetadataSchema( + schema_version=schema_version, + agent=AgentMetadata( + appid=getattr(agent, 'appid', ''), + type=type(agent).__name__, + orchestrator=False, + role=getattr(profile, 'role', '') if profile else '', + goal=getattr(profile, 'goal', '') if profile else '', + instructions=getattr(profile, 'instructions', None) if profile else [], + statestore=getattr(memory, 'store_name', '') if memory else '', + system_prompt=getattr(profile, 'system_prompt', '') if profile else '', + ), + name=getattr(agent, 'name', ''), + registered_at=datetime.now(timezone.utc).isoformat(), + pubsub=PubSubMetadata( + name=getattr(pubsub, 'pubsub_name', '') if pubsub else '', + broadcast_topic=getattr(pubsub, 'broadcast_topic', None) if pubsub else None, + agent_topic=getattr(pubsub, 'agent_topic', None) if pubsub else None, + ), + memory=MemoryMetadata( + type=type(memory).__name__ if memory else '', + session_id=getattr(memory, 'session_id', None) if memory else None, + statestore=getattr(memory, 'store_name', None) if memory else None, + ), + llm=LLMMetadata( + client=type(llm).__name__ if llm else '', + provider=getattr(llm, 'provider', 'unknown') if llm else 'unknown', + api=getattr(llm, 'api', 'unknown') if llm else 'unknown', + model=getattr(llm, 'model', 'unknown') if llm else 'unknown', + component_name=getattr(llm, 'component_name', None) if llm else None, + base_url=getattr(llm, 'base_url', None) if llm else None, + azure_endpoint=getattr(llm, 'azure_endpoint', None) if llm else None, + azure_deployment=getattr(llm, 'azure_deployment', None) if llm else None, + prompt_template=type(getattr(llm, 'prompt_template', None)).__name__ + if llm and getattr(llm, 'prompt_template', None) + else None, + ), + registry=RegistryMetadata( + 
statestore=getattr(getattr(registry, 'store', None), 'store_name', None) + if registry + else None, + name=getattr(registry, 'team_name', None) if registry else None, + ), + tools=[ + ToolMetadata( + tool_name=getattr(tool, 'name', ''), + tool_description=getattr(tool, 'description', ''), + tool_args=json.dumps(getattr(tool, 'args_schema', {})) + if hasattr(tool, 'args_schema') + else '{}', + ) + for tool in getattr(agent, 'tools', []) + ], + max_iterations=getattr(execution, 'max_iterations', None) if execution else None, + tool_choice=getattr(execution, 'tool_choice', None) if execution else None, + agent_metadata=getattr(agent, 'agent_metadata', None), + ) diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py new file mode 100644 index 00000000..08ddce4d --- /dev/null +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py @@ -0,0 +1,145 @@ +import logging +from datetime import datetime, timezone +from typing import TYPE_CHECKING, Any, Dict, Optional + +from dapr.ext.agent_core.mapping.base import BaseAgentMapper +from dapr.ext.agent_core.types import ( + AgentMetadata, + AgentMetadataSchema, + LLMMetadata, + MemoryMetadata, + PubSubMetadata, + RegistryMetadata, + ToolMetadata, +) +from langgraph.pregel._read import PregelNode + +if TYPE_CHECKING: + from dapr.ext.langgraph import DaprCheckpointer + +logger = logging.getLogger(__name__) + + +class LangGraphMapper(BaseAgentMapper): + def __init__(self) -> None: + pass + + def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSchema: + introspected_vars: Dict[str, Any] = vars(agent) # type: ignore + + nodes: Dict[str, object] = introspected_vars.get('nodes', {}) # type: ignore + + tools: list[Dict[str, Any]] = [] + llm_metadata: Optional[Dict[str, Any]] = None + system_prompt: Optional[str] = None + + for node_name, obj in nodes.items(): # type: ignore + if node_name == '__start__': + # We don't want to process the start node + continue + + if isinstance(obj, PregelNode): + node_vars = vars(obj) + if 'bound' in node_vars.keys(): + bound = node_vars['bound'] + + # Check if it's a ToolNode + if hasattr(bound, '_tools_by_name'): + tools_by_name = getattr(bound, '_tools_by_name', {}) + tools.extend( + [ + { + 'name': name, + 'description': getattr(tool, 'description', ''), + 'args_schema': getattr( + tool, 'args_schema', {} + ), # TODO: See if we can extract the pydantic model + } + for name, tool in tools_by_name.items() + ] + ) + + # Check if it's an assistant RunnableCallable + elif type(bound).__name__ == 'RunnableCallable': + logger.info(f"Node '{node_name}' is a RunnableCallable") + + func = getattr(bound, 'func', None) + if func and hasattr(func, '__globals__'): + func_globals = func.__globals__ + + for _, global_value in func_globals.items(): + var_type = type(global_value).__name__ + var_module = type(global_value).__module__ + + if 'chat' in var_type.lower(): + model = getattr(global_value, 'model_name', None) or getattr( + global_value, 'model', None + ) + + if model and not llm_metadata: + llm_metadata = { + 'client': var_type, + 'provider': self._extract_provider(var_module), + 'model': model, + 'base_url': getattr(global_value, 'base_url', None), + } + + # Look for system message + elif var_type == 'SystemMessage': + content = getattr(global_value, 'content', None) + if content and not system_prompt: + system_prompt = content + + checkpointer: Optional['DaprCheckpointer'] = 
introspected_vars.get('checkpointer', None) # type: ignore + + return AgentMetadataSchema( + schema_version=schema_version, + agent=AgentMetadata( + appid='', + type=type(agent).__name__, + orchestrator=False, + role='Assistant', + goal=system_prompt or '', + instructions=[], + statestore=checkpointer.state_store_name if checkpointer else None, # type: ignore + system_prompt='', + ), + name=agent.get_name() if hasattr(agent, 'get_name') else '', + registered_at=datetime.now(timezone.utc).isoformat(), + pubsub=PubSubMetadata( + name='', + broadcast_topic=None, + agent_topic=None, + ), + memory=MemoryMetadata( + type='DaprCheckpointer', + session_id=None, + statestore=checkpointer.state_store_name if checkpointer else None, # type: ignore + ), + llm=LLMMetadata( + client=llm_metadata.get('client', '') if llm_metadata else '', + provider=llm_metadata.get('provider', 'unknown') if llm_metadata else 'unknown', + api='chat', + model=llm_metadata.get('model', 'unknown') if llm_metadata else 'unknown', + component_name=None, + base_url=llm_metadata.get('base_url') if llm_metadata else None, + azure_endpoint=llm_metadata.get('azure_endpoint') if llm_metadata else None, + azure_deployment=llm_metadata.get('azure_deployment') if llm_metadata else None, + prompt_template=None, + ), + registry=RegistryMetadata( + statestore=None, + name=None, + ), + tools=[ + ToolMetadata( + tool_name=tool.get('name', ''), + tool_description=tool.get('description', ''), + tool_args='', + ) + for tool in tools + ], + max_iterations=1, + tool_choice='auto', + agent_metadata=None, + ) diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py new file mode 100644 index 00000000..56ede1b1 --- /dev/null +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py @@ -0,0 +1,443 @@ +import logging +from datetime import datetime, timezone +from typing import TYPE_CHECKING, Any, Dict, List, Optional + +from dapr.ext.agent_core.mapping.base import BaseAgentMapper +from dapr.ext.agent_core.types import ( + AgentMetadata, + AgentMetadataSchema, + LLMMetadata, + MemoryMetadata, + RegistryMetadata, + ToolMetadata, +) + +if TYPE_CHECKING: + from dapr.ext.strands import DaprSessionManager + from strands.types.session import SessionAgent + +logger = logging.getLogger(__name__) + + +class StrandsMapper(BaseAgentMapper): + def __init__(self) -> None: + pass + + def _is_strands_agent(self, agent: Any) -> bool: + """Check if agent is an actual Strands Agent (not just session manager).""" + agent_type = type(agent).__name__ + agent_module = type(agent).__module__ + return agent_type == 'Agent' and 'strands' in agent_module + + def _extract_from_strands_agent(self, agent: Any) -> Dict[str, Any]: + """Extract metadata from a real Strands Agent. 
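+
+        Pulls the agent id, name, description, system prompt, custom state, model
+        configuration, registered tools, and any attached DaprSessionManager
+        directly off the agent instance.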
+ + Args: + agent: A strands.Agent instance + + Returns: + Dictionary with extracted metadata + """ + metadata = {} + + # Basic agent info + metadata['agent_id'] = getattr(agent, 'agent_id', None) or getattr(agent, 'name', 'agent') + metadata['name'] = getattr(agent, 'name', None) + metadata['description'] = getattr(agent, 'description', None) + + # System prompt + system_prompt = getattr(agent, 'system_prompt', None) + if system_prompt: + if isinstance(system_prompt, str): + metadata['system_prompt'] = system_prompt + elif isinstance(system_prompt, list): + # Join list of content blocks + metadata['system_prompt'] = ' '.join(str(block) for block in system_prompt) + + # Agent state (custom metadata) + state = getattr(agent, 'state', {}) + try: + if state: + # Convert to dict if it's JSONSerializableDict (which doesn't support .get(key, default)) + state_dict = dict(state) if hasattr(state, '__iter__') else {} + metadata['role'] = state_dict.get('role') or metadata.get('name', 'Agent') + metadata['goal'] = state_dict.get('goal') or metadata.get('description', '') + metadata['instructions'] = state_dict.get('instructions') or [] + metadata['max_iterations'] = state_dict.get('max_iterations') + else: + metadata['role'] = metadata.get('name', 'Agent') + metadata['goal'] = metadata.get('description', '') + metadata['instructions'] = [] + except Exception: + metadata['role'] = metadata.get('name', 'Agent') + metadata['goal'] = metadata.get('description', '') + metadata['instructions'] = [] + + # LLM Model + model = getattr(agent, 'model', None) + if model: + model_type = type(model).__name__ + + # Check if model has a config + config = getattr(model, 'config', None) + + # Try multiple possible attribute names for the model ID + model_id = ( + getattr(model, 'model_id', None) + or getattr(model, 'model', None) + or getattr(model, 'name', None) + ) + if config: + if isinstance(config, dict) and 'model_id' in config: + model_id = config['model_id'] + elif hasattr(config, 'model'): + model_id = config.model + elif hasattr(config, 'model_id'): + model_id = config.model_id + + if model_id is None: + model_id = 'unknown' + + # Extract provider from model module or type + model_module = type(model).__module__ + provider = self._extract_provider(model_module) + + # Fallback: parse from model type name if provider still unknown + if provider == 'unknown': + provider = model_type.replace('Model', '').lower() + + metadata['llm'] = { + 'provider': provider, + 'model': str(model_id), + 'model_type': model_type, + } + + # Tools - extract from tool_registry.registry + # In Strands, tools are stored in agent.tool_registry.registry as AgentTool objects + tools_metadata = [] + tool_registry = getattr(agent, 'tool_registry', None) + if tool_registry and hasattr(tool_registry, 'registry'): + registry_dict = tool_registry.registry + if isinstance(registry_dict, dict): + for tool_name, agent_tool in registry_dict.items(): + tool_info = { + 'name': tool_name, + 'description': getattr(agent_tool, 'description', '') + or getattr(agent_tool, '__doc__', ''), + } + tools_metadata.append(tool_info) + + metadata['tools'] = tools_metadata + + # Session manager - stored as _session_manager in Strands Agent + session_manager = getattr(agent, '_session_manager', None) + if session_manager and type(session_manager).__name__ == 'DaprSessionManager': + metadata['session_manager'] = session_manager + metadata['state_store'] = getattr(session_manager, 'state_store_name', None) + metadata['session_id'] = getattr(session_manager, 
'_session_id', None) + + return metadata + + def _get_session_agent( + self, session_manager: 'DaprSessionManager', session_id: str + ) -> Optional['SessionAgent']: + """ + Get the primary SessionAgent from a session. + + Reads the session manifest to find agents, and returns the first one. + + Args: + session_manager: The DaprSessionManager instance + session_id: Session ID to read from + + Returns: + SessionAgent if found, None otherwise + """ + try: + # Use the session manager's method to get the manifest key + manifest_key = session_manager._get_manifest_key(session_id) + logger.info(f'Reading manifest from key: {manifest_key}') + manifest = session_manager._read_state(manifest_key) + + logger.info(f'Manifest content: {manifest}') + + if not manifest or 'agents' not in manifest or not manifest['agents']: + logger.warning( + f'No agents found in session {session_id} - session may not have agents yet. Use fallback metadata.' + ) + return None + + # Get the first agent (primary agent) + agent_id = manifest['agents'][0] + logger.info(f"Found agent '{agent_id}' in session {session_id}") + session_agent = session_manager.read_agent(session_id, agent_id) + + if session_agent: + logger.info( + f"Successfully loaded SessionAgent '{agent_id}' with state keys: {list(session_agent.state.keys()) if session_agent.state else []}" + ) + else: + logger.warning(f"read_agent returned None for agent_id '{agent_id}'") + + return session_agent + + except Exception as e: + logger.error( + f'Failed to read SessionAgent from session {session_id}: {e}', exc_info=True + ) + return None + + def _extract_llm_metadata(self, agent_state: Dict[str, Any]) -> Optional[LLMMetadata]: + """ + Extract LLM metadata from SessionAgent state. + + Looks for llm_component, conversation_provider, or llm_config in state. + + Args: + agent_state: The SessionAgent.state dictionary + + Returns: + LLMMetadata if LLM configuration found, None otherwise + """ + # Check for various LLM configuration keys + llm_component = agent_state.get('llm_component') or agent_state.get('conversation_provider') + llm_config = agent_state.get('llm_config', {}) + + if not llm_component and not llm_config: + return None + + # Extract LLM details + provider = llm_config.get('provider', 'unknown') + model = llm_config.get('model', 'unknown') + + return LLMMetadata( + client='dapr_conversation', + provider=provider, + api='conversation', + model=model, + component_name=llm_component, + ) + + def _extract_tools_metadata(self, agent_state: Dict[str, Any]) -> List[ToolMetadata]: + """ + Extract tools metadata from SessionAgent state. 
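+
+        Handles both plain dict entries and tool objects exposing ``name`` and
+        ``description`` attributes.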
+ + Args: + agent_state: The SessionAgent.state dictionary + + Returns: + List of ToolMetadata (empty list if no tools configured) + """ + tools_list = agent_state.get('tools') + + if not tools_list or not isinstance(tools_list, list): + return [] + + tool_metadata_list: List[ToolMetadata] = [] + for tool in tools_list: + if isinstance(tool, dict): + tool_metadata_list.append( + ToolMetadata( + tool_name=str(tool.get('name', 'unknown')), + tool_description=str(tool.get('description', '')), + tool_args=str(tool.get('args', {})), + ) + ) + elif hasattr(tool, 'name'): + # Handle tool objects + tool_metadata_list.append( + ToolMetadata( + tool_name=str(getattr(tool, 'name', 'unknown')), + tool_description=str(getattr(tool, 'description', '')), + tool_args=str(getattr(tool, 'args', {})), + ) + ) + + return tool_metadata_list + + def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSchema: + """ + Map Strands Agent or DaprSessionManager to AgentMetadataSchema. + + Handles two cases: + 1. Real Strands Agent (strands.Agent) - extracts from agent properties + 2. DaprSessionManager - extracts from stored SessionAgent in state + + Args: + agent: Either a strands.Agent or DaprSessionManager instance + schema_version: Version of the schema + + Returns: + AgentMetadataSchema with extracted metadata + """ + + # Case 1: Real Strands Agent from the SDK + if self._is_strands_agent(agent): + extracted = self._extract_from_strands_agent(agent) + + agent_id = extracted.get('agent_id', 'agent') + agent_name = extracted.get('name', agent_id) + session_id = extracted.get('session_id') + + # Build full agent name + if session_id: + full_name = f'strands-{session_id}-{agent_id}' + else: + full_name = f'strands-{agent_id}' + + # Extract LLM metadata + llm_info = extracted.get('llm') + if llm_info: + llm_metadata = LLMMetadata( + client=llm_info.get( + 'model_type', 'unknown' + ), # OpenAIModel, AnthropicModel, etc. + provider=llm_info.get('provider', 'unknown'), # openai, anthropic, etc. + api='chat', # Strands uses chat-based APIs + model=llm_info.get('model', 'unknown'), # gpt-4o, claude-3-opus, etc. 
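+                    # component_name stays None here: the Strands model provider calls
+                    # the LLM API directly rather than going through a Dapr conversation
+                    # component.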
+ component_name=None, + ) + else: + llm_metadata = None + + # Extract tools metadata + tools_list = extracted.get('tools', []) + tools_metadata = [] + for tool in tools_list: + if isinstance(tool, dict): + tools_metadata.append( + ToolMetadata( + tool_name=tool.get('name', 'unknown'), + tool_description=tool.get('description', ''), + tool_args='', + ) + ) + + # Get session manager info for memory and statestore + state_store_name = extracted.get('state_store') + session_id_value = extracted.get('session_id') + + # Determine memory type and populate session info + has_session_manager = extracted.get('session_manager') is not None + memory_type = 'DaprSessionManager' if has_session_manager else 'InMemory' + + return AgentMetadataSchema( + schema_version=schema_version, + agent=AgentMetadata( + appid='', + type='Strands', + orchestrator=False, + role=extracted.get('role', agent_name), + goal=extracted.get('goal', ''), + instructions=extracted.get('instructions') + if extracted.get('instructions') + else None, + statestore=state_store_name, # Set from session manager + system_prompt=extracted.get('system_prompt'), + ), + name=full_name, + registered_at=datetime.now(timezone.utc).isoformat(), + pubsub=None, + memory=MemoryMetadata( + type=memory_type, + session_id=session_id_value, # Set session_id + statestore=state_store_name, # Set statestore + ), + llm=llm_metadata, + tools=tools_metadata, # Already a list, could be empty [] + tool_choice='auto' + if tools_metadata + else None, # Set tool_choice based on whether tools exist + max_iterations=extracted.get('max_iterations'), + registry=RegistryMetadata( + name=None, + team='default', + ), + agent_metadata={ + 'framework': 'strands', + 'agent_id': agent_id, + 'session_id': session_id_value, + 'state_store': state_store_name, + }, + ) + + # Case 2: DaprSessionManager (legacy approach) + session_manager: 'DaprSessionManager' = agent + + # Extract session manager info + state_store_name = getattr(session_manager, 'state_store_name', None) + session_id = getattr(session_manager, '_session_id', None) + + # Try to get the primary SessionAgent + session_agent = self._get_session_agent(session_manager, session_id) if session_id else None + + # Extract agent-specific metadata if SessionAgent exists + if session_agent: + agent_id = session_agent.agent_id + agent_state = session_agent.state or {} + + # Extract from state + system_prompt = agent_state.get('system_prompt') + role = str(agent_state.get('role', agent_id)) + goal = str(agent_state.get('goal', '')) + instructions_raw = agent_state.get('instructions', []) + instructions = list(instructions_raw) if isinstance(instructions_raw, list) else [] + tool_choice = agent_state.get('tool_choice') + max_iterations = agent_state.get('max_iterations') + + # Extract LLM metadata + llm_metadata = self._extract_llm_metadata(agent_state) + + # Extract tools metadata + tools_metadata = self._extract_tools_metadata(agent_state) + + agent_name = f'strands-{session_id}-{agent_id}' + else: + # Fallback when no SessionAgent found + agent_id = None + agent_state = {} + system_prompt = None + role = 'Session Manager' + goal = 'Manages multi-agent sessions with distributed state storage' + instructions = [] + tool_choice = None + max_iterations = None + llm_metadata = None + tools_metadata = [] + agent_name = f'strands-session-{session_id}' if session_id else 'strands-session' + + return AgentMetadataSchema( + schema_version=schema_version, + agent=AgentMetadata( + appid='', + type='Strands', + orchestrator=False, + 
role=role, + goal=goal, + instructions=instructions if instructions else None, + statestore=state_store_name, + system_prompt=system_prompt, + ), + name=agent_name, + registered_at=datetime.now(timezone.utc).isoformat(), + pubsub=None, + memory=MemoryMetadata( + type='DaprSessionManager', + session_id=session_id, + statestore=state_store_name, + ), + llm=llm_metadata, + tools=tools_metadata, # Already a list, could be empty [] + tool_choice=tool_choice, + max_iterations=max_iterations, + registry=RegistryMetadata( + name=None, + team='default', + ), + agent_metadata={ + 'framework': 'strands', + 'session_id': session_id, + 'agent_id': agent_id, + 'state_store': state_store_name, + }, + ) diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py new file mode 100644 index 00000000..c554fc4e --- /dev/null +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py @@ -0,0 +1,290 @@ +from __future__ import annotations + +import logging +import random +import time +from importlib.metadata import PackageNotFoundError, version +from typing import Any, Callable, Dict, Optional, Sequence + +import dapr.ext.agent_core.mapping +from dapr.ext.agent_core.introspection import detect_framework, find_agent_in_stack +from dapr.ext.agent_core.types import AgentMetadataSchema, SupportedFrameworks +from dapr_agents.agents.configs import ( + AgentRegistryConfig, +) +from dapr_agents.storage.daprstores.stateservice import StateStoreError, StateStoreService + +from dapr.clients import DaprClient +from dapr.clients.grpc._response import ( + GetMetadataResponse, + RegisteredComponents, +) +from dapr.clients.grpc._state import Concurrency, Consistency + +logger = logging.getLogger(__name__) + + +class AgentRegistryAdapter: + @classmethod + def create_from_stack( + cls, registry: Optional[AgentRegistryConfig] = None + ) -> Optional['AgentRegistryAdapter']: + """ + Auto-detect and create an AgentRegistryAdapter by walking the call stack. + + Args: + registry: Optional registry configuration. If None, will attempt auto-discovery. + + Returns: + AgentRegistryAdapter instance if agent found, None otherwise. 
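+
+        Example (illustrative sketch; assumes a supported agent object is alive on
+        the call stack and an 'agent-registry' state store component is configured):
+
+            adapter = AgentRegistryAdapter.create_from_stack()
+            if adapter is None:
+                logger.debug('No supported agent found; skipping registration')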
+ """ + agent = find_agent_in_stack() + if not agent: + return None + + framework = detect_framework(agent) + if not framework: + return None + + return cls(registry=registry, framework=framework, agent=agent) + + def __init__(self, registry: Optional[AgentRegistryConfig], framework: str, agent: Any) -> None: + self._registry = registry + + try: + with DaprClient(http_timeout_seconds=10) as _client: + resp: GetMetadataResponse = _client.get_metadata() + self.appid = resp.application_id + if self._registry is None: + components: Sequence[RegisteredComponents] = resp.registered_components + for component in components: + if 'state' in component.type and component.name == 'agent-registry': + self._registry = AgentRegistryConfig( + store=StateStoreService(store_name=component.name), + team_name='default', + ) + except TimeoutError: + logger.warning('Dapr sidecar not responding; proceeding without auto-configuration.') + + if self._registry is None: + return + + self.registry_state: StateStoreService = self._registry.store + self._registry_prefix: str = 'agents:' + self._meta: Dict[str, str] = {'contentType': 'application/json'} + self._max_etag_attempts: int = 10 + self._save_options: Dict[str, Any] = { + 'concurrency': Concurrency.first_write, + 'consistency': Consistency.strong, + } + + if not self._can_handle(framework): + raise ValueError(f"Adapter cannot handle framework '{framework}'") + + _metadata = self._extract_metadata(agent) + + # We need to handle some null values here to avoid issues during registration + if _metadata.agent.appid == '': + _metadata.agent.appid = self.appid or '' + + if _metadata.registry: + if _metadata.registry.name is None: + _metadata.registry.name = self._registry.team_name + if _metadata.registry.statestore is None: + _metadata.registry.statestore = self.registry_state.store_name + + self._register(_metadata) + + def _can_handle(self, framework: str) -> bool: + """Check if this adapter can handle the given Agent.""" + + for fw in SupportedFrameworks: + if framework.lower() == fw.value.lower(): + self._framework = fw + return True + return False + + def _extract_metadata(self, agent: Any) -> AgentMetadataSchema: + """Extract metadata from the given Agent.""" + + try: + schema_version = version('dapr-ext-agent_core') + except PackageNotFoundError: + schema_version = 'edge' + + framework_mappers = { + SupportedFrameworks.DAPR_AGENTS: dapr.ext.agent_core.mapping.DaprAgentsMapper().map_agent_metadata, + SupportedFrameworks.LANGGRAPH: dapr.ext.agent_core.mapping.LangGraphMapper().map_agent_metadata, + SupportedFrameworks.STRANDS: dapr.ext.agent_core.mapping.StrandsMapper().map_agent_metadata, + } + + mapper = framework_mappers.get(self._framework) + if not mapper: + raise ValueError(f"Adapter cannot handle framework '{self._framework}'") + + return mapper(agent=agent, schema_version=schema_version) + + def _register(self, metadata: AgentMetadataSchema) -> None: + """Register the adapter with the given Agent.""" + """ + Upsert this agent's metadata in the team registry. + + Args: + metadata: Additional metadata to store for this agent. + team: Team override; falls back to configured default team. 
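+
+        The write is delegated to _upsert_agent_entry, which retries with optimistic
+        ETag concurrency against the 'agents:<team>' registry document.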
+ """ + if not metadata.registry: + raise ValueError('Registry metadata is required for registration') + + self._upsert_agent_entry( + team=metadata.registry.name, + agent_name=metadata.name, + agent_metadata=metadata.model_dump(), + ) + + def _mutate_registry_entry( + self, + *, + team: Optional[str], + mutator: Callable[[Dict[str, Any]], Optional[Dict[str, Any]]], + max_attempts: Optional[int] = None, + ) -> None: + """ + Apply a mutation to the team registry with optimistic concurrency. + + Args: + team: Team identifier. + mutator: Function that returns the updated registry dict (or None for no-op). + max_attempts: Override for concurrency retries; defaults to init value. + + Raises: + StateStoreError: If the mutation fails after retries due to contention. + """ + if not self.registry_state: + raise RuntimeError('registry_state must be provided to mutate the agent registry') + + key = f'agents:{team or "default"}' + self._meta['partitionKey'] = key + attempts = max_attempts or self._max_etag_attempts + + self._ensure_registry_initialized(key=key, meta=self._meta) + + for attempt in range(1, attempts + 1): + logger.debug(f"Mutating registry entry '{key}', attempt {attempt}/{attempts}") + try: + current, etag = self.registry_state.load_with_etag( + key=key, + default={}, + state_metadata=self._meta, + ) + if not isinstance(current, dict): + current = {} + + updated = mutator(dict(current)) + if updated is None: + return + + self.registry_state.save( + key=key, + value=updated, + etag=etag, + state_metadata=self._meta, + state_options=self._save_options, + ) + logger.debug(f"Successfully mutated registry entry '{key}'") + return + except Exception as exc: # noqa: BLE001 + logger.warning( + "Conflict during registry mutation (attempt %d/%d) for '%s': %s", + attempt, + attempts, + key, + exc, + ) + if attempt == attempts: + raise StateStoreError( + f"Failed to mutate agent registry key '{key}' after {attempts} attempts." + ) from exc + # Jittered backoff to reduce thundering herd during contention. + time.sleep(min(1.0 * attempt, 3.0) * (1 + random.uniform(0, 0.25))) + + def _upsert_agent_entry( + self, + *, + team: Optional[str], + agent_name: str, + agent_metadata: Dict[str, Any], + max_attempts: Optional[int] = None, + ) -> None: + """ + Insert/update a single agent record in the team registry. + + Args: + team: Team identifier. + agent_name: Agent name (key). + agent_metadata: Metadata value to write. + max_attempts: Override retry attempts. + """ + + def mutator(current: Dict[str, Any]) -> Optional[Dict[str, Any]]: + if current.get(agent_name) == agent_metadata: + return None + current[agent_name] = agent_metadata + return current + + logger.debug("Upserting agent '%s' in team '%s' registry", agent_name, team or 'default') + self._mutate_registry_entry( + team=team, + mutator=mutator, + max_attempts=max_attempts, + ) + + def _remove_agent_entry( + self, + *, + team: Optional[str], + agent_name: str, + max_attempts: Optional[int] = None, + ) -> None: + """ + Delete a single agent record from the team registry. + + Args: + team: Team identifier. + agent_name: Agent name (key). + max_attempts: Override retry attempts. 
+ """ + + def mutator(current: Dict[str, Any]) -> Optional[Dict[str, Any]]: + if agent_name not in current: + return None + del current[agent_name] + return current + + self._mutate_registry_entry( + team=team, + mutator=mutator, + max_attempts=max_attempts, + ) + + def _ensure_registry_initialized(self, *, key: str, meta: Dict[str, str]) -> None: + """ + Ensure a registry document exists to create an ETag for concurrency control. + + Args: + key: Registry document key. + meta: Dapr state metadata to use for the operation. + """ + _, etag = self.registry_state.load_with_etag( # type: ignore[union-attr] + key=key, + default={}, + state_metadata=meta, + ) + if etag is None: + self.registry_state.save( # type: ignore[union-attr] + key=key, + value={}, + etag=None, + state_metadata=meta, + state_options=self._save_options, + ) diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/py.typed b/ext/dapr-ext-agent_core/dapr/ext/agent_core/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/types.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/types.py new file mode 100644 index 00000000..6c79d022 --- /dev/null +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/types.py @@ -0,0 +1,134 @@ +from enum import StrEnum +from typing import Any, Dict, List, Optional + +from pydantic import BaseModel, Field + + +class SupportedFrameworks(StrEnum): + DAPR_AGENTS = 'dapr_agents' + LANGGRAPH = 'langgraph' + STRANDS = 'strands' + + +class AgentMetadata(BaseModel): + """Metadata about an agent's configuration and capabilities.""" + + appid: str = Field(..., description='Dapr application ID of the agent') + type: str = Field(..., description='Type of the agent (e.g., standalone, durable)') + orchestrator: bool = Field(False, description='Indicates if the agent is an orchestrator') + role: str = Field(default='', description='Role of the agent') + goal: str = Field(default='', description='High-level objective of the agent') + instructions: Optional[List[str]] = Field( + default=None, description='Instructions for the agent' + ) + statestore: Optional[str] = Field( + default=None, description='Dapr state store component name used by the agent' + ) + system_prompt: Optional[str] = Field( + default=None, description="System prompt guiding the agent's behavior" + ) + + +class PubSubMetadata(BaseModel): + """Pub/Sub configuration information.""" + + name: str = Field(..., description='Pub/Sub component name') + broadcast_topic: Optional[str] = Field( + default=None, description='Pub/Sub topic for broadcasting messages' + ) + agent_topic: Optional[str] = Field( + default=None, description='Pub/Sub topic for direct agent messages' + ) + + +class MemoryMetadata(BaseModel): + """Memory configuration information.""" + + type: str = Field(..., description='Type of memory used by the agent') + statestore: Optional[str] = Field( + default=None, description='Dapr state store component name for memory' + ) + session_id: Optional[str] = Field( + default=None, description="Default session ID for the agent's memory" + ) + + +class LLMMetadata(BaseModel): + """LLM configuration information.""" + + client: str = Field(..., description='LLM client used by the agent') + provider: str = Field(..., description='LLM provider used by the agent') + api: str = Field(default='unknown', description='API type used by the LLM client') + model: str = Field(default='unknown', description='Model name or identifier') + component_name: Optional[str] = Field( + default=None, 
description='Dapr component name for the LLM client' + ) + base_url: Optional[str] = Field( + default=None, description='Base URL for the LLM API if applicable' + ) + azure_endpoint: Optional[str] = Field( + default=None, description='Azure endpoint if using Azure OpenAI' + ) + azure_deployment: Optional[str] = Field( + default=None, description='Azure deployment name if using Azure OpenAI' + ) + prompt_template: Optional[str] = Field( + default=None, description='Prompt template used by the agent' + ) + + +class ToolMetadata(BaseModel): + """Metadata about a tool available to the agent.""" + + tool_name: str = Field(..., description='Name of the tool') + tool_description: str = Field(..., description="Description of the tool's functionality") + tool_args: str = Field(..., description='Arguments for the tool') + + +class RegistryMetadata(BaseModel): + """Registry configuration information.""" + + statestore: Optional[str] = Field( + None, description='Name of the statestore component for the registry' + ) + name: Optional[str] = Field(default=None, description='Name of the team registry') + + +class AgentMetadataSchema(BaseModel): + """Schema for agent metadata including schema version.""" + + schema_version: str = Field( + ..., + description='Version of the schema used for the agent metadata.', + ) + agent: AgentMetadata = Field(..., description='Agent configuration and capabilities') + name: str = Field(..., description='Name of the agent') + registered_at: str = Field(..., description='ISO 8601 timestamp of registration') + pubsub: Optional[PubSubMetadata] = Field(None, description='Pub/sub configuration if enabled') + memory: Optional[MemoryMetadata] = Field(None, description='Memory configuration if enabled') + llm: Optional[LLMMetadata] = Field(None, description='LLM configuration') + registry: Optional[RegistryMetadata] = Field(None, description='Registry configuration') + tools: Optional[List[ToolMetadata]] = Field(None, description='Available tools') + max_iterations: Optional[int] = Field( + None, description='Maximum iterations for agent execution' + ) + tool_choice: Optional[str] = Field(None, description='Tool choice strategy') + agent_metadata: Optional[Dict[str, Any]] = Field( + None, description='Additional metadata about the agent' + ) + + @classmethod + def export_json_schema(cls, version: str) -> Dict[str, Any]: + """ + Export the JSON schema with version information. + + Args: + version: The dapr-agents version for this schema + + Returns: + JSON schema dictionary with metadata + """ + schema = cls.model_json_schema() + schema['$schema'] = 'https://json-schema.org/draft/2020-12/schema' + schema['version'] = version + return schema diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/version.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/version.py new file mode 100644 index 00000000..b81f0d98 --- /dev/null +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + +""" +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
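
Putting the pydantic models above together, a minimal sketch of building one metadata document and exporting its JSON schema; all field values are illustrative, and the imports mirror the module layout introduced in this patch.

```python
# Minimal sketch using the models defined above; values are illustrative.
from dapr.ext.agent_core import AgentMetadataSchema
from dapr.ext.agent_core.types import AgentMetadata

doc = AgentMetadataSchema(
    schema_version='1.17.0.dev',
    agent=AgentMetadata(appid='my-app', type='standalone'),
    name='my-agent',
    registered_at='2025-01-01T00:00:00+00:00',
)

schema = AgentMetadataSchema.export_json_schema('1.17.0.dev')
assert schema['$schema'] == 'https://json-schema.org/draft/2020-12/schema'
assert schema['version'] == '1.17.0.dev'
```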
+""" + +__version__ = '1.17.0.dev' diff --git a/ext/dapr-ext-agent_core/schemas/agent-metadata/index.json b/ext/dapr-ext-agent_core/schemas/agent-metadata/index.json new file mode 100644 index 00000000..aa27e236 --- /dev/null +++ b/ext/dapr-ext-agent_core/schemas/agent-metadata/index.json @@ -0,0 +1,7 @@ +{ + "current_version": "1.17.0.dev", + "schema_url": "https://raw.githubusercontent.com/dapr/python-sdk/main/ext/dapr-ext-agent_core/schemas/agent-metadata/v1.17.0.dev.json", + "available_versions": [ + "v1.17.0.dev" + ] +} \ No newline at end of file diff --git a/ext/dapr-ext-agent_core/schemas/agent-metadata/latest.json b/ext/dapr-ext-agent_core/schemas/agent-metadata/latest.json new file mode 100644 index 00000000..2e56fd9a --- /dev/null +++ b/ext/dapr-ext-agent_core/schemas/agent-metadata/latest.json @@ -0,0 +1,461 @@ +{ + "$defs": { + "AgentMetadata": { + "description": "Metadata about an agent's configuration and capabilities.", + "properties": { + "appid": { + "description": "Dapr application ID of the agent", + "title": "Appid", + "type": "string" + }, + "type": { + "description": "Type of the agent (e.g., standalone, durable)", + "title": "Type", + "type": "string" + }, + "orchestrator": { + "default": false, + "description": "Indicates if the agent is an orchestrator", + "title": "Orchestrator", + "type": "boolean" + }, + "role": { + "default": "", + "description": "Role of the agent", + "title": "Role", + "type": "string" + }, + "goal": { + "default": "", + "description": "High-level objective of the agent", + "title": "Goal", + "type": "string" + }, + "instructions": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Instructions for the agent", + "title": "Instructions" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name used by the agent", + "title": "Statestore" + }, + "system_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "System prompt guiding the agent's behavior", + "title": "System Prompt" + } + }, + "required": [ + "appid", + "type" + ], + "title": "AgentMetadata", + "type": "object" + }, + "LLMMetadata": { + "description": "LLM configuration information.", + "properties": { + "client": { + "description": "LLM client used by the agent", + "title": "Client", + "type": "string" + }, + "provider": { + "description": "LLM provider used by the agent", + "title": "Provider", + "type": "string" + }, + "api": { + "default": "unknown", + "description": "API type used by the LLM client", + "title": "Api", + "type": "string" + }, + "model": { + "default": "unknown", + "description": "Model name or identifier", + "title": "Model", + "type": "string" + }, + "component_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr component name for the LLM client", + "title": "Component Name" + }, + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Base URL for the LLM API if applicable", + "title": "Base Url" + }, + "azure_endpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure endpoint if using Azure OpenAI", + "title": "Azure Endpoint" + }, + "azure_deployment": { + "anyOf": [ + { + "type": "string" + }, + { 
+ "type": "null" + } + ], + "default": null, + "description": "Azure deployment name if using Azure OpenAI", + "title": "Azure Deployment" + }, + "prompt_template": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Prompt template used by the agent", + "title": "Prompt Template" + } + }, + "required": [ + "client", + "provider" + ], + "title": "LLMMetadata", + "type": "object" + }, + "MemoryMetadata": { + "description": "Memory configuration information.", + "properties": { + "type": { + "description": "Type of memory used by the agent", + "title": "Type", + "type": "string" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name for memory", + "title": "Statestore" + }, + "session_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Default session ID for the agent's memory", + "title": "Session Id" + } + }, + "required": [ + "type" + ], + "title": "MemoryMetadata", + "type": "object" + }, + "PubSubMetadata": { + "description": "Pub/Sub configuration information.", + "properties": { + "name": { + "description": "Pub/Sub component name", + "title": "Name", + "type": "string" + }, + "broadcast_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for broadcasting messages", + "title": "Broadcast Topic" + }, + "agent_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for direct agent messages", + "title": "Agent Topic" + } + }, + "required": [ + "name" + ], + "title": "PubSubMetadata", + "type": "object" + }, + "RegistryMetadata": { + "description": "Registry configuration information.", + "properties": { + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the statestore component for the registry", + "title": "Statestore" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the team registry", + "title": "Name" + } + }, + "title": "RegistryMetadata", + "type": "object" + }, + "ToolMetadata": { + "description": "Metadata about a tool available to the agent.", + "properties": { + "tool_name": { + "description": "Name of the tool", + "title": "Tool Name", + "type": "string" + }, + "tool_description": { + "description": "Description of the tool's functionality", + "title": "Tool Description", + "type": "string" + }, + "tool_args": { + "description": "Arguments for the tool", + "title": "Tool Args", + "type": "string" + } + }, + "required": [ + "tool_name", + "tool_description", + "tool_args" + ], + "title": "ToolMetadata", + "type": "object" + } + }, + "description": "Schema for agent metadata including schema version.", + "properties": { + "schema_version": { + "description": "Version of the schema used for the agent metadata.", + "title": "Schema Version", + "type": "string" + }, + "agent": { + "$ref": "#/$defs/AgentMetadata", + "description": "Agent configuration and capabilities" + }, + "name": { + "description": "Name of the agent", + "title": "Name", + "type": "string" + }, + "registered_at": { + "description": "ISO 8601 timestamp of registration", + "title": "Registered At", + "type": "string" + }, + "pubsub": { + "anyOf": [ + { + "$ref": 
"#/$defs/PubSubMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/sub configuration if enabled" + }, + "memory": { + "anyOf": [ + { + "$ref": "#/$defs/MemoryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Memory configuration if enabled" + }, + "llm": { + "anyOf": [ + { + "$ref": "#/$defs/LLMMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "LLM configuration" + }, + "registry": { + "anyOf": [ + { + "$ref": "#/$defs/RegistryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Registry configuration" + }, + "tools": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/ToolMetadata" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Available tools", + "title": "Tools" + }, + "max_iterations": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum iterations for agent execution", + "title": "Max Iterations" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Tool choice strategy", + "title": "Tool Choice" + }, + "agent_metadata": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional metadata about the agent", + "title": "Agent Metadata" + } + }, + "required": [ + "schema_version", + "agent", + "name", + "registered_at" + ], + "title": "AgentMetadataSchema", + "type": "object", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "version": "1.17.0.dev" +} \ No newline at end of file diff --git a/ext/dapr-ext-agent_core/schemas/agent-metadata/v1.17.0.dev.json b/ext/dapr-ext-agent_core/schemas/agent-metadata/v1.17.0.dev.json new file mode 100644 index 00000000..2e56fd9a --- /dev/null +++ b/ext/dapr-ext-agent_core/schemas/agent-metadata/v1.17.0.dev.json @@ -0,0 +1,461 @@ +{ + "$defs": { + "AgentMetadata": { + "description": "Metadata about an agent's configuration and capabilities.", + "properties": { + "appid": { + "description": "Dapr application ID of the agent", + "title": "Appid", + "type": "string" + }, + "type": { + "description": "Type of the agent (e.g., standalone, durable)", + "title": "Type", + "type": "string" + }, + "orchestrator": { + "default": false, + "description": "Indicates if the agent is an orchestrator", + "title": "Orchestrator", + "type": "boolean" + }, + "role": { + "default": "", + "description": "Role of the agent", + "title": "Role", + "type": "string" + }, + "goal": { + "default": "", + "description": "High-level objective of the agent", + "title": "Goal", + "type": "string" + }, + "instructions": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Instructions for the agent", + "title": "Instructions" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name used by the agent", + "title": "Statestore" + }, + "system_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "System prompt guiding the agent's behavior", + "title": "System Prompt" + } + }, + "required": [ + "appid", + "type" + ], + "title": "AgentMetadata", + "type": "object" + }, + "LLMMetadata": { + "description": "LLM 
configuration information.", + "properties": { + "client": { + "description": "LLM client used by the agent", + "title": "Client", + "type": "string" + }, + "provider": { + "description": "LLM provider used by the agent", + "title": "Provider", + "type": "string" + }, + "api": { + "default": "unknown", + "description": "API type used by the LLM client", + "title": "Api", + "type": "string" + }, + "model": { + "default": "unknown", + "description": "Model name or identifier", + "title": "Model", + "type": "string" + }, + "component_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr component name for the LLM client", + "title": "Component Name" + }, + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Base URL for the LLM API if applicable", + "title": "Base Url" + }, + "azure_endpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure endpoint if using Azure OpenAI", + "title": "Azure Endpoint" + }, + "azure_deployment": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure deployment name if using Azure OpenAI", + "title": "Azure Deployment" + }, + "prompt_template": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Prompt template used by the agent", + "title": "Prompt Template" + } + }, + "required": [ + "client", + "provider" + ], + "title": "LLMMetadata", + "type": "object" + }, + "MemoryMetadata": { + "description": "Memory configuration information.", + "properties": { + "type": { + "description": "Type of memory used by the agent", + "title": "Type", + "type": "string" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name for memory", + "title": "Statestore" + }, + "session_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Default session ID for the agent's memory", + "title": "Session Id" + } + }, + "required": [ + "type" + ], + "title": "MemoryMetadata", + "type": "object" + }, + "PubSubMetadata": { + "description": "Pub/Sub configuration information.", + "properties": { + "name": { + "description": "Pub/Sub component name", + "title": "Name", + "type": "string" + }, + "broadcast_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for broadcasting messages", + "title": "Broadcast Topic" + }, + "agent_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for direct agent messages", + "title": "Agent Topic" + } + }, + "required": [ + "name" + ], + "title": "PubSubMetadata", + "type": "object" + }, + "RegistryMetadata": { + "description": "Registry configuration information.", + "properties": { + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the statestore component for the registry", + "title": "Statestore" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the team registry", + "title": "Name" + } + }, + "title": "RegistryMetadata", + "type": "object" + }, + 
"ToolMetadata": { + "description": "Metadata about a tool available to the agent.", + "properties": { + "tool_name": { + "description": "Name of the tool", + "title": "Tool Name", + "type": "string" + }, + "tool_description": { + "description": "Description of the tool's functionality", + "title": "Tool Description", + "type": "string" + }, + "tool_args": { + "description": "Arguments for the tool", + "title": "Tool Args", + "type": "string" + } + }, + "required": [ + "tool_name", + "tool_description", + "tool_args" + ], + "title": "ToolMetadata", + "type": "object" + } + }, + "description": "Schema for agent metadata including schema version.", + "properties": { + "schema_version": { + "description": "Version of the schema used for the agent metadata.", + "title": "Schema Version", + "type": "string" + }, + "agent": { + "$ref": "#/$defs/AgentMetadata", + "description": "Agent configuration and capabilities" + }, + "name": { + "description": "Name of the agent", + "title": "Name", + "type": "string" + }, + "registered_at": { + "description": "ISO 8601 timestamp of registration", + "title": "Registered At", + "type": "string" + }, + "pubsub": { + "anyOf": [ + { + "$ref": "#/$defs/PubSubMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/sub configuration if enabled" + }, + "memory": { + "anyOf": [ + { + "$ref": "#/$defs/MemoryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Memory configuration if enabled" + }, + "llm": { + "anyOf": [ + { + "$ref": "#/$defs/LLMMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "LLM configuration" + }, + "registry": { + "anyOf": [ + { + "$ref": "#/$defs/RegistryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Registry configuration" + }, + "tools": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/ToolMetadata" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Available tools", + "title": "Tools" + }, + "max_iterations": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum iterations for agent execution", + "title": "Max Iterations" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Tool choice strategy", + "title": "Tool Choice" + }, + "agent_metadata": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional metadata about the agent", + "title": "Agent Metadata" + } + }, + "required": [ + "schema_version", + "agent", + "name", + "registered_at" + ], + "title": "AgentMetadataSchema", + "type": "object", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "version": "1.17.0.dev" +} \ No newline at end of file diff --git a/ext/dapr-ext-agent_core/scripts/generate_schema.py b/ext/dapr-ext-agent_core/scripts/generate_schema.py new file mode 100644 index 00000000..8b8f194f --- /dev/null +++ b/ext/dapr-ext-agent_core/scripts/generate_schema.py @@ -0,0 +1,112 @@ +import argparse +import json +from importlib.metadata import PackageNotFoundError, version +from pathlib import Path +from typing import Any, Optional + +from dapr.ext.agent_core import AgentMetadataSchema + + +def get_auto_version() -> str: + """Get current package version automatically.""" + try: + return version('dapr-ext-agent_core') + except PackageNotFoundError: + return 
'0.0.0.dev0' + + +def generate_schema(output_dir: Path, schema_version: Optional[str] = None): + """ + Generate versioned schema files. + + Args: + output_dir: Directory to output schema files + schema_version: Specific version to use. If None, auto-detects from package. + """ + # Use provided version or auto-detect + current_version = schema_version or get_auto_version() + + print(f'Generating schema for version: {current_version}') + schema_dir = output_dir / 'agent-metadata' + + # Export schema + schema: dict[Any, Any] = AgentMetadataSchema.export_json_schema(current_version) + + # Write versioned file + version_file = schema_dir / f'v{current_version}.json' + with open(version_file, 'w') as f: + json.dump(schema, f, indent=2) + print(f'✓ Generated {version_file}') + + # Write latest.json + latest_file = schema_dir / 'latest.json' + with open(latest_file, 'w') as f: + json.dump(schema, f, indent=2) + print(f'✓ Generated {latest_file}') + + # Write index with all versions + index: dict[Any, Any] = { + 'current_version': current_version, + 'schema_url': f'https://raw.githubusercontent.com/dapr/python-sdk/main/ext/dapr-ext-agent_core/schemas/agent-metadata/v{current_version}.json', + 'available_versions': sorted([f.stem for f in schema_dir.glob('v*.json')], reverse=True), + } + + index_file = schema_dir / 'index.json' + with open(index_file, 'w') as f: + json.dump(index, f, indent=2) + print(f'✓ Generated {index_file}') + print(f'\nSchema generation complete for version {current_version}') + + +def main(): + """Main entry point with CLI argument parsing.""" + parser = argparse.ArgumentParser( + description='Generate JSON schema files for agent metadata', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Auto-detect version from installed package + python scripts/generate_schema.py + + # Generate schema for specific version + python scripts/generate_schema.py --version 1.0.0 + + # Generate for pre-release + python scripts/generate_schema.py --version 1.1.0-rc1 + + # Custom output directory + python scripts/generate_schema.py --version 1.0.0 --output ./custom-schemas + """, + ) + + parser.add_argument( + '--version', + '-v', + type=str, + default=None, + help='Specific version to use for schema generation. If not provided, auto-detects from installed package.', + ) + + parser.add_argument( + '--output', + '-o', + type=Path, + default=None, + help="Output directory for schemas. 
Defaults to 'schemas' in repo root.", + ) + + args = parser.parse_args() + + # Determine output directory + if args.output: + schemas_dir = args.output + else: + repo_root = Path(__file__).parent.parent + schemas_dir = repo_root / 'schemas' + + # Generate schemas + generate_schema(schemas_dir, schema_version=args.version) + + +if __name__ == '__main__': + main() diff --git a/ext/dapr-ext-agent_core/setup.cfg b/ext/dapr-ext-agent_core/setup.cfg new file mode 100644 index 00000000..665a4337 --- /dev/null +++ b/ext/dapr-ext-agent_core/setup.cfg @@ -0,0 +1,41 @@ +[metadata] +url = https://dapr.io/ +author = Dapr Authors +author_email = daprweb@microsoft.com +license = Apache +license_file = LICENSE +classifiers = + Development Status :: 5 - Production/Stable + Intended Audience :: Developers + License :: OSI Approved :: Apache Software License + Operating System :: OS Independent + Programming Language :: Python + Programming Language :: Python :: 3.11 + Programming Language :: Python :: 3.12 + Programming Language :: Python :: 3.13 + Programming Language :: Python :: 3.14 +project_urls = + Documentation = https://github.com/dapr/docs + Source = https://github.com/dapr/python-sdk + +[options] +python_requires = >=3.11 +packages = find_namespace: +include_package_data = True +install_requires = + dapr >= 1.17.0.dev + dapr-agents >= 0.10.7 + pydantic >= 2.12.5 + langgraph >= 0.3.6 + strands-agents >= 1.24.0 + +[options.packages.find] +include = + dapr.* + +exclude = + tests + +[options.package_data] +dapr.ext.agent_core = + py.typed diff --git a/ext/dapr-ext-agent_core/setup.py b/ext/dapr-ext-agent_core/setup.py new file mode 100644 index 00000000..d23aed9f --- /dev/null +++ b/ext/dapr-ext-agent_core/setup.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- + +""" +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import os + +from setuptools import setup + +# Load version in dapr package. +version_info = {} +with open('dapr/ext/agent_core/version.py') as fp: + exec(fp.read(), version_info) +__version__ = version_info['__version__'] + + +def is_release(): + return '.dev' not in __version__ + + +name = 'dapr-ext-agent_core' +version = __version__ +description = 'The official release of Dapr Python SDK Agent Core Extension.' +long_description = """ +This is the core extension for Dapr Python SDK Agent integrations. +Dapr is a portable, serverless, event-driven runtime that makes it easy for developers to +build resilient, stateless and stateful microservices that run on the cloud and edge and +embraces the diversity of languages and developer frameworks. + +Dapr codifies the best practices for building microservice applications into open, +independent, building blocks that enable you to build portable applications with the language +and framework of your choice. Each building block is independent and you can use one, some, +or all of them in your application. 
+""".lstrip() + +# Get build number from GITHUB_RUN_NUMBER environment variable +build_number = os.environ.get('GITHUB_RUN_NUMBER', '0') + +if not is_release(): + name += '-dev' + version = f'{__version__}{build_number}' + description = ( + 'The developmental release for the Dapr Session Manager extension for Strands Agents' + ) + long_description = 'This is the developmental release for the Dapr Session Manager extension for Strands Agents' + +print(f'package name: {name}, version: {version}', flush=True) + + +setup( + name=name, + version=version, + description=description, + long_description=long_description, +) diff --git a/ext/dapr-ext-agent_core/tests/__init__.py b/ext/dapr-ext-agent_core/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ext/dapr-ext-agent_core/tests/test_base_mapper.py b/ext/dapr-ext-agent_core/tests/test_base_mapper.py new file mode 100644 index 00000000..4f003588 --- /dev/null +++ b/ext/dapr-ext-agent_core/tests/test_base_mapper.py @@ -0,0 +1,92 @@ +""" +Copyright 2026 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import unittest + +from dapr.ext.agent_core.mapping import BaseAgentMapper + + +class TestBaseAgentMapper(unittest.TestCase): + """Tests for BaseAgentMapper shared functionality.""" + + def test_extract_provider_openai(self): + """Test provider extraction for OpenAI modules.""" + self.assertEqual( + BaseAgentMapper._extract_provider('langchain_openai.chat_models'), 'openai' + ) + self.assertEqual(BaseAgentMapper._extract_provider('openai.resources'), 'openai') + self.assertEqual(BaseAgentMapper._extract_provider('strands.models.openai'), 'openai') + + def test_extract_provider_azure_openai(self): + """Test provider extraction for Azure OpenAI modules.""" + self.assertEqual( + BaseAgentMapper._extract_provider('langchain_openai.azure'), 'azure_openai' + ) + self.assertEqual(BaseAgentMapper._extract_provider('azure.openai'), 'azure_openai') + self.assertEqual(BaseAgentMapper._extract_provider('AZURE_OPENAI'), 'azure_openai') + + def test_extract_provider_anthropic(self): + """Test provider extraction for Anthropic modules.""" + self.assertEqual( + BaseAgentMapper._extract_provider('langchain_anthropic.chat_models'), 'anthropic' + ) + self.assertEqual(BaseAgentMapper._extract_provider('anthropic.client'), 'anthropic') + self.assertEqual(BaseAgentMapper._extract_provider('strands.models.anthropic'), 'anthropic') + + def test_extract_provider_ollama(self): + """Test provider extraction for Ollama modules.""" + self.assertEqual(BaseAgentMapper._extract_provider('langchain_ollama'), 'ollama') + self.assertEqual(BaseAgentMapper._extract_provider('ollama.client'), 'ollama') + + def test_extract_provider_google(self): + """Test provider extraction for Google/Gemini modules.""" + self.assertEqual(BaseAgentMapper._extract_provider('langchain_google_genai'), 'google') + self.assertEqual(BaseAgentMapper._extract_provider('google.generativeai'), 'google') + self.assertEqual(BaseAgentMapper._extract_provider('gemini.client'), 'google') + + def 
test_extract_provider_cohere(self): + """Test provider extraction for Cohere modules.""" + self.assertEqual(BaseAgentMapper._extract_provider('langchain_cohere'), 'cohere') + self.assertEqual(BaseAgentMapper._extract_provider('cohere.client'), 'cohere') + + def test_extract_provider_bedrock(self): + """Test provider extraction for AWS Bedrock modules.""" + self.assertEqual(BaseAgentMapper._extract_provider('langchain_aws.bedrock'), 'bedrock') + self.assertEqual(BaseAgentMapper._extract_provider('bedrock.client'), 'bedrock') + + def test_extract_provider_vertexai(self): + """Test provider extraction for Vertex AI modules.""" + self.assertEqual(BaseAgentMapper._extract_provider('langchain_google_vertexai'), 'vertexai') + self.assertEqual(BaseAgentMapper._extract_provider('vertexai.client'), 'vertexai') + + def test_extract_provider_unknown(self): + """Test provider extraction for unknown modules.""" + self.assertEqual(BaseAgentMapper._extract_provider('some.random.module'), 'unknown') + self.assertEqual(BaseAgentMapper._extract_provider('custom_llm'), 'unknown') + self.assertEqual(BaseAgentMapper._extract_provider(''), 'unknown') + + def test_extract_provider_case_insensitive(self): + """Test that provider extraction is case-insensitive.""" + self.assertEqual(BaseAgentMapper._extract_provider('OPENAI.CLIENT'), 'openai') + self.assertEqual(BaseAgentMapper._extract_provider('Anthropic.Client'), 'anthropic') + self.assertEqual(BaseAgentMapper._extract_provider('OlLaMa'), 'ollama') + + def test_extract_provider_priority_azure_over_openai(self): + """Test that Azure OpenAI takes priority when both keywords present.""" + # Azure should be detected before OpenAI + self.assertEqual(BaseAgentMapper._extract_provider('azure.openai.client'), 'azure_openai') + self.assertEqual(BaseAgentMapper._extract_provider('openai.azure.wrapper'), 'azure_openai') + + +if __name__ == '__main__': + unittest.main() diff --git a/ext/dapr-ext-agent_core/tests/test_dapr_agents_metadata.py b/ext/dapr-ext-agent_core/tests/test_dapr_agents_metadata.py new file mode 100644 index 00000000..13d7861b --- /dev/null +++ b/ext/dapr-ext-agent_core/tests/test_dapr_agents_metadata.py @@ -0,0 +1,276 @@ +# -*- coding: utf-8 -*- + +""" +Copyright 2026 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +import unittest + +from dapr.ext.agent_core.mapping.dapr_agents import DaprAgentsMapper + + +class MockProfile: + """Mock agent profile for testing.""" + + def __init__(self, role='assistant', goal='Help users', system_prompt='You are helpful.'): + self.role = role + self.goal = goal + self.instructions = ['Instruction 1', 'Instruction 2'] + self.system_prompt = system_prompt + + +class MockMemory: + """Mock memory for testing.""" + + def __init__(self, store_name='memory-store', session_id='session-1'): + self.store_name = store_name + self.session_id = session_id + + +class MockPubSub: + """Mock pubsub for testing.""" + + def __init__(self, pubsub_name='pubsub-component'): + self.pubsub_name = pubsub_name + self.broadcast_topic = 'broadcast' + self.agent_topic = 'agent-topic' + + +class MockLLM: + """Mock LLM for testing.""" + + def __init__(self): + self.provider = 'openai' + self.api = 'chat' + self.model = 'gpt-4' + self.component_name = None + self.base_url = None + self.azure_endpoint = None + self.azure_deployment = None + self.prompt_template = None + + +class MockTool: + """Mock tool for testing.""" + + def __init__(self, name='search', description='Search tool'): + self.name = name + self.description = description + self.args_schema = {'query': 'string'} + + +class MockRegistry: + """Mock registry for testing.""" + + def __init__(self): + self.store = MockRegistryStore() + self.team_name = 'team-alpha' + + +class MockRegistryStore: + """Mock registry store for testing.""" + + def __init__(self): + self.store_name = 'registry-store' + + +class MockExecution: + """Mock execution config for testing.""" + + def __init__(self): + self.max_iterations = 10 + self.tool_choice = 'auto' + + +class MockDaprAgent: + """Mock Dapr agent for testing.""" + + def __init__( + self, + name='test-agent', + appid='test-app', + profile=None, + memory=None, + pubsub=None, + llm=None, + tools=None, + registry=None, + execution=None, + ): + self.name = name + self.appid = appid + self.profile = profile + self.memory = memory + self.pubsub = pubsub + self.llm = llm + self.tools = tools or [] + self._registry = registry + self.execution = execution + self.agent_metadata = {'custom_key': 'custom_value'} + + +class DaprAgentsMapperTest(unittest.TestCase): + """Tests for DaprAgentsMapper metadata extraction.""" + + def test_mapper_instantiation(self): + """Test that DaprAgentsMapper can be instantiated.""" + mapper = DaprAgentsMapper() + self.assertIsNotNone(mapper) + + def test_minimal_agent_metadata_extraction(self): + """Test metadata extraction with minimal agent configuration.""" + agent = MockDaprAgent(name='minimal-agent', appid='app-1') + mapper = DaprAgentsMapper() + + metadata = mapper.map_agent_metadata(agent, schema_version='1.0.0') + + self.assertEqual(metadata.schema_version, '1.0.0') + self.assertEqual(metadata.name, 'minimal-agent') + self.assertEqual(metadata.agent.appid, 'app-1') + self.assertEqual(metadata.agent.type, 'MockDaprAgent') + + def test_full_agent_metadata_extraction(self): + """Test metadata extraction with full agent configuration.""" + agent = MockDaprAgent( + name='full-agent', + appid='app-2', + profile=MockProfile(role='coordinator', goal='Coordinate tasks'), + memory=MockMemory(store_name='mem-store', session_id='sess-1'), + pubsub=MockPubSub(pubsub_name='ps-component'), + llm=MockLLM(), + tools=[MockTool(name='tool1', description='Tool 1')], + registry=MockRegistry(), + execution=MockExecution(), + ) + mapper = DaprAgentsMapper() + + metadata = 
mapper.map_agent_metadata(agent, schema_version='1.0.0') + + # Agent metadata + self.assertEqual(metadata.agent.role, 'coordinator') + self.assertEqual(metadata.agent.goal, 'Coordinate tasks') + self.assertEqual(metadata.agent.statestore, 'mem-store') + self.assertEqual(metadata.agent.system_prompt, 'You are helpful.') + + # Memory metadata + self.assertEqual(metadata.memory.type, 'MockMemory') + self.assertEqual(metadata.memory.statestore, 'mem-store') + self.assertEqual(metadata.memory.session_id, 'sess-1') + + # PubSub metadata + self.assertEqual(metadata.pubsub.name, 'ps-component') + self.assertEqual(metadata.pubsub.broadcast_topic, 'broadcast') + self.assertEqual(metadata.pubsub.agent_topic, 'agent-topic') + + # LLM metadata + self.assertEqual(metadata.llm.provider, 'openai') + self.assertEqual(metadata.llm.model, 'gpt-4') + + # Tools + self.assertEqual(len(metadata.tools), 1) + self.assertEqual(metadata.tools[0].tool_name, 'tool1') + + # Registry + self.assertEqual(metadata.registry.name, 'team-alpha') + self.assertEqual(metadata.registry.statestore, 'registry-store') + + # Execution + self.assertEqual(metadata.max_iterations, 10) + self.assertEqual(metadata.tool_choice, 'auto') + + def test_agent_metadata_field(self): + """Test agent_metadata field is preserved.""" + agent = MockDaprAgent() + mapper = DaprAgentsMapper() + + metadata = mapper.map_agent_metadata(agent, schema_version='1.0.0') + + self.assertEqual(metadata.agent_metadata, {'custom_key': 'custom_value'}) + + def test_profile_instructions_extraction(self): + """Test profile instructions are extracted.""" + agent = MockDaprAgent(profile=MockProfile()) + mapper = DaprAgentsMapper() + + metadata = mapper.map_agent_metadata(agent, schema_version='1.0.0') + + self.assertEqual(metadata.agent.instructions, ['Instruction 1', 'Instruction 2']) + + def test_registered_at_is_set(self): + """Test registered_at timestamp is set.""" + agent = MockDaprAgent() + mapper = DaprAgentsMapper() + + metadata = mapper.map_agent_metadata(agent, schema_version='1.0.0') + + self.assertIsNotNone(metadata.registered_at) + self.assertIn('T', metadata.registered_at) + + def test_empty_tools_list(self): + """Test empty tools list is handled.""" + agent = MockDaprAgent(tools=[]) + mapper = DaprAgentsMapper() + + metadata = mapper.map_agent_metadata(agent, schema_version='1.0.0') + + self.assertEqual(metadata.tools, []) + + def test_multiple_tools(self): + """Test multiple tools are extracted.""" + tools = [ + MockTool(name='tool1', description='First tool'), + MockTool(name='tool2', description='Second tool'), + ] + agent = MockDaprAgent(tools=tools) + mapper = DaprAgentsMapper() + + metadata = mapper.map_agent_metadata(agent, schema_version='1.0.0') + + self.assertEqual(len(metadata.tools), 2) + tool_names = [t.tool_name for t in metadata.tools] + self.assertIn('tool1', tool_names) + self.assertIn('tool2', tool_names) + + def test_none_profile(self): + """Test handling of None profile.""" + agent = MockDaprAgent(profile=None) + mapper = DaprAgentsMapper() + + metadata = mapper.map_agent_metadata(agent, schema_version='1.0.0') + + self.assertEqual(metadata.agent.role, '') + self.assertEqual(metadata.agent.goal, '') + self.assertEqual(metadata.agent.system_prompt, '') + + def test_none_memory(self): + """Test handling of None memory.""" + agent = MockDaprAgent(memory=None) + mapper = DaprAgentsMapper() + + metadata = mapper.map_agent_metadata(agent, schema_version='1.0.0') + + self.assertEqual(metadata.memory.type, '') + 
self.assertIsNone(metadata.memory.statestore) + + def test_none_llm(self): + """Test handling of None LLM.""" + agent = MockDaprAgent(llm=None) + mapper = DaprAgentsMapper() + + metadata = mapper.map_agent_metadata(agent, schema_version='1.0.0') + + self.assertEqual(metadata.llm.client, '') + self.assertEqual(metadata.llm.provider, 'unknown') + + +if __name__ == '__main__': + unittest.main() diff --git a/ext/dapr-ext-agent_core/tests/test_introspection.py b/ext/dapr-ext-agent_core/tests/test_introspection.py new file mode 100644 index 00000000..9ad18ddd --- /dev/null +++ b/ext/dapr-ext-agent_core/tests/test_introspection.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- + +""" +Copyright 2026 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import unittest + +from dapr.ext.agent_core.introspection import detect_framework + + +class DetectFrameworkTest(unittest.TestCase): + """Tests for detect_framework function.""" + + def test_detect_langgraph_by_class_name(self): + """Test detection of LangGraph by CompiledStateGraph class name.""" + + class CompiledStateGraph: + pass + + agent = CompiledStateGraph() + result = detect_framework(agent) + self.assertEqual(result, 'langgraph') + + def test_detect_langgraph_by_module(self): + """Test detection of LangGraph by module path.""" + + class MockGraph: + pass + + MockGraph.__module__ = 'langgraph.graph.state' + agent = MockGraph() + result = detect_framework(agent) + self.assertEqual(result, 'langgraph') + + def test_detect_dapr_agents_by_module(self): + """Test detection of dapr-agents by module path.""" + + class MockAgent: + pass + + MockAgent.__module__ = 'dapr_agents.agents.base' + agent = MockAgent() + result = detect_framework(agent) + self.assertEqual(result, 'dapr_agents') + + def test_detect_strands_by_class_name(self): + """Test detection of Strands by DaprSessionManager class name.""" + + class DaprSessionManager: + pass + + agent = DaprSessionManager() + result = detect_framework(agent) + self.assertEqual(result, 'strands') + + def test_detect_strands_by_module(self): + """Test detection of Strands by module path.""" + + class MockSessionManager: + pass + + # Use actual type name that detection looks for + MockSessionManager.__module__ = 'dapr.ext.strands' + MockSessionManager.__name__ = 'DaprSessionManager' + agent = MockSessionManager() + result = detect_framework(agent) + self.assertEqual(result, 'strands') + + def test_detect_unknown_framework(self): + """Test detection returns None for unknown frameworks.""" + + class UnknownAgent: + pass + + UnknownAgent.__module__ = 'some.unknown.module' + agent = UnknownAgent() + result = detect_framework(agent) + self.assertIsNone(result) + + def test_detect_builtin_object(self): + """Test detection returns None for builtin objects.""" + result = detect_framework('string') + self.assertIsNone(result) + + result = detect_framework(42) + self.assertIsNone(result) + + result = detect_framework([1, 2, 3]) + self.assertIsNone(result) + + +if __name__ == '__main__': + unittest.main() diff --git 
a/ext/dapr-ext-agent_core/tests/test_langgraph_metadata.py b/ext/dapr-ext-agent_core/tests/test_langgraph_metadata.py new file mode 100644 index 00000000..28786561 --- /dev/null +++ b/ext/dapr-ext-agent_core/tests/test_langgraph_metadata.py @@ -0,0 +1,181 @@ +# -*- coding: utf-8 -*- + +""" +Copyright 2026 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import unittest +from unittest import mock + +from dapr.ext.agent_core.mapping.langgraph import LangGraphMapper + + +class MockCheckpointer: + """Mock DaprCheckpointer for testing.""" + + def __init__(self, state_store_name='test-store'): + self.state_store_name = state_store_name + + +class MockCompiledStateGraph: + """Mock CompiledStateGraph for testing.""" + + def __init__( + self, + name='test-graph', + checkpointer=None, + nodes=None, + ): + self._name = name + self.checkpointer = checkpointer + self.nodes = nodes or {} + + def get_name(self): + return self._name + + +class LangGraphMapperTest(unittest.TestCase): + """Tests for LangGraphMapper metadata extraction.""" + + def test_mapper_instantiation(self): + """Test that LangGraphMapper can be instantiated.""" + mapper = LangGraphMapper() + self.assertIsNotNone(mapper) + + @mock.patch('dapr.ext.agent_core.mapping.langgraph.PregelNode') + def test_basic_metadata_extraction(self, mock_pregel_node): + """Test basic metadata extraction from a mock graph.""" + checkpointer = MockCheckpointer(state_store_name='my-store') + graph = MockCompiledStateGraph( + name='my-graph', + checkpointer=checkpointer, + nodes={'__start__': None}, + ) + + mapper = LangGraphMapper() + metadata = mapper.map_agent_metadata(graph, schema_version='1.0.0') + + self.assertEqual(metadata.schema_version, '1.0.0') + self.assertEqual(metadata.agent.type, 'MockCompiledStateGraph') + self.assertEqual(metadata.name, 'my-graph') + self.assertEqual(metadata.memory.type, 'DaprCheckpointer') + self.assertEqual(metadata.memory.statestore, 'my-store') + + @mock.patch('dapr.ext.agent_core.mapping.langgraph.PregelNode') + def test_metadata_without_checkpointer(self, mock_pregel_node): + """Test metadata extraction without a checkpointer.""" + graph = MockCompiledStateGraph( + name='no-checkpointer-graph', + checkpointer=None, + nodes={}, + ) + + mapper = LangGraphMapper() + metadata = mapper.map_agent_metadata(graph, schema_version='1.0.0') + + self.assertIsNone(metadata.memory.statestore) + self.assertIsNone(metadata.agent.statestore) + + @mock.patch('dapr.ext.agent_core.mapping.langgraph.PregelNode') + def test_metadata_agent_role_defaults(self, mock_pregel_node): + """Test agent metadata default values.""" + graph = MockCompiledStateGraph(name='test') + + mapper = LangGraphMapper() + metadata = mapper.map_agent_metadata(graph, schema_version='1.0.0') + + self.assertEqual(metadata.agent.role, 'Assistant') + self.assertFalse(metadata.agent.orchestrator) + self.assertEqual(metadata.agent.appid, '') + + @mock.patch('dapr.ext.agent_core.mapping.langgraph.PregelNode') + def test_metadata_llm_defaults(self, mock_pregel_node): + """Test LLM 
metadata defaults when no LLM is detected.""" + graph = MockCompiledStateGraph(name='test') + + mapper = LangGraphMapper() + metadata = mapper.map_agent_metadata(graph, schema_version='1.0.0') + + self.assertEqual(metadata.llm.client, '') + self.assertEqual(metadata.llm.provider, 'unknown') + self.assertEqual(metadata.llm.model, 'unknown') + + @mock.patch('dapr.ext.agent_core.mapping.langgraph.PregelNode') + def test_metadata_pubsub_defaults(self, mock_pregel_node): + """Test PubSub metadata defaults.""" + graph = MockCompiledStateGraph(name='test') + + mapper = LangGraphMapper() + metadata = mapper.map_agent_metadata(graph, schema_version='1.0.0') + + self.assertEqual(metadata.pubsub.name, '') + self.assertIsNone(metadata.pubsub.broadcast_topic) + self.assertIsNone(metadata.pubsub.agent_topic) + + @mock.patch('dapr.ext.agent_core.mapping.langgraph.PregelNode') + def test_metadata_registered_at_is_set(self, mock_pregel_node): + """Test registered_at timestamp is set.""" + graph = MockCompiledStateGraph(name='test') + + mapper = LangGraphMapper() + metadata = mapper.map_agent_metadata(graph, schema_version='1.0.0') + + self.assertIsNotNone(metadata.registered_at) + self.assertIn('T', metadata.registered_at) + + +class LangGraphProviderExtractionTest(unittest.TestCase): + """Tests for LLM provider extraction.""" + + def test_extract_openai_provider(self): + """Test OpenAI provider extraction.""" + mapper = LangGraphMapper() + self.assertEqual(mapper._extract_provider('langchain_openai.chat'), 'openai') + + def test_extract_azure_provider(self): + """Test Azure OpenAI provider extraction.""" + mapper = LangGraphMapper() + self.assertEqual(mapper._extract_provider('langchain.azure_openai'), 'azure_openai') + + def test_extract_anthropic_provider(self): + """Test Anthropic provider extraction.""" + mapper = LangGraphMapper() + self.assertEqual(mapper._extract_provider('langchain_anthropic.chat'), 'anthropic') + + def test_extract_ollama_provider(self): + """Test Ollama provider extraction.""" + mapper = LangGraphMapper() + self.assertEqual(mapper._extract_provider('langchain_ollama.llms'), 'ollama') + + def test_extract_google_provider(self): + """Test Google provider extraction.""" + mapper = LangGraphMapper() + self.assertEqual(mapper._extract_provider('langchain_google.genai'), 'google') + + def test_extract_gemini_provider(self): + """Test Gemini provider extraction.""" + mapper = LangGraphMapper() + self.assertEqual(mapper._extract_provider('langchain_gemini.chat'), 'google') + + def test_extract_cohere_provider(self): + """Test Cohere provider extraction.""" + mapper = LangGraphMapper() + self.assertEqual(mapper._extract_provider('langchain_cohere.chat'), 'cohere') + + def test_extract_unknown_provider(self): + """Test unknown provider returns 'unknown'.""" + mapper = LangGraphMapper() + self.assertEqual(mapper._extract_provider('some.unknown.module'), 'unknown') + + +if __name__ == '__main__': + unittest.main() diff --git a/ext/dapr-ext-agent_core/tests/test_strands_mapper.py b/ext/dapr-ext-agent_core/tests/test_strands_mapper.py new file mode 100644 index 00000000..2a2588c2 --- /dev/null +++ b/ext/dapr-ext-agent_core/tests/test_strands_mapper.py @@ -0,0 +1,187 @@ +""" +Copyright 2026 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import time +import unittest +from unittest import mock + +from dapr.ext.agent_core.mapping.strands import StrandsMapper + + +def make_mock_session_manager(session_id='test-session', state_store='statestore'): + """Create a mock DaprSessionManager for testing.""" + mock_manager = mock.Mock() + mock_manager._session_id = session_id + mock_manager._state_store_name = state_store + mock_manager.state_store_name = state_store # Public property + return mock_manager + + +def make_mock_session_agent(agent_id='test-agent', state=None): + """Create a mock SessionAgent for testing.""" + mock_agent = mock.Mock() + mock_agent.agent_id = agent_id + mock_agent.state = state or {} + mock_agent.conversation_manager_state = {} + mock_agent.created_at = time.time() + return mock_agent + + +class TestStrandsMapper(unittest.TestCase): + def setUp(self): + self.mapper = StrandsMapper() + + def test_map_agent_metadata_no_session_agent(self): + """Test mapping when no SessionAgent exists in the session.""" + session_manager = make_mock_session_manager() + session_manager._read_state.return_value = None + + result = self.mapper.map_agent_metadata(session_manager, 'edge') + + # Should use fallback values + self.assertEqual(result.schema_version, 'edge') + self.assertEqual(result.agent.role, 'Session Manager') + self.assertEqual(result.agent.type, 'Strands') + self.assertIsNone(result.llm) + self.assertEqual(result.tools, []) # Empty list, not None + self.assertIsNone(result.tool_choice) + self.assertIsNone(result.max_iterations) + self.assertEqual(result.name, 'strands-session-test-session') + + def test_map_agent_metadata_with_basic_agent(self): + """Test mapping with a basic SessionAgent.""" + session_manager = make_mock_session_manager() + + # Mock manifest + session_manager._read_state.return_value = {'agents': ['assistant']} + + # Mock agent + agent_state = { + 'system_prompt': 'You are a helpful assistant', + 'role': 'AI Assistant', + 'goal': 'Help users', + } + mock_agent = make_mock_session_agent('assistant', agent_state) + session_manager.read_agent.return_value = mock_agent + + result = self.mapper.map_agent_metadata(session_manager, 'edge') + + # Should extract from SessionAgent + self.assertEqual(result.agent.role, 'AI Assistant') + self.assertEqual(result.agent.goal, 'Help users') + self.assertEqual(result.agent.system_prompt, 'You are a helpful assistant') + self.assertEqual(result.name, 'strands-test-session-assistant') + self.assertEqual(result.agent_metadata['agent_id'], 'assistant') + + def test_map_agent_metadata_with_llm_config(self): + """Test mapping with LLM configuration.""" + session_manager = make_mock_session_manager() + session_manager._read_state.return_value = {'agents': ['assistant']} + + agent_state = { + 'system_prompt': 'You are helpful', + 'conversation_provider': 'openai', + 'llm_config': { + 'provider': 'openai', + 'model': 'gpt-4', + }, + } + mock_agent = make_mock_session_agent('assistant', agent_state) + session_manager.read_agent.return_value = mock_agent + + result = self.mapper.map_agent_metadata(session_manager, 'edge') + + # Should extract LLM metadata + 
self.assertIsNotNone(result.llm) + self.assertEqual(result.llm.client, 'dapr_conversation') + self.assertEqual(result.llm.provider, 'openai') + self.assertEqual(result.llm.model, 'gpt-4') + self.assertEqual(result.llm.component_name, 'openai') + + def test_map_agent_metadata_with_tools(self): + """Test mapping with tools configuration.""" + session_manager = make_mock_session_manager() + session_manager._read_state.return_value = {'agents': ['assistant']} + + agent_state = { + 'tools': [ + { + 'name': 'calculator', + 'description': 'Calculate math', + 'args': {'x': 'int', 'y': 'int'}, + }, + {'name': 'search', 'description': 'Search web', 'args': {'query': 'str'}}, + ], + 'tool_choice': 'auto', + } + mock_agent = make_mock_session_agent('assistant', agent_state) + session_manager.read_agent.return_value = mock_agent + + result = self.mapper.map_agent_metadata(session_manager, 'edge') + + # Should extract tools metadata + self.assertIsNotNone(result.tools) + self.assertEqual(len(result.tools), 2) + self.assertEqual(result.tools[0].tool_name, 'calculator') + self.assertEqual(result.tools[0].tool_description, 'Calculate math') + self.assertEqual(result.tools[1].tool_name, 'search') + self.assertEqual(result.tool_choice, 'auto') + + def test_map_agent_metadata_with_instructions(self): + """Test mapping with instructions.""" + session_manager = make_mock_session_manager() + session_manager._read_state.return_value = {'agents': ['assistant']} + + agent_state = { + 'instructions': ['Be concise', 'Be helpful', 'Be friendly'], + 'max_iterations': 10, + } + mock_agent = make_mock_session_agent('assistant', agent_state) + session_manager.read_agent.return_value = mock_agent + + result = self.mapper.map_agent_metadata(session_manager, 'edge') + + # Should extract instructions and max_iterations + self.assertIsNotNone(result.agent.instructions) + self.assertEqual(len(result.agent.instructions), 3) + self.assertEqual(result.agent.instructions[0], 'Be concise') + self.assertEqual(result.max_iterations, 10) + + def test_extract_llm_metadata_no_config(self): + """Test LLM metadata extraction with no configuration.""" + result = self.mapper._extract_llm_metadata({}) + self.assertIsNone(result) + + def test_extract_tools_metadata_no_tools(self): + """Test tools metadata extraction with no tools.""" + result = self.mapper._extract_tools_metadata({}) + self.assertEqual(result, []) # Empty list, not None + + def test_extract_tools_metadata_with_tool_objects(self): + """Test tools metadata extraction with tool objects.""" + mock_tool = mock.Mock() + mock_tool.name = 'my_tool' + mock_tool.description = 'My tool description' + mock_tool.args = {'param': 'value'} + + agent_state = {'tools': [mock_tool]} + result = self.mapper._extract_tools_metadata(agent_state) + + self.assertIsNotNone(result) + self.assertEqual(len(result), 1) + self.assertEqual(result[0].tool_name, 'my_tool') + self.assertEqual(result[0].tool_description, 'My tool description') + + +if __name__ == '__main__': + unittest.main() diff --git a/ext/dapr-ext-agent_core/tests/test_strands_metadata.py b/ext/dapr-ext-agent_core/tests/test_strands_metadata.py new file mode 100644 index 00000000..1aa5fab2 --- /dev/null +++ b/ext/dapr-ext-agent_core/tests/test_strands_metadata.py @@ -0,0 +1,163 @@ +# -*- coding: utf-8 -*- + +""" +Copyright 2026 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import unittest + +from dapr.ext.agent_core.introspection import detect_framework +from dapr.ext.agent_core.mapping.strands import StrandsMapper + + +class MockSessionManager: + """Mock DaprSessionManager for testing.""" + + def __init__(self, state_store_name='test-statestore', session_id='test-session-123'): + self._state_store_name = state_store_name + self._session_id = session_id + + @property + def state_store_name(self) -> str: + return self._state_store_name + + +class StrandsMapperTest(unittest.TestCase): + """Tests for StrandsMapper metadata extraction.""" + + def test_mapper_instantiation(self): + """Test that StrandsMapper can be instantiated.""" + mapper = StrandsMapper() + self.assertIsNotNone(mapper) + + def test_metadata_extraction_basic(self): + """Test basic metadata extraction from a mock session manager.""" + mock_manager = MockSessionManager() + mapper = StrandsMapper() + + metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') + + self.assertEqual(metadata.schema_version, '1.0.0') + self.assertEqual(metadata.agent.type, 'Strands') + self.assertEqual(metadata.agent.role, 'Session Manager') + self.assertEqual(metadata.agent.orchestrator, False) + self.assertEqual( + metadata.agent.goal, 'Manages multi-agent sessions with distributed state storage' + ) + + def test_metadata_memory_extraction(self): + """Test memory metadata extraction.""" + mock_manager = MockSessionManager(state_store_name='custom-store', session_id='session-456') + mapper = StrandsMapper() + + metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') + + self.assertEqual(metadata.memory.type, 'DaprSessionManager') + self.assertEqual(metadata.memory.session_id, 'session-456') + self.assertEqual(metadata.memory.statestore, 'custom-store') + + def test_metadata_name_generation(self): + """Test agent name generation with session ID.""" + mock_manager = MockSessionManager(session_id='my-session') + mapper = StrandsMapper() + + metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') + + # Fallback path generates different name + self.assertEqual(metadata.name, 'strands-session-my-session') + + def test_metadata_name_without_session_id(self): + """Test agent name generation without session ID.""" + mock_manager = MockSessionManager(session_id=None) + mapper = StrandsMapper() + + metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') + + self.assertEqual(metadata.name, 'strands-session') + + def test_metadata_agent_metadata_field(self): + """Test agent_metadata field contains framework info.""" + mock_manager = MockSessionManager(state_store_name='store1', session_id='sess1') + mapper = StrandsMapper() + + metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') + + self.assertEqual(metadata.agent_metadata['framework'], 'strands') + self.assertEqual(metadata.agent_metadata['session_id'], 'sess1') + self.assertEqual(metadata.agent_metadata['state_store'], 'store1') + # agent_id is None in fallback path when no SessionAgent exists + self.assertIsNone(metadata.agent_metadata['agent_id']) + + def 
test_metadata_registry_defaults(self): + """Test registry metadata has correct defaults.""" + mock_manager = MockSessionManager() + mapper = StrandsMapper() + + metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') + + self.assertIsNotNone(metadata.registry) + self.assertIsNone(metadata.registry.statestore) + self.assertIsNone(metadata.registry.name) + + def test_metadata_optional_fields_are_none(self): + """Test optional fields are None when not applicable.""" + mock_manager = MockSessionManager() + mapper = StrandsMapper() + + metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') + + self.assertIsNone(metadata.pubsub) + self.assertIsNone(metadata.llm) + self.assertEqual(metadata.tools, []) # Empty list, not None + self.assertIsNone(metadata.max_iterations) + self.assertIsNone(metadata.tool_choice) + + def test_metadata_registered_at_is_set(self): + """Test registered_at timestamp is set.""" + mock_manager = MockSessionManager() + mapper = StrandsMapper() + + metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') + + self.assertIsNotNone(metadata.registered_at) + self.assertIn('T', metadata.registered_at) + + +class StrandsFrameworkDetectionTest(unittest.TestCase): + """Tests for framework detection with Strands objects.""" + + def test_detect_framework_by_class_name(self): + """Test detection by DaprSessionManager class name.""" + + class DaprSessionManager: + pass + + mock = DaprSessionManager() + framework = detect_framework(mock) + self.assertEqual(framework, 'strands') + + def test_detect_framework_by_module(self): + """Test detection by strands module path.""" + + class MockAgent: + pass + + # Use actual type name and module that detection looks for + MockAgent.__module__ = 'strands.agent' + MockAgent.__name__ = 'Agent' + mock = MockAgent() + framework = detect_framework(mock) + self.assertEqual(framework, 'strands') + + +if __name__ == '__main__': + unittest.main() diff --git a/ext/dapr-ext-agent_core/tests/test_types.py b/ext/dapr-ext-agent_core/tests/test_types.py new file mode 100644 index 00000000..c2d0eef0 --- /dev/null +++ b/ext/dapr-ext-agent_core/tests/test_types.py @@ -0,0 +1,256 @@ +# -*- coding: utf-8 -*- + +""" +Copyright 2026 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +import unittest + +from dapr.ext.agent_core.types import ( + AgentMetadata, + AgentMetadataSchema, + LLMMetadata, + MemoryMetadata, + PubSubMetadata, + RegistryMetadata, + SupportedFrameworks, + ToolMetadata, +) + + +class SupportedFrameworksTest(unittest.TestCase): + """Tests for SupportedFrameworks enum.""" + + def test_dapr_agents_value(self): + """Test DAPR_AGENTS enum value.""" + self.assertEqual(SupportedFrameworks.DAPR_AGENTS.value, 'dapr-agents') + + def test_langgraph_value(self): + """Test LANGGRAPH enum value.""" + self.assertEqual(SupportedFrameworks.LANGGRAPH.value, 'langgraph') + + def test_strands_value(self): + """Test STRANDS enum value.""" + self.assertEqual(SupportedFrameworks.STRANDS.value, 'strands') + + def test_all_frameworks_present(self): + """Test all expected frameworks are present.""" + framework_values = [f.value for f in SupportedFrameworks] + self.assertIn('dapr-agents', framework_values) + self.assertIn('langgraph', framework_values) + self.assertIn('strands', framework_values) + + +class AgentMetadataTest(unittest.TestCase): + """Tests for AgentMetadata model.""" + + def test_minimal_agent_metadata(self): + """Test AgentMetadata with minimal required fields.""" + agent = AgentMetadata(appid='test-app', type='standalone') + self.assertEqual(agent.appid, 'test-app') + self.assertEqual(agent.type, 'standalone') + self.assertEqual(agent.orchestrator, False) + self.assertEqual(agent.role, '') + self.assertEqual(agent.goal, '') + + def test_full_agent_metadata(self): + """Test AgentMetadata with all fields populated.""" + agent = AgentMetadata( + appid='test-app', + type='durable', + orchestrator=True, + role='coordinator', + goal='Manage tasks', + instructions=['Step 1', 'Step 2'], + statestore='agent-store', + system_prompt='You are a helpful assistant.', + ) + self.assertEqual(agent.appid, 'test-app') + self.assertEqual(agent.type, 'durable') + self.assertTrue(agent.orchestrator) + self.assertEqual(agent.role, 'coordinator') + self.assertEqual(agent.goal, 'Manage tasks') + self.assertEqual(agent.instructions, ['Step 1', 'Step 2']) + self.assertEqual(agent.statestore, 'agent-store') + self.assertEqual(agent.system_prompt, 'You are a helpful assistant.') + + +class LLMMetadataTest(unittest.TestCase): + """Tests for LLMMetadata model.""" + + def test_minimal_llm_metadata(self): + """Test LLMMetadata with minimal required fields.""" + llm = LLMMetadata(client='OpenAI', provider='openai') + self.assertEqual(llm.client, 'OpenAI') + self.assertEqual(llm.provider, 'openai') + self.assertEqual(llm.api, 'unknown') + self.assertEqual(llm.model, 'unknown') + + def test_azure_llm_metadata(self): + """Test LLMMetadata with Azure-specific fields.""" + llm = LLMMetadata( + client='AzureOpenAI', + provider='azure_openai', + api='chat', + model='gpt-4', + azure_endpoint='https://myresource.openai.azure.com', + azure_deployment='gpt-4-deployment', + ) + self.assertEqual(llm.azure_endpoint, 'https://myresource.openai.azure.com') + self.assertEqual(llm.azure_deployment, 'gpt-4-deployment') + + +class PubSubMetadataTest(unittest.TestCase): + """Tests for PubSubMetadata model.""" + + def test_minimal_pubsub_metadata(self): + """Test PubSubMetadata with minimal required fields.""" + pubsub = PubSubMetadata(name='pubsub-component') + self.assertEqual(pubsub.name, 'pubsub-component') + self.assertIsNone(pubsub.broadcast_topic) + self.assertIsNone(pubsub.agent_topic) + + def test_full_pubsub_metadata(self): + """Test PubSubMetadata with all fields.""" + pubsub = PubSubMetadata( 
+ name='pubsub-component', + broadcast_topic='broadcast', + agent_topic='agent-topic', + ) + self.assertEqual(pubsub.broadcast_topic, 'broadcast') + self.assertEqual(pubsub.agent_topic, 'agent-topic') + + +class ToolMetadataTest(unittest.TestCase): + """Tests for ToolMetadata model.""" + + def test_tool_metadata(self): + """Test ToolMetadata creation.""" + tool = ToolMetadata( + tool_name='search', + tool_description='Search the web', + tool_args='{"query": "string"}', + ) + self.assertEqual(tool.tool_name, 'search') + self.assertEqual(tool.tool_description, 'Search the web') + self.assertEqual(tool.tool_args, '{"query": "string"}') + + +class MemoryMetadataTest(unittest.TestCase): + """Tests for MemoryMetadata model.""" + + def test_minimal_memory_metadata(self): + """Test MemoryMetadata with minimal required fields.""" + memory = MemoryMetadata(type='DaprCheckpointer') + self.assertEqual(memory.type, 'DaprCheckpointer') + self.assertIsNone(memory.statestore) + self.assertIsNone(memory.session_id) + + def test_full_memory_metadata(self): + """Test MemoryMetadata with all fields.""" + memory = MemoryMetadata( + type='DaprSessionManager', + statestore='session-store', + session_id='session-123', + ) + self.assertEqual(memory.type, 'DaprSessionManager') + self.assertEqual(memory.statestore, 'session-store') + self.assertEqual(memory.session_id, 'session-123') + + +class RegistryMetadataTest(unittest.TestCase): + """Tests for RegistryMetadata model.""" + + def test_empty_registry_metadata(self): + """Test RegistryMetadata with defaults.""" + registry = RegistryMetadata() + self.assertIsNone(registry.statestore) + self.assertIsNone(registry.name) + + def test_full_registry_metadata(self): + """Test RegistryMetadata with all fields.""" + registry = RegistryMetadata(statestore='registry-store', name='team-alpha') + self.assertEqual(registry.statestore, 'registry-store') + self.assertEqual(registry.name, 'team-alpha') + + +class AgentMetadataSchemaTest(unittest.TestCase): + """Tests for AgentMetadataSchema model.""" + + def test_minimal_schema(self): + """Test AgentMetadataSchema with minimal required fields.""" + schema = AgentMetadataSchema( + schema_version='1.0.0', + agent=AgentMetadata(appid='test-app', type='standalone'), + name='test-agent', + registered_at='2026-01-01T00:00:00Z', + ) + self.assertEqual(schema.schema_version, '1.0.0') + self.assertEqual(schema.agent.appid, 'test-app') + self.assertEqual(schema.name, 'test-agent') + self.assertIsNone(schema.pubsub) + self.assertIsNone(schema.memory) + self.assertIsNone(schema.llm) + + def test_full_schema(self): + """Test AgentMetadataSchema with all fields.""" + schema = AgentMetadataSchema( + schema_version='1.0.0', + agent=AgentMetadata(appid='test-app', type='durable'), + name='full-agent', + registered_at='2026-01-01T00:00:00Z', + pubsub=PubSubMetadata(name='pubsub'), + memory=MemoryMetadata(type='DaprCheckpointer'), + llm=LLMMetadata(client='OpenAI', provider='openai'), + registry=RegistryMetadata(name='team-1'), + tools=[ + ToolMetadata(tool_name='search', tool_description='Search tool', tool_args='{}') + ], + max_iterations=10, + tool_choice='auto', + agent_metadata={'custom': 'data'}, + ) + self.assertEqual(schema.name, 'full-agent') + self.assertEqual(schema.pubsub.name, 'pubsub') + self.assertEqual(schema.memory.type, 'DaprCheckpointer') + self.assertEqual(schema.llm.client, 'OpenAI') + self.assertEqual(len(schema.tools), 1) + self.assertEqual(schema.max_iterations, 10) + self.assertEqual(schema.agent_metadata['custom'], 'data') 
+ + def test_export_json_schema(self): + """Test export_json_schema method.""" + json_schema = AgentMetadataSchema.export_json_schema(version='1.0.0') + + self.assertIn('$schema', json_schema) + self.assertEqual(json_schema['$schema'], 'https://json-schema.org/draft/2020-12/schema') + self.assertEqual(json_schema['version'], '1.0.0') + self.assertIn('properties', json_schema) + + def test_model_dump(self): + """Test model_dump produces valid dictionary.""" + schema = AgentMetadataSchema( + schema_version='1.0.0', + agent=AgentMetadata(appid='test-app', type='standalone'), + name='test-agent', + registered_at='2026-01-01T00:00:00Z', + ) + data = schema.model_dump() + + self.assertIsInstance(data, dict) + self.assertEqual(data['schema_version'], '1.0.0') + self.assertEqual(data['name'], 'test-agent') + self.assertIn('agent', data) + + +if __name__ == '__main__': + unittest.main() diff --git a/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py b/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py index 6d2614d9..f14b9670 100644 --- a/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py +++ b/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py @@ -17,6 +17,7 @@ from typing import Any, Dict, List, Optional, Sequence, Tuple, cast import msgpack +from dapr.ext.agent_core import AgentRegistryAdapter from langchain_core.messages import AIMessage, HumanMessage, ToolMessage from langchain_core.runnables import RunnableConfig from ulid import ULID @@ -41,12 +42,14 @@ class DaprCheckpointer(BaseCheckpointSaver[Checkpoint]): REGISTRY_KEY = 'dapr_checkpoint_registry' - def __init__(self, store_name: str, key_prefix: str): - self.store_name = store_name + def __init__(self, state_store_name: str, key_prefix: str): + self.state_store_name = state_store_name self.key_prefix = key_prefix self.serde = JsonPlusSerializer() self.client = DaprClient() self._key_cache: Dict[str, str] = {} + self.registry_adapter: Optional[AgentRegistryAdapter] = None + self._registry_initialized = False # helper: construct Dapr key for a thread def _get_key(self, config: RunnableConfig) -> str: @@ -70,6 +73,10 @@ def put( metadata: CheckpointMetadata, new_versions: ChannelVersions, ) -> RunnableConfig: + if not self._registry_initialized: + self.registry_adapter = AgentRegistryAdapter.create_from_stack(registry=None) + self._registry_initialized = True + thread_id = config['configurable']['thread_id'] checkpoint_ns = config['configurable'].get('checkpoint_ns', '') config_checkpoint_id = config['configurable'].get('checkpoint_id', '') @@ -134,14 +141,14 @@ def put( ) _, data = self.serde.dumps_typed(checkpoint_data) - self.client.save_state(store_name=self.store_name, key=checkpoint_key, value=data) + self.client.save_state(store_name=self.state_store_name, key=checkpoint_key, value=data) latest_pointer_key = ( f'checkpoint_latest:{storage_safe_thread_id}:{storage_safe_checkpoint_ns}' ) self.client.save_state( - store_name=self.store_name, key=latest_pointer_key, value=checkpoint_key + store_name=self.state_store_name, key=latest_pointer_key, value=checkpoint_key ) return next_config @@ -183,7 +190,9 @@ def put_writes( thread_id=thread_id, checkpoint_ns=checkpoint_ns, checkpoint_id=checkpoint_id ) - self.client.save_state(store_name=self.store_name, key=key, value=json.dumps(write_obj)) + self.client.save_state( + store_name=self.state_store_name, key=key, value=json.dumps(write_obj) + ) checkpoint_key = self._make_safe_checkpoint_key( thread_id=thread_id, checkpoint_ns=checkpoint_ns, 
checkpoint_id=checkpoint_id @@ -194,11 +203,11 @@ def put_writes( ) self.client.save_state( - store_name=self.store_name, key=latest_pointer_key, value=checkpoint_key + store_name=self.state_store_name, key=latest_pointer_key, value=checkpoint_key ) def list(self, config: RunnableConfig) -> list[CheckpointTuple]: - reg_resp = self.client.get_state(store_name=self.store_name, key=self.REGISTRY_KEY) + reg_resp = self.client.get_state(store_name=self.state_store_name, key=self.REGISTRY_KEY) if not reg_resp.data: return [] @@ -206,7 +215,7 @@ def list(self, config: RunnableConfig) -> list[CheckpointTuple]: checkpoints: list[CheckpointTuple] = [] for key in keys: - cp_resp = self.client.get_state(store_name=self.store_name, key=key) + cp_resp = self.client.get_state(store_name=self.state_store_name, key=key) if not cp_resp.data: continue @@ -229,9 +238,9 @@ def list(self, config: RunnableConfig) -> list[CheckpointTuple]: def delete_thread(self, config: RunnableConfig) -> None: key = self._get_key(config) - self.client.delete_state(store_name=self.store_name, key=key) + self.client.delete_state(store_name=self.state_store_name, key=key) - reg_resp = self.client.get_state(store_name=self.store_name, key=self.REGISTRY_KEY) + reg_resp = self.client.get_state(store_name=self.state_store_name, key=self.REGISTRY_KEY) if not reg_resp.data: return @@ -240,7 +249,7 @@ def delete_thread(self, config: RunnableConfig) -> None: if key in registry: registry.remove(key) self.client.save_state( - store_name=self.store_name, + store_name=self.state_store_name, key=self.REGISTRY_KEY, value=json.dumps(registry), ) @@ -261,13 +270,13 @@ def get_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]: ) # First we extract the latest checkpoint key - checkpoint_key = self.client.get_state(store_name=self.store_name, key=key) + checkpoint_key = self.client.get_state(store_name=self.state_store_name, key=key) if not checkpoint_key.data: return None # To then derive the checkpoint data checkpoint_data = self.client.get_state( - store_name=self.store_name, + store_name=self.state_store_name, # checkpoint_key.data can either be str or bytes key=checkpoint_key.data.decode() if isinstance(checkpoint_key.data, bytes) diff --git a/ext/dapr-ext-langgraph/setup.cfg b/ext/dapr-ext-langgraph/setup.cfg index 5a252a79..cc156227 100644 --- a/ext/dapr-ext-langgraph/setup.cfg +++ b/ext/dapr-ext-langgraph/setup.cfg @@ -25,6 +25,7 @@ packages = find_namespace: include_package_data = True install_requires = dapr >= 1.17.0.dev + #dapr.ext.agent_core >= 1.17.0.dev langgraph >= 0.3.6 langchain >= 0.1.17 python-ulid >= 3.0.0 diff --git a/ext/dapr-ext-strands/dapr/ext/strands/dapr_session_manager.py b/ext/dapr-ext-strands/dapr/ext/strands/dapr_session_manager.py index c9a98ebd..bb3a33de 100644 --- a/ext/dapr-ext-strands/dapr/ext/strands/dapr_session_manager.py +++ b/ext/dapr-ext-strands/dapr/ext/strands/dapr_session_manager.py @@ -66,9 +66,39 @@ def __init__( self._ttl = ttl self._consistency = consistency self._owns_client = False + self._session_id = session_id + self._registry_initialized = False super().__init__(session_id=session_id, session_repository=self) + @property + def state_store_name(self) -> str: + """Get the Dapr state store name. + + Returns: + Name of the Dapr state store component. + """ + return self._state_store_name + + def _register_with_agent_registry(self) -> Optional[Any]: + """Register or update this session manager in the agent registry. 
+ + Returns: + AgentRegistryAdapter if registration succeeded, None otherwise. + """ + try: + from dapr.ext.agent_core.metadata import AgentRegistryAdapter + + return AgentRegistryAdapter.create_from_stack(registry=None) + + except ImportError: + # agent_core extension not installed, skip registration + pass + except Exception as e: + logger.debug(f'Agent registry registration skipped: {e}') + + return None + @classmethod def from_address( cls, @@ -219,6 +249,12 @@ def _write_state(self, key: str, data: Dict[str, Any]) -> None: Raises: SessionException: If write fails. """ + # Register with agent registry on first write (similar to LangGraph's put()) + if not self._registry_initialized: + registry_adapter = self._register_with_agent_registry() + if registry_adapter: + self._registry_initialized = True + try: content = json.dumps(data, ensure_ascii=False) self._dapr_client.save_state( diff --git a/ext/dapr-ext-strands/setup.cfg b/ext/dapr-ext-strands/setup.cfg index 5ccd5835..2eddc3f1 100644 --- a/ext/dapr-ext-strands/setup.cfg +++ b/ext/dapr-ext-strands/setup.cfg @@ -25,6 +25,7 @@ packages = find_namespace: include_package_data = True install_requires = dapr >= 1.17.0.dev + #dapr.ext.agent_core >= 1.17.0.dev strands-agents strands-agents-tools python-ulid >= 3.0.0
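
Both integrations in this change defer agent-registry registration to the first state write, so neither `DaprCheckpointer` nor `DaprSessionManager` needs the optional `agent_core` extension at construction time (which is why the `setup.cfg` dependency can stay commented out). Below is a minimal sketch of that shared pattern, assuming the `AgentRegistryAdapter.create_from_stack(registry=None)` call shown in the diff; the mixin and method names are hypothetical and not part of the SDK:

```python
from typing import Any, Optional


class LazyRegistryMixin:
    """Illustrative only: mirrors the lazy, best-effort registration pattern
    used by DaprCheckpointer.put() and DaprSessionManager._write_state()."""

    def __init__(self) -> None:
        # No registry work at construction time, so creating the object
        # never fails when the optional agent_core extension is missing.
        self.registry_adapter: Optional[Any] = None
        self._registry_initialized = False

    def _register_with_agent_registry(self) -> Optional[Any]:
        try:
            # Imported lazily so the host package has no hard install-time
            # dependency on the agent_core extension.
            from dapr.ext.agent_core import AgentRegistryAdapter

            return AgentRegistryAdapter.create_from_stack(registry=None)
        except ImportError:
            return None  # extension not installed: silently skip registration
        except Exception:
            return None  # registration is best-effort and must not block writes

    def _before_state_write(self) -> None:
        # The first state write triggers a single registration attempt
        # (the checkpointer attempts once; the session manager keeps retrying
        # on each write until an adapter is obtained).
        if not self._registry_initialized:
            self.registry_adapter = self._register_with_agent_registry()
            self._registry_initialized = True
```

Keeping the import inside the registration helper, rather than at module level, is what allows both extensions to run without `dapr-ext-agent_core` installed while still registering metadata when it is present.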