2 changes: 1 addition & 1 deletion sdk/ai/azure-ai-projects/.env.template
@@ -17,7 +17,7 @@
# `https://<your-ai-services-account-name>.services.ai.azure.com/api/projects/<your-project-name>`
AZURE_AI_PROJECT_ENDPOINT=
AZURE_AI_MODEL_DEPLOYMENT_NAME=
AGENT_NAME=
AZURE_AI_AGENT_NAME=
CONVERSATION_ID=
CONNECTION_NAME=
AZURE_AI_PROJECTS_AZURE_SUBSCRIPTION_ID=
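These template values are consumed by the package samples via python-dotenv. Below is a minimal sketch of reading the renamed variable, assuming the template has been copied to a local `.env` file (illustration only, not part of this diff):

```python
import os

from dotenv import load_dotenv

# Load variables from a local .env file created from .env.template (assumed to exist).
load_dotenv()

endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
agent_name = os.environ["AZURE_AI_AGENT_NAME"]  # formerly AGENT_NAME
conversation_id = os.environ["CONVERSATION_ID"]
```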
131 changes: 74 additions & 57 deletions sdk/ai/azure-ai-projects/README.md
@@ -122,19 +122,20 @@ See the "responses" folder in the [package samples][samples] for additional samp
<!-- SNIPPET:sample_responses_basic.responses -->

```python
with project_client.get_openai_client() as openai_client:
response = openai_client.responses.create(
model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
input="What is the size of France in square miles?",
)
print(f"Response output: {response.output_text}")
openai_client = project_client.get_openai_client()

response = openai_client.responses.create(
model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
input="And what is the capital city?",
previous_response_id=response.id,
)
print(f"Response output: {response.output_text}")
response = openai_client.responses.create(
model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
input="What is the size of France in square miles?",
)
print(f"Response output: {response.output_text}")

response = openai_client.responses.create(
model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
input="And what is the capital city?",
previous_response_id=response.id,
)
print(f"Response output: {response.output_text}")
```

<!-- END SNIPPET -->
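The second call above stays on the same thread because it passes the first response's `id` as `previous_response_id`. A minimal sketch of that pattern as a reusable helper, assuming the `project_client` and environment variable from the snippet above (illustration only, not part of this diff):

```python
import os


def ask(openai_client, text, previous_id=None):
    """Send one turn; pass the returned response's id as previous_id on the next turn."""
    kwargs = {"previous_response_id": previous_id} if previous_id else {}
    return openai_client.responses.create(
        model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
        input=text,
        **kwargs,
    )


openai_client = project_client.get_openai_client()
first = ask(openai_client, "What is the size of France in square miles?")
follow_up = ask(openai_client, "And what is the capital city?", previous_id=first.id)
print(follow_up.output_text)
```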
@@ -150,43 +151,44 @@ See the "agents" folder in the [package samples][samples] for an extensive set o
<!-- SNIPPET:sample_agent_basic.prompt_agent_basic -->

```python
with project_client.get_openai_client() as openai_client:
agent = project_client.agents.create_version(
agent_name="MyAgent",
definition=PromptAgentDefinition(
model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
instructions="You are a helpful assistant that answers general questions",
),
)
print(f"Agent created (id: {agent.id}, name: {agent.name}, version: {agent.version})")
openai_client = project_client.get_openai_client()

conversation = openai_client.conversations.create(
items=[{"type": "message", "role": "user", "content": "What is the size of France in square miles?"}],
)
print(f"Created conversation with initial user message (id: {conversation.id})")
agent = project_client.agents.create_version(
agent_name="MyAgent",
definition=PromptAgentDefinition(
model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
instructions="You are a helpful assistant that answers general questions",
),
)
print(f"Agent created (id: {agent.id}, name: {agent.name}, version: {agent.version})")

response = openai_client.responses.create(
conversation=conversation.id,
extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
input="",
)
print(f"Response output: {response.output_text}")
conversation = openai_client.conversations.create(
items=[{"type": "message", "role": "user", "content": "What is the size of France in square miles?"}],
)
print(f"Created conversation with initial user message (id: {conversation.id})")

openai_client.conversations.items.create(
conversation_id=conversation.id,
items=[{"type": "message", "role": "user", "content": "And what is the capital city?"}],
)
print(f"Added a second user message to the conversation")
response = openai_client.responses.create(
conversation=conversation.id,
extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
input="",
)
print(f"Response output: {response.output_text}")

response = openai_client.responses.create(
conversation=conversation.id,
extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
input="",
)
print(f"Response output: {response.output_text}")
openai_client.conversations.items.create(
conversation_id=conversation.id,
items=[{"type": "message", "role": "user", "content": "And what is the capital city?"}],
)
print(f"Added a second user message to the conversation")

response = openai_client.responses.create(
conversation=conversation.id,
extra_body={"agent": {"name": agent.name, "type": "agent_reference"}},
input="",
)
print(f"Response output: {response.output_text}")

openai_client.conversations.delete(conversation_id=conversation.id)
print("Conversation deleted")
openai_client.conversations.delete(conversation_id=conversation.id)
print("Conversation deleted")

project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version)
print("Agent deleted")
@@ -632,7 +634,7 @@ with (
agent = project_client.agents.create_version(
agent_name=os.environ["AZURE_AI_AGENT_NAME"],
definition=PromptAgentDefinition(
model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
model=model_deployment_name,
instructions="You are a helpful assistant that answers general questions",
),
)
@@ -643,12 +645,29 @@
item_schema={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]},
include_sample_schema=True,
)
# Notes on data_mapping:
# sample.output_text is the string output of the agent
# sample.output_items is the structured JSON output of the agent, including tool call information
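# {{item.<field>}} refers to a column defined in item_schema above (here: "query"), while
# {{sample.*}} refers to the agent output generated for that item (reading based on the
# mappings used below).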
testing_criteria = [
{
"type": "azure_ai_evaluator",
"name": "violence_detection",
"evaluator_name": "builtin.violence",
"data_mapping": {"query": "{{item.query}}", "response": "{{item.response}}"},
"data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_text}}"},
},
{
"type": "azure_ai_evaluator",
"name": "fluency",
"evaluator_name": "builtin.fluency",
"initialization_parameters": {"deployment_name": f"{model_deployment_name}"},
"data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_text}}"},
},
{
"type": "azure_ai_evaluator",
"name": "task_adherence",
"evaluator_name": "builtin.task_adherence",
"initialization_parameters": {"deployment_name": f"{model_deployment_name}"},
"data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_items}}"},
}
]
eval_object = openai_client.evals.create(
@@ -825,7 +844,9 @@ folder in the [package samples][samples].
<!-- SNIPPET:sample_indexes.indexes_sample-->

```python
print(f"Create Index `{index_name}` with version `{index_version}`, referencing an existing AI Search resource:")
print(
f"Create Index `{index_name}` with version `{index_version}`, referencing an existing AI Search resource:"
)
index = project_client.indexes.create_or_update(
name=index_name,
version=index_version,
@@ -863,24 +884,20 @@ with open(file_path, "rb") as f:
uploaded_file = openai_client.files.create(file=f, purpose="fine-tune")
print(uploaded_file)

print("Waits for the given file to be processed, default timeout is 30 mins")
processed_file = openai_client.files.wait_for_processing(uploaded_file.id)
print(processed_file)

print(f"Retrieving file metadata with ID: {processed_file.id}")
retrieved_file = openai_client.files.retrieve(processed_file.id)
print(f"Retrieving file metadata with ID: {uploaded_file.id}")
retrieved_file = openai_client.files.retrieve(uploaded_file.id)
print(retrieved_file)

print(f"Retrieving file content with ID: {processed_file.id}")
file_content = openai_client.files.content(processed_file.id)
print(f"Retrieving file content with ID: {uploaded_file.id}")
file_content = openai_client.files.content(uploaded_file.id)
print(file_content.content)

print("Listing all files:")
for file in openai_client.files.list():
print(file)

print(f"Deleting file with ID: {processed_file.id}")
deleted_file = openai_client.files.delete(processed_file.id)
print(f"Deleting file with ID: {uploaded_file.id}")
deleted_file = openai_client.files.delete(uploaded_file.id)
print(f"Successfully deleted file: {deleted_file.id}")
```
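The upload/retrieve/delete calls above pair naturally with a try/finally so the uploaded file is removed even if an intermediate step fails. A minimal sketch using only the calls shown above, assuming the same `openai_client` and a local `file_path` (illustration only, not part of this diff):

```python
uploaded_file = None
try:
    with open(file_path, "rb") as f:
        uploaded_file = openai_client.files.create(file=f, purpose="fine-tune")
    print(f"Uploaded file ID: {uploaded_file.id}")

    retrieved_file = openai_client.files.retrieve(uploaded_file.id)
    print(retrieved_file)
finally:
    if uploaded_file is not None:
        deleted_file = openai_client.files.delete(uploaded_file.id)
        print(f"Cleaned up file: {deleted_file.id}")
```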

@@ -23,7 +23,7 @@
Set these environment variables with your own values:
1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
page of your Microsoft Foundry portal.
2) AGENT_NAME - The name of an existing Agent in your Microsoft Foundry project.
2) AZURE_AI_AGENT_NAME - The name of an existing Agent in your Microsoft Foundry project.
3) CONVERSATION_ID - The ID of an existing Conversation associated with the Agent
"""

@@ -34,7 +34,7 @@

load_dotenv()

agent_name = os.environ["AGENT_NAME"]
agent_name = os.environ["AZURE_AI_AGENT_NAME"]
conversation_id = os.environ["CONVERSATION_ID"]

endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
@@ -23,7 +23,7 @@
Set these environment variables with your own values:
1) AZURE_AI_PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
page of your Microsoft Foundry portal.
2) AGENT_NAME - The name of an existing Agent in your Microsoft Foundry project.
2) AZURE_AI_AGENT_NAME - The name of an existing Agent in your Microsoft Foundry project.
3) CONVERSATION_ID - The ID of an existing Conversation associated with the Agent
"""

@@ -36,7 +36,7 @@
load_dotenv()

endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
agent_name = os.environ["AGENT_NAME"]
agent_name = os.environ["AZURE_AI_AGENT_NAME"]
conversation_id = os.environ["CONVERSATION_ID"]


@@ -40,6 +40,7 @@

load_dotenv()
endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
model_deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "")  # Sample: gpt-4o-mini

# [START agent_evaluation_basic]
with (
@@ -50,7 +51,7 @@
agent = project_client.agents.create_version(
agent_name=os.environ["AZURE_AI_AGENT_NAME"],
definition=PromptAgentDefinition(
model=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
model=model_deployment_name,
instructions="You are a helpful assistant that answers general questions",
),
)
@@ -61,12 +62,29 @@
item_schema={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]},
include_sample_schema=True,
)
# Notes on data_mapping:
# sample.output_text is the string output of the agent
# sample.output_items is the structured JSON output of the agent, including tool call information
testing_criteria = [
{
"type": "azure_ai_evaluator",
"name": "violence_detection",
"evaluator_name": "builtin.violence",
"data_mapping": {"query": "{{item.query}}", "response": "{{item.response}}"},
"data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_text}}"},
},
{
"type": "azure_ai_evaluator",
"name": "fluency",
"evaluator_name": "builtin.fluency",
"initialization_parameters": {"deployment_name": f"{model_deployment_name}"},
"data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_text}}"},
},
{
"type": "azure_ai_evaluator",
"name": "task_adherence",
"evaluator_name": "builtin.task_adherence",
"initialization_parameters": {"deployment_name": f"{model_deployment_name}"},
"data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_items}}"},
}
]
eval_object = openai_client.evals.create(
@@ -51,16 +51,18 @@
item_schema={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]},
include_sample_schema=True,
)
# Notes on data_mapping:
# {{sample.output_text}} is the string output of the provided model target for the given input in {{item.query}}
testing_criteria = [
{
"type": "azure_ai_evaluator",
"name": "violence_detection",
"evaluator_name": "builtin.violence",
"data_mapping": {"query": "{{item.query}}", "response": "{{item.response}}"},
"data_mapping": {"query": "{{item.query}}", "response": "{{sample.output_text}}"},
}
]
eval_object = openai_client.evals.create(
name="Agent Evaluation",
name="Model Evaluation",
data_source_config=data_source_config,
testing_criteria=testing_criteria, # type: ignore
)