
Agentic search #602


Closed · wants to merge 16 commits
1 change: 1 addition & 0 deletions examples/agent_patterns/agents_as_tools.py
@@ -1,3 +1,4 @@
from examples import config
import asyncio

from agents import Agent, ItemHelpers, MessageOutputItem, Runner, trace
1 change: 1 addition & 0 deletions examples/agent_patterns/deterministic.py
@@ -1,3 +1,4 @@
from examples import config
import asyncio

from pydantic import BaseModel
2 changes: 1 addition & 1 deletion examples/agent_patterns/forcing_tool_use.py
@@ -1,5 +1,5 @@
from __future__ import annotations

from examples import config
import asyncio
from typing import Any, Literal

1 change: 1 addition & 0 deletions examples/agent_patterns/input_guardrails.py
@@ -1,4 +1,5 @@
from __future__ import annotations
from examples import config

import asyncio

1 change: 1 addition & 0 deletions examples/agent_patterns/llm_as_a_judge.py
@@ -1,6 +1,7 @@
from __future__ import annotations

import asyncio
from examples import config
from dataclasses import dataclass
from typing import Literal

2 changes: 1 addition & 1 deletion examples/agent_patterns/output_guardrails.py
@@ -2,7 +2,7 @@

import asyncio
import json

from examples import config
from pydantic import BaseModel, Field

from agents import (
1 change: 1 addition & 0 deletions examples/agent_patterns/parallelization.py
@@ -1,5 +1,6 @@
from examples import config
import asyncio

from agents import Agent, ItemHelpers, Runner, trace

"""
1 change: 1 addition & 0 deletions examples/agent_patterns/routing.py
@@ -1,3 +1,4 @@
from examples import config
import asyncio
import uuid

1 change: 1 addition & 0 deletions examples/basic/agent_lifecycle_example.py
@@ -1,5 +1,6 @@
import asyncio
import random
from examples import config
from typing import Any

from pydantic import BaseModel
1 change: 1 addition & 0 deletions examples/basic/dynamic_system_prompt.py
@@ -1,3 +1,4 @@
from examples import config
import asyncio
import random
from typing import Literal
1 change: 1 addition & 0 deletions examples/basic/hello_world.py
@@ -1,3 +1,4 @@
from examples import config
import asyncio

from agents import Agent, Runner
1 change: 1 addition & 0 deletions examples/basic/hello_world_jupyter.py
@@ -1,3 +1,4 @@
from examples import config
from agents import Agent, Runner

agent = Agent(name="Assistant", instructions="You are a helpful assistant")
1 change: 1 addition & 0 deletions examples/basic/lifecycle_example.py
@@ -1,5 +1,6 @@
import asyncio
import random
from examples import config
from typing import Any

from pydantic import BaseModel
2 changes: 1 addition & 1 deletion examples/basic/stream_items.py
@@ -1,6 +1,6 @@
import asyncio
import random

from examples import config
from agents import Agent, ItemHelpers, Runner, function_tool


1 change: 1 addition & 0 deletions examples/basic/stream_text.py
@@ -1,5 +1,6 @@
import asyncio

from examples import config
from openai.types.responses import ResponseTextDeltaEvent

from agents import Agent, Runner
2 changes: 2 additions & 0 deletions examples/basic/tools.py
@@ -1,5 +1,7 @@
import asyncio

from examples import config

from pydantic import BaseModel

from agents import Agent, Runner, function_tool
9 changes: 9 additions & 0 deletions examples/config.py
@@ -0,0 +1,9 @@
import os
from openai import AsyncOpenAI
from agents import set_default_openai_client

# Set OPENAI_API_KEY and, if needed, OPEN_RESPONSES_URL as environment variables, or put the values directly here.
api_key = os.getenv("OPENAI_API_KEY") or ""
base_url = os.getenv("OPEN_RESPONSES_URL") or "http://localhost:8080/v1"

set_default_openai_client(AsyncOpenAI(api_key=api_key, base_url=base_url))
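For context, a minimal sketch of how the example scripts consume this module: the `from examples import config` lines added above are imported purely for their side effect, which registers the Open Responses client before any agent is created. The agent name and prompt below are illustrative; the structure mirrors `examples/basic/hello_world.py` from this diff.

import asyncio

from examples import config  # noqa: F401  (side effect: set_default_openai_client)
from agents import Agent, Runner

async def main():
    # The Runner now talks to the Open Responses endpoint configured in examples/config.py.
    agent = Agent(name="Assistant", instructions="You are a helpful assistant")
    result = await Runner.run(agent, input="Say hello in one short sentence.")
    print(result.final_output)

if __name__ == "__main__":
    asyncio.run(main())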
1 change: 1 addition & 0 deletions examples/customer_service/main.py
@@ -4,6 +4,7 @@
import random
import uuid

from examples import config
from pydantic import BaseModel

from agents import (
1 change: 1 addition & 0 deletions examples/handoffs/message_filter.py
@@ -3,6 +3,7 @@
import json
import random

from examples import config
from agents import Agent, HandoffInputData, Runner, function_tool, handoff, trace
from agents.extensions import handoff_filters

1 change: 1 addition & 0 deletions examples/handoffs/message_filter_streaming.py
@@ -3,6 +3,7 @@
import json
import random

from examples import config
from agents import Agent, HandoffInputData, Runner, function_tool, handoff, trace
from agents.extensions import handoff_filters

7 changes: 7 additions & 0 deletions examples/open_responses/README.md
@@ -0,0 +1,7 @@

## Examples Built with the OpenAI Agents SDK That Use Open Responses API Built-in Tools

1. For SDK setup, see <a href="http://github.com/masaic-ai-platform/openai-agents-python?tab=readme-ov-file#get-started" target="_blank">Get Started</a>
2. For detailed instructions on running the examples, see <a href="https://github.com/masaic-ai-platform/open-responses/blob/main/docs/Quickstart.md#6-running-agent-examples-built-with-openai-agent-sdk-to-use-open-responses-api-built-in-tools" target="_blank">Running Agent Examples</a>

---
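As a rough orientation for the examples in this directory, the snippet below sketches how the SDK's default client is pointed at an Open Responses deployment; the base URL and the `x-model-provider` header are taken from the example files later in this diff, and the exact values should be treated as illustrative.

import os

from openai import AsyncOpenAI
from agents import set_default_openai_client

client = AsyncOpenAI(
    base_url=os.getenv("OPEN_RESPONSES_URL") or "http://localhost:8080/v1",
    api_key=os.getenv("OPENAI_API_KEY") or "",
    default_headers={"x-model-provider": "openai"},  # provider routing header used in the load-test example
)
set_default_openai_client(client)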
3 changes: 3 additions & 0 deletions examples/open_responses/__init__.py
@@ -0,0 +1,3 @@
# Make the examples directory into a package to avoid top-level module name collisions.
# This is needed so that mypy treats files like examples/customer_service/main.py and
# examples/researcher_app/main.py as distinct modules rather than both named "main".
57 changes: 57 additions & 0 deletions examples/open_responses/agent_hands_off.py
@@ -0,0 +1,57 @@
import asyncio
import os
from openai import AsyncOpenAI

from agents import Agent, Runner, function_tool, set_tracing_disabled
from agents.models.openai_responses import OpenAIResponsesModel

"""
This example demonstrates how to create agents that hand off to each other, using Groq's 'qwen-2.5-32b' model.
Set the Groq API key via the GROQ_API_KEY environment variable (or assign it directly to API_KEY below).
"""

# Set custom parameters directly
BASE_URL = os.getenv("OPEN_RESPONSES_URL") or "http://localhost:8080/v1"  # Either set OPEN_RESPONSES_URL as an environment variable or put the URL directly here.
API_KEY = os.getenv("GROQ_API_KEY") or ""  # Either set GROQ_API_KEY as an environment variable or put the key directly here.
MODEL_NAME = "qwen-2.5-32b"

# Define custom headers explicitly
custom_headers = {
    "Authorization": f"Bearer {API_KEY}"
}

# Create a custom OpenAI client with the custom URL, API key, and explicit headers via default_headers.
client = AsyncOpenAI(
    base_url=BASE_URL,
    api_key=API_KEY,
    default_headers=custom_headers
)

set_tracing_disabled(disabled=False)

spanish_agent = Agent(
    name="Spanish agent",
    instructions="You only speak Spanish.",
    model=OpenAIResponsesModel(model=MODEL_NAME, openai_client=client)
)

english_agent = Agent(
    name="English agent",
    instructions="You only speak English.",
    model=OpenAIResponsesModel(model=MODEL_NAME, openai_client=client)
)

triage_agent = Agent(
    name="Triage agent",
    instructions="Handoff to the appropriate agent based on the language of the request.",
    handoffs=[spanish_agent, english_agent],
    model=OpenAIResponsesModel(model=MODEL_NAME, openai_client=client)
)

async def main():
    result = await Runner.run(triage_agent, input="Hola, ¿cómo estás?")
    print(result.final_output)
    # Expected output: "¡Hola! Estoy bien, gracias por preguntar. ¿Y tú, cómo estás?"

if __name__ == "__main__":
    asyncio.run(main())
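As a usage sketch (assuming the repository layout above, so the file is importable as `examples.open_responses.agent_hands_off`), the triage agent can be exercised with prompts in both languages to see each handoff branch:

import asyncio

from agents import Runner
from examples.open_responses.agent_hands_off import triage_agent  # module defined above

async def demo_both_branches():
    # A Spanish prompt should hand off to the Spanish agent, an English prompt to the English agent.
    for prompt in ["Hola, ¿cómo estás?", "Hello, how are you?"]:
        result = await Runner.run(triage_agent, input=prompt)
        print(f"{prompt!r} -> {result.final_output}")

if __name__ == "__main__":
    asyncio.run(demo_both_branches())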
148 changes: 148 additions & 0 deletions examples/open_responses/agent_hands_off_for_load_generation.py
@@ -0,0 +1,148 @@
import asyncio
import os
from openai import AsyncOpenAI

from agents import Agent, Runner, set_tracing_disabled
from agents.models.openai_responses import OpenAIResponsesModel

"""
This program runs the agent handoff example for multiple iterations.
It allows selecting different model providers (groq, openai, claude) and runs the test
for a specified number of iterations (default: 10) with parallel execution (default: 5 concurrent runs).
"""

# Base URL for all providers
BASE_URL = os.getenv("OPEN_RESPONSES_URL") or "http://localhost:8080/v1"

# Model mapping for different providers
MODEL_MAPPING = {
    "groq": {
        "name": "qwen-2.5-32b",
        "api_key_env": "GROQ_API_KEY",
        "headers": lambda api_key: {
            "Authorization": f"Bearer {api_key}"
        }
    },
    "openai": {
        "name": "gpt-4o",
        "api_key_env": "OPENAI_API_KEY",
        "headers": lambda api_key: {
            "Authorization": f"Bearer {api_key}",
            "x-model-provider": "openai"
        }
    },
    "claude": {
        "name": "claude-3-7-sonnet-20250219",
        "api_key_env": "CLAUDE_API_KEY",
        "headers": lambda api_key: {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
            "x-model-provider": "claude"
        }
    }
}

def get_client_for_provider(provider):
    """Create an OpenAI client configured for the specified provider."""
    provider_config = MODEL_MAPPING.get(provider)
    if not provider_config:
        raise ValueError(f"Unknown provider: {provider}. Available providers: {', '.join(MODEL_MAPPING.keys())}")

    api_key_env = provider_config["api_key_env"]
    api_key = os.getenv(api_key_env) or ""

    if not api_key:
        raise ValueError(f"API key for {provider} not found. Please set the {api_key_env} environment variable.")

    custom_headers = provider_config["headers"](api_key)

    return AsyncOpenAI(
        base_url=BASE_URL,
        api_key=api_key,
        default_headers=custom_headers
    )

async def run_single_iteration(iteration, triage_agent, input_text):
    """Run a single iteration of the handoff test."""
    try:
        result = await Runner.run(triage_agent, input=input_text)
        print(f"Iteration {iteration+1} completed with result: {result.final_output}")
        return result.final_output
    except Exception as e:
        error_message = f"ERROR: {e}"
        print(f"Iteration {iteration+1} failed with error: {error_message}")
        return error_message

async def run_handoff_test(provider="groq", num_iterations=10, concurrency=5, input_text="Hola, ¿cómo estás?"):
    """Run the handoff test for the specified number of iterations with parallel execution."""
    client = get_client_for_provider(provider)
    model_name = MODEL_MAPPING[provider]["name"]

    # Disable tracing to reduce output noise during load testing
    set_tracing_disabled(disabled=True)

    # Create agents
    spanish_agent = Agent(
        name="Spanish agent",
        instructions="You only speak Spanish.",
        model=OpenAIResponsesModel(model=model_name, openai_client=client)
    )

    english_agent = Agent(
        name="English agent",
        instructions="You only speak English.",
        model=OpenAIResponsesModel(model=model_name, openai_client=client)
    )

    triage_agent = Agent(
        name="Triage agent",
        instructions="Handoff to the appropriate agent based on the language of the request.",
        handoffs=[spanish_agent, english_agent],
        model=OpenAIResponsesModel(model=model_name, openai_client=client)
    )

    print(f"\nRunning {num_iterations} iterations with concurrency of {concurrency}...\n")

    # Create coroutines for all iterations
    tasks = []
    for i in range(num_iterations):
        tasks.append(run_single_iteration(i, triage_agent, input_text))

    # Run tasks with a semaphore to limit concurrency
    semaphore = asyncio.Semaphore(concurrency)

    async def run_with_semaphore(task):
        async with semaphore:
            return await task

    # Execute tasks with limited concurrency
    results = await asyncio.gather(*[run_with_semaphore(task) for task in tasks])

    # Print summary
    print("\n===== SUMMARY =====")
    print(f"Total iterations: {num_iterations}")
    print(f"Successful responses: {len([r for r in results if not r.startswith('ERROR')])}")
    print(f"Failed responses: {len([r for r in results if r.startswith('ERROR')])}")

    return results

async def main():
    # Ask for the model provider
    print("Available model providers: groq, openai, claude")
    provider = input("Enter model provider (default: groq): ").lower() or "groq"

    # Ask for the number of iterations
    iterations_input = input("Enter number of iterations (default: 10): ")
    iterations = int(iterations_input) if iterations_input.strip() else 10

    # Hard-coded concurrency level of 5
    concurrency = 5

    # Ask for the input text
    input_text = input("Enter input text (default: 'Hola, ¿cómo estás?'): ") or "Hola, ¿cómo estás?"

    # Run the test
    await run_handoff_test(provider, iterations, concurrency, input_text)

if __name__ == "__main__":
    asyncio.run(main())
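For unattended runs, a small sketch that calls `run_handoff_test` directly rather than reading parameters from stdin; the module path assumes the repository layout above, and the parameter values are illustrative:

import asyncio

from examples.open_responses.agent_hands_off_for_load_generation import run_handoff_test

if __name__ == "__main__":
    # 20 iterations against the default groq provider, with at most 5 requests in flight at a time.
    results = asyncio.run(run_handoff_test(provider="groq", num_iterations=20, concurrency=5))
    print(f"Collected {len(results)} results")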