From 59ff323f96e171cdb6808a68eedee8de3d4b99b6 Mon Sep 17 00:00:00 2001 From: JB Date: Fri, 21 Mar 2025 13:35:12 +0530 Subject: [PATCH 01/14] Examples updated to import local model deployment. --- examples/agent_patterns/agents_as_tools.py | 1 + examples/agent_patterns/deterministic.py | 1 + examples/agent_patterns/input_guardrails.py | 1 + examples/agent_patterns/parallelization.py | 1 + examples/agent_patterns/routing.py | 1 + examples/basic/agent_lifecycle_example.py | 1 + examples/config.py | 9 +++++++ examples/masaic-platform/agent-hands-off.py | 29 +++++++++++++++++++++ examples/masaic-platform/function-call.py | 26 ++++++++++++++++++ examples/masaic-platform/hello-world.py | 13 +++++++++ 10 files changed, 83 insertions(+) create mode 100644 examples/config.py create mode 100644 examples/masaic-platform/agent-hands-off.py create mode 100644 examples/masaic-platform/function-call.py create mode 100644 examples/masaic-platform/hello-world.py diff --git a/examples/agent_patterns/agents_as_tools.py b/examples/agent_patterns/agents_as_tools.py index 9fd118efb..44d380e49 100644 --- a/examples/agent_patterns/agents_as_tools.py +++ b/examples/agent_patterns/agents_as_tools.py @@ -1,3 +1,4 @@ +from examples import config import asyncio from agents import Agent, ItemHelpers, MessageOutputItem, Runner, trace diff --git a/examples/agent_patterns/deterministic.py b/examples/agent_patterns/deterministic.py index 0c163afe9..2ca1336ff 100644 --- a/examples/agent_patterns/deterministic.py +++ b/examples/agent_patterns/deterministic.py @@ -1,3 +1,4 @@ +from examples import config import asyncio from pydantic import BaseModel diff --git a/examples/agent_patterns/input_guardrails.py b/examples/agent_patterns/input_guardrails.py index 154535511..ee8ce8963 100644 --- a/examples/agent_patterns/input_guardrails.py +++ b/examples/agent_patterns/input_guardrails.py @@ -1,4 +1,5 @@ from __future__ import annotations +from examples import config import asyncio diff --git a/examples/agent_patterns/parallelization.py b/examples/agent_patterns/parallelization.py index fe2a8ecd0..95613905d 100644 --- a/examples/agent_patterns/parallelization.py +++ b/examples/agent_patterns/parallelization.py @@ -1,3 +1,4 @@ +from examples import config import asyncio from agents import Agent, ItemHelpers, Runner, trace diff --git a/examples/agent_patterns/routing.py b/examples/agent_patterns/routing.py index 3dcaefa98..8b7ddd3d7 100644 --- a/examples/agent_patterns/routing.py +++ b/examples/agent_patterns/routing.py @@ -1,3 +1,4 @@ +from examples import config import asyncio import uuid diff --git a/examples/basic/agent_lifecycle_example.py b/examples/basic/agent_lifecycle_example.py index 29bb18c96..6639b981e 100644 --- a/examples/basic/agent_lifecycle_example.py +++ b/examples/basic/agent_lifecycle_example.py @@ -1,5 +1,6 @@ import asyncio import random +from examples import config from typing import Any from pydantic import BaseModel diff --git a/examples/config.py b/examples/config.py new file mode 100644 index 000000000..ee2c2ca96 --- /dev/null +++ b/examples/config.py @@ -0,0 +1,9 @@ +import os +from agents import set_default_openai_client, AsyncOpenAI + +api_key = os.getenv("API_KEY") # Fetch the key from an env variable named API_KEY +base_url = "http://localhost:8080/v1" + +set_default_openai_client( + AsyncOpenAI(api_key=api_key, base_url=base_url) +) \ No newline at end of file diff --git a/examples/masaic-platform/agent-hands-off.py b/examples/masaic-platform/agent-hands-off.py new file mode 100644 index 
000000000..273f148a0 --- /dev/null +++ b/examples/masaic-platform/agent-hands-off.py @@ -0,0 +1,29 @@ +from examples import config +from agents import Agent, Runner +import asyncio + +spanish_agent = Agent( + name="Spanish agent", + instructions="You only speak Spanish.", +) + +english_agent = Agent( + name="English agent", + instructions="You only speak English", +) + +triage_agent = Agent( + name="Triage agent", + instructions="Handoff to the appropriate agent based on the language of the request.", + handoffs=[spanish_agent, english_agent], +) + + +async def main(): + result = await Runner.run(triage_agent, input="Hola, ¿cómo estás?") + print(result.final_output) + # ¡Hola! Estoy bien, gracias por preguntar. ¿Y tú, cómo estás? + + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/examples/masaic-platform/function-call.py b/examples/masaic-platform/function-call.py new file mode 100644 index 000000000..bb705e787 --- /dev/null +++ b/examples/masaic-platform/function-call.py @@ -0,0 +1,26 @@ +from examples import config +import asyncio + +from agents import Agent, Runner, function_tool + + +@function_tool +def get_weather(city: str) -> str: + return f"The weather in {city} is sunny." + + +agent = Agent( + name="Hello world", + instructions="You are a helpful agent.", + tools=[get_weather], +) + + +async def main(): + result = await Runner.run(agent, input="What's the weather in Tokyo?") + print(result.final_output) + # The weather in Tokyo is sunny. + + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/examples/masaic-platform/hello-world.py b/examples/masaic-platform/hello-world.py new file mode 100644 index 000000000..b5cc1dd41 --- /dev/null +++ b/examples/masaic-platform/hello-world.py @@ -0,0 +1,13 @@ +from examples import config +from agents import Agent, Runner + +agent = Agent(name="Assistant", instructions="You are a helpful assistant") + +print(type(agent.model)) + +result = Runner.run_sync(agent, "Write a haiku about recursion in programming.") +print(result.final_output) + +# Code within the code, +# Functions calling themselves, +# Infinite loop's dance. 
\ No newline at end of file From 9adada8b2c1a4294d01fae50cc36c7fc43131772 Mon Sep 17 00:00:00 2001 From: JB Date: Sun, 23 Mar 2025 12:39:53 +0530 Subject: [PATCH 02/14] Updated to use open-responses api --- examples/agent_patterns/forcing_tool_use.py | 2 +- examples/agent_patterns/llm_as_a_judge.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/agent_patterns/forcing_tool_use.py b/examples/agent_patterns/forcing_tool_use.py index 3f4e35ae8..3cd0b652b 100644 --- a/examples/agent_patterns/forcing_tool_use.py +++ b/examples/agent_patterns/forcing_tool_use.py @@ -1,5 +1,5 @@ from __future__ import annotations - +from examples import config import asyncio from typing import Any, Literal diff --git a/examples/agent_patterns/llm_as_a_judge.py b/examples/agent_patterns/llm_as_a_judge.py index 5a46cc3eb..a85bcdf65 100644 --- a/examples/agent_patterns/llm_as_a_judge.py +++ b/examples/agent_patterns/llm_as_a_judge.py @@ -1,6 +1,7 @@ from __future__ import annotations import asyncio +from examples import config from dataclasses import dataclass from typing import Literal From 479bf936b660e986505021250f472b3e8dcfa4fc Mon Sep 17 00:00:00 2001 From: JB Date: Sun, 23 Mar 2025 13:53:01 +0530 Subject: [PATCH 03/14] Updated to use open-responses api --- examples/agent_patterns/output_guardrails.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/agent_patterns/output_guardrails.py b/examples/agent_patterns/output_guardrails.py index 526a08521..b1ecfd204 100644 --- a/examples/agent_patterns/output_guardrails.py +++ b/examples/agent_patterns/output_guardrails.py @@ -2,7 +2,7 @@ import asyncio import json - +from examples import config from pydantic import BaseModel, Field from agents import ( From 5147385158b470aa41d28cdecd57785bdfa346e0 Mon Sep 17 00:00:00 2001 From: JB Date: Sun, 23 Mar 2025 16:08:08 +0530 Subject: [PATCH 04/14] Updated to use open-responses api --- examples/basic/dynamic_system_prompt.py | 1 + examples/basic/hello_world.py | 1 + examples/basic/hello_world_jupyter.py | 1 + examples/basic/lifecycle_example.py | 1 + examples/basic/stream_items.py | 2 +- examples/basic/stream_text.py | 1 + examples/basic/tools.py | 2 ++ examples/customer_service/main.py | 1 + examples/handoffs/message_filter.py | 1 + examples/handoffs/message_filter_streaming.py | 1 + examples/research_bot/agents/planner_agent.py | 1 + examples/research_bot/agents/search_agent.py | 1 + examples/research_bot/agents/writer_agent.py | 2 ++ 13 files changed, 15 insertions(+), 1 deletion(-) diff --git a/examples/basic/dynamic_system_prompt.py b/examples/basic/dynamic_system_prompt.py index 7bcf90c0c..23bbc63aa 100644 --- a/examples/basic/dynamic_system_prompt.py +++ b/examples/basic/dynamic_system_prompt.py @@ -1,3 +1,4 @@ +from examples import config import asyncio import random from typing import Literal diff --git a/examples/basic/hello_world.py b/examples/basic/hello_world.py index 169290d6f..601e85ae9 100644 --- a/examples/basic/hello_world.py +++ b/examples/basic/hello_world.py @@ -1,3 +1,4 @@ +from examples import config import asyncio from agents
import Agent, Runner diff --git a/examples/basic/hello_world_jupyter.py b/examples/basic/hello_world_jupyter.py index c929a7c68..c8839c1a8 100644 --- a/examples/basic/hello_world_jupyter.py +++ b/examples/basic/hello_world_jupyter.py @@ -1,3 +1,4 @@ +from examples import config from agents import Agent, Runner agent = Agent(name="Assistant", instructions="You are a helpful assistant") diff --git a/examples/basic/lifecycle_example.py b/examples/basic/lifecycle_example.py index 9b365106b..a6d2768f8 100644 --- a/examples/basic/lifecycle_example.py +++ b/examples/basic/lifecycle_example.py @@ -1,5 +1,6 @@ import asyncio import random +from examples import config from typing import Any from pydantic import BaseModel diff --git a/examples/basic/stream_items.py b/examples/basic/stream_items.py index c1f2257a5..263f631b1 100644 --- a/examples/basic/stream_items.py +++ b/examples/basic/stream_items.py @@ -1,6 +1,6 @@ import asyncio import random - +from examples import config from agents import Agent, ItemHelpers, Runner, function_tool diff --git a/examples/basic/stream_text.py b/examples/basic/stream_text.py index a73c1feeb..fdf083483 100644 --- a/examples/basic/stream_text.py +++ b/examples/basic/stream_text.py @@ -1,5 +1,6 @@ import asyncio +from examples import config from openai.types.responses import ResponseTextDeltaEvent from agents import Agent, Runner diff --git a/examples/basic/tools.py b/examples/basic/tools.py index 8936065a5..59ecf69c7 100644 --- a/examples/basic/tools.py +++ b/examples/basic/tools.py @@ -1,5 +1,7 @@ import asyncio +from examples import config + from pydantic import BaseModel from agents import Agent, Runner, function_tool diff --git a/examples/customer_service/main.py b/examples/customer_service/main.py index bd802e228..bde0e61fd 100644 --- a/examples/customer_service/main.py +++ b/examples/customer_service/main.py @@ -4,6 +4,7 @@ import random import uuid +from examples import config from pydantic import BaseModel from agents import ( diff --git a/examples/handoffs/message_filter.py b/examples/handoffs/message_filter.py index b7fed6c17..328b0d70c 100644 --- a/examples/handoffs/message_filter.py +++ b/examples/handoffs/message_filter.py @@ -3,6 +3,7 @@ import json import random +from examples import config from agents import Agent, HandoffInputData, Runner, function_tool, handoff, trace from agents.extensions import handoff_filters diff --git a/examples/handoffs/message_filter_streaming.py b/examples/handoffs/message_filter_streaming.py index 63cb1de34..e0fb239ce 100644 --- a/examples/handoffs/message_filter_streaming.py +++ b/examples/handoffs/message_filter_streaming.py @@ -3,6 +3,7 @@ import json import random +from examples import config from agents import Agent, HandoffInputData, Runner, function_tool, handoff, trace from agents.extensions import handoff_filters diff --git a/examples/research_bot/agents/planner_agent.py b/examples/research_bot/agents/planner_agent.py index e80a8e656..0678bf02d 100644 --- a/examples/research_bot/agents/planner_agent.py +++ b/examples/research_bot/agents/planner_agent.py @@ -1,5 +1,6 @@ from pydantic import BaseModel +from examples import config from agents import Agent PROMPT = ( diff --git a/examples/research_bot/agents/search_agent.py b/examples/research_bot/agents/search_agent.py index 72cbc8e11..3a6b30a44 100644 --- a/examples/research_bot/agents/search_agent.py +++ b/examples/research_bot/agents/search_agent.py @@ -1,3 +1,4 @@ +from examples import config from agents import Agent, WebSearchTool from agents.model_settings 
import ModelSettings diff --git a/examples/research_bot/agents/writer_agent.py b/examples/research_bot/agents/writer_agent.py index 7b7d01a27..2f17d4a7d 100644 --- a/examples/research_bot/agents/writer_agent.py +++ b/examples/research_bot/agents/writer_agent.py @@ -1,4 +1,6 @@ # Agent used to synthesize a final report from the individual summaries. +from examples import config + from pydantic import BaseModel from agents import Agent From 4b01ef0f79c0771cb8e169a285ad0821e4cac0d3 Mon Sep 17 00:00:00 2001 From: JB Date: Mon, 24 Mar 2025 16:58:04 +0530 Subject: [PATCH 05/14] Updated config.py --- examples/config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/config.py b/examples/config.py index ee2c2ca96..d9d90371f 100644 --- a/examples/config.py +++ b/examples/config.py @@ -1,8 +1,8 @@ import os from agents import set_default_openai_client, AsyncOpenAI -api_key = os.getenv("API_KEY") # Fetch the key from an env variable named API_KEY -base_url = "http://localhost:8080/v1" +api_key = os.getenv("OPENAI_API_KEY") or "" #Either set OPENAI_API_KEY in environment variable or put it directly here. +base_url = os.getenv("OPEN_RESPONSES_URL") or "http://localhost:8080/v1" #Either set OPEN_RESPONSES_URL in environment variable or put it directly here. set_default_openai_client( AsyncOpenAI(api_key=api_key, base_url=base_url) ) \ No newline at end of file From c7c034564ee6ca6ed0089fd2fb8e39188973d7d1 Mon Sep 17 00:00:00 2001 From: JB Date: Tue, 25 Mar 2025 11:02:29 +0530 Subject: [PATCH 06/14] Added examples to try out different possibilities with the Open Responses API. --- examples/masaic-platform/agent-hands-off.py | 29 ---------- examples/masaic-platform/function-call.py | 26 --------- examples/masaic-platform/hello-world.py | 13 ----- examples/open_responses/__init__.py | 3 + examples/open_responses/agent_hands_off.py | 57 +++++++++++++++++++ .../brave_search_agent_with_groq.py | 47 +++++++++++++++ .../brave_search_agent_with_groq_stream.py | 54 ++++++++++++++++++ examples/open_responses/common_patches.py | 22 +++++++ .../think_tool_agent_with_claude.py | 54 ++++++++++++++++++ examples/open_responses_built_in_tools.py | 25 ++++++++ 10 files changed, 262 insertions(+), 68 deletions(-) delete mode 100644 examples/masaic-platform/agent-hands-off.py delete mode 100644 examples/masaic-platform/function-call.py delete mode 100644 examples/masaic-platform/hello-world.py create mode 100644 examples/open_responses/__init__.py create mode 100644 examples/open_responses/agent_hands_off.py create mode 100644 examples/open_responses/brave_search_agent_with_groq.py create mode 100644 examples/open_responses/brave_search_agent_with_groq_stream.py create mode 100644 examples/open_responses/common_patches.py create mode 100644 examples/open_responses/think_tool_agent_with_claude.py create mode 100644 examples/open_responses_built_in_tools.py diff --git a/examples/masaic-platform/agent-hands-off.py b/examples/masaic-platform/agent-hands-off.py deleted file mode 100644 index 273f148a0..000000000 --- a/examples/masaic-platform/agent-hands-off.py +++ /dev/null @@ -1,29 +0,0 @@ -from examples import config -from agents import Agent, Runner -import asyncio - -spanish_agent = Agent( - name="Spanish agent", - instructions="You only speak Spanish.", -) - -english_agent = Agent( - name="English agent", - instructions="You only speak English", -) - -triage_agent = Agent( - name="Triage agent", - instructions="Handoff to the appropriate agent based on the language of the request.", - handoffs=[spanish_agent, english_agent], -) - -
-async def main(): - result = await Runner.run(triage_agent, input="Hola, ¿cómo estás?") - print(result.final_output) - # ¡Hola! Estoy bien, gracias por preguntar. ¿Y tú, cómo estás? - - -if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file diff --git a/examples/masaic-platform/function-call.py b/examples/masaic-platform/function-call.py deleted file mode 100644 index bb705e787..000000000 --- a/examples/masaic-platform/function-call.py +++ /dev/null @@ -1,26 +0,0 @@ -from examples import config -import asyncio - -from agents import Agent, Runner, function_tool - - -@function_tool -def get_weather(city: str) -> str: - return f"The weather in {city} is sunny." - - -agent = Agent( - name="Hello world", - instructions="You are a helpful agent.", - tools=[get_weather], -) - - -async def main(): - result = await Runner.run(agent, input="What's the weather in Tokyo?") - print(result.final_output) - # The weather in Tokyo is sunny. - - -if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file diff --git a/examples/masaic-platform/hello-world.py b/examples/masaic-platform/hello-world.py deleted file mode 100644 index b5cc1dd41..000000000 --- a/examples/masaic-platform/hello-world.py +++ /dev/null @@ -1,13 +0,0 @@ -from examples import config -from agents import Agent, Runner - -agent = Agent(name="Assistant", instructions="You are a helpful assistant") - -print(type(agent.model)) - -result = Runner.run_sync(agent, "Write a haiku about recursion in programming.") -print(result.final_output) - -# Code within the code, -# Functions calling themselves, -# Infinite loop's dance. \ No newline at end of file diff --git a/examples/open_responses/__init__.py b/examples/open_responses/__init__.py new file mode 100644 index 000000000..e333a2e3c --- /dev/null +++ b/examples/open_responses/__init__.py @@ -0,0 +1,3 @@ +# Make the examples directory into a package to avoid top-level module name collisions. +# This is needed so that mypy treats files like examples/customer_service/main.py and +# examples/researcher_app/main.py as distinct modules rather than both named "main". diff --git a/examples/open_responses/agent_hands_off.py b/examples/open_responses/agent_hands_off.py new file mode 100644 index 000000000..2292294cc --- /dev/null +++ b/examples/open_responses/agent_hands_off.py @@ -0,0 +1,57 @@ +import asyncio +import os +from openai import AsyncOpenAI + +from agents import Agent, Runner, function_tool, set_tracing_disabled +from agents.models.openai_responses import OpenAIResponsesModel + +""" +This example demonstrates how to create a triage agent that hands off to language-specific agents using Groq's 'qwen-2.5-32b' model. +Set your Groq API key in the API_KEY variable. +""" + +# Set custom parameters directly +BASE_URL = os.getenv("OPEN_RESPONSES_URL") or "http://localhost:8080/v1" #Either set OPEN_RESPONSES_URL in environment variable or put it directly here. +API_KEY = os.getenv("GROK_API_KEY") or "" #Either set GROK_API_KEY in environment variable or put it directly here. +MODEL_NAME = "qwen-2.5-32b" + +# Define custom headers explicitly +custom_headers = { + "Authorization": f"Bearer {API_KEY}" +} + +# Create a custom OpenAI client with the custom URL, API key, and explicit headers via default_headers.
+client = AsyncOpenAI( + base_url=BASE_URL, + api_key=API_KEY, + default_headers=custom_headers +) + +set_tracing_disabled(disabled=False) + +spanish_agent = Agent( + name="Spanish agent", + instructions="You only speak Spanish.", + model=OpenAIResponsesModel(model=MODEL_NAME, openai_client=client) +) + +english_agent = Agent( + name="English agent", + instructions="You only speak English", + model=OpenAIResponsesModel(model=MODEL_NAME, openai_client=client) +) + +triage_agent = Agent( + name="Triage agent", + instructions="Handoff to the appropriate agent based on the language of the request.", + handoffs=[spanish_agent, english_agent], + model=OpenAIResponsesModel(model=MODEL_NAME, openai_client=client) +) + +async def main(): + result = await Runner.run(triage_agent, input="Hola, ¿cómo estás?") + print(result.final_output) + # Expected output: "¡Hola! Estoy bien, gracias por preguntar. ¿Y tú, cómo estás?" + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/examples/open_responses/brave_search_agent_with_groq.py b/examples/open_responses/brave_search_agent_with_groq.py new file mode 100644 index 000000000..e2ccc6e78 --- /dev/null +++ b/examples/open_responses/brave_search_agent_with_groq.py @@ -0,0 +1,47 @@ +import asyncio +import os +from . import common_patches +from openai import AsyncOpenAI +from agents import Agent, Runner, set_tracing_disabled +from agents.models.openai_responses import OpenAIResponsesModel +from examples.open_responses_built_in_tools import OpenResponsesBuiltInTools + +""" +This example demonstrates how to create an agent that uses the built-in brave_web_search tool to perform a web search using a Groq model with the Open Responses API. +""" + +BASE_URL = os.getenv("OPEN_RESPONSES_URL") or "http://localhost:8080/v1" #Either set OPEN_RESPONSES_URL in environment variable or put it directly here. +API_KEY = os.getenv("GROK_API_KEY") or "" #Either set GROK_API_KEY in environment variable or put it directly here. +MODEL_NAME = "qwen-2.5-32b" + +custom_headers = { + "Authorization": f"Bearer {API_KEY}" +} + +client = AsyncOpenAI( + base_url=BASE_URL, + api_key=API_KEY, + default_headers=custom_headers +) + +set_tracing_disabled(disabled=False) + +brave_search_tool = OpenResponsesBuiltInTools(tool_name="brave_web_search") + +search_agent = Agent( + name="Brave Search Agent", + instructions=( + "You are a research assistant that uses Brave web search. " + "When given a query, perform a web search using Brave and provide a concise summary." + ), + tools=[brave_search_tool], + model=OpenAIResponsesModel(model=MODEL_NAME, openai_client=client) +) + +async def main(): + query = "Where did NVIDIA GTC happen in 2025 and what were the major announcements?" + result = await Runner.run(search_agent, input=query) + print("Final output:", result.final_output) + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/examples/open_responses/brave_search_agent_with_groq_stream.py b/examples/open_responses/brave_search_agent_with_groq_stream.py new file mode 100644 index 000000000..868d1b23c --- /dev/null +++ b/examples/open_responses/brave_search_agent_with_groq_stream.py @@ -0,0 +1,54 @@ +import asyncio +import os +from .
import common_patches +from openai import AsyncOpenAI +from agents import Agent, Runner, set_tracing_disabled +from agents.models.openai_responses import OpenAIResponsesModel +from examples.open_responses_built_in_tools import OpenResponsesBuiltInTools +from openai.types.responses import ResponseTextDeltaEvent + +""" +This example demonstrates how to create an agent that uses the built-in brave_web_search tool to perform a streaming web search using a Groq model with the Open Responses API. +""" + +BASE_URL = os.getenv("OPEN_RESPONSES_URL") or "http://localhost:8080/v1" #Either set OPEN_RESPONSES_URL in environment variable or put it directly here. +API_KEY = os.getenv("GROK_API_KEY") or "" #Either set GROK_API_KEY in environment variable or put it directly here. +MODEL_NAME = "qwen-2.5-32b" + +custom_headers = { + "Authorization": f"Bearer {API_KEY}" +} + +# Create the asynchronous client. +client = AsyncOpenAI( + base_url=BASE_URL, + api_key=API_KEY, + default_headers=custom_headers +) + +set_tracing_disabled(disabled=False) + +brave_search_tool = OpenResponsesBuiltInTools(tool_name="brave_web_search") + +search_agent = Agent( + name="Brave Search Streaming Agent", + instructions=( + "You are a research assistant that uses Brave web search. " + "When given a query, perform a web search using Brave and provide a concise summary." + ), + tools=[brave_search_tool], + model=OpenAIResponsesModel(model=MODEL_NAME, openai_client=client) +) + +async def main(): + query = "Where did NVIDIA GTC happen in 2025 and what were the major announcements?" + result = Runner.run_streamed(search_agent, input=query) + async for event in result.stream_events(): + # Process only raw response events. + if event.type == "raw_response_event": + if isinstance(event.data, ResponseTextDeltaEvent): + print(event.data.delta, end="", flush=True) + print("\nStream complete") + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/examples/open_responses/common_patches.py b/examples/open_responses/common_patches.py new file mode 100644 index 000000000..29677b351 --- /dev/null +++ b/examples/open_responses/common_patches.py @@ -0,0 +1,22 @@ +from agents.models.openai_responses import Converter +from examples.open_responses_built_in_tools import OpenResponsesBuiltInTools + +""" +Patches the Converter so that an OpenResponsesBuiltInTools instance is converted just like the built-in web_search and file_search tools, with its tool_name used as the tool type. +""" +_original_convert_tool = Converter._convert_tool + +def patched_convert_tool(tool): + if isinstance(tool, OpenResponsesBuiltInTools): + converted_tool = { + "name": '', + "description": '', + "parameters": tool.params_json_schema if tool.params_json_schema else {"additionalProperties": False}, + "strict": False, + "type": tool.tool_name # Our custom type. + } + return converted_tool, None + return _original_convert_tool(tool) + +# Apply the patch +Converter._convert_tool = patched_convert_tool \ No newline at end of file diff --git a/examples/open_responses/think_tool_agent_with_claude.py b/examples/open_responses/think_tool_agent_with_claude.py new file mode 100644 index 000000000..662e371ef --- /dev/null +++ b/examples/open_responses/think_tool_agent_with_claude.py @@ -0,0 +1,54 @@ +import asyncio +import os +from .
import common_patches +from openai import AsyncOpenAI +from agents import Agent, Runner, set_tracing_disabled +from agents.models.openai_responses import OpenAIResponsesModel +from examples.open_responses_built_in_tools import OpenResponsesBuiltInTools + +""" +This example demonstrates how to create an agent that uses the built-in think tool to perform a sequence of thinking steps using Anthropic's Claude 3.7 Sonnet +model with the Open Responses API. +""" + +# Set custom parameters. +BASE_URL = os.getenv("OPEN_RESPONSES_URL") or "http://localhost:8080/v1" #Either set OPEN_RESPONSES_URL in environment variable or put it directly here. +API_KEY = os.getenv("CLAUDE_API_KEY") or "" #Either set GROK_API_KEY in environment variable or put it directly here. +MODEL_NAME = "claude-3-7-sonnet-20250219" + +# Define custom headers. +custom_headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {API_KEY}", + "x-model-provider": "claude" +} + +# Create a custom OpenAI client. +client = AsyncOpenAI( + base_url=BASE_URL, + api_key=API_KEY, + default_headers=custom_headers +) + +set_tracing_disabled(disabled=False) + +# Instantiate the custom think tool with tool_name "think". +think_tool = OpenResponsesBuiltInTools(tool_name="think") + +# Create the agent. +claude_agent_with_think_tool = Agent( + name="Claude Agent with Think Tool", + instructions=( + "You are an experienced system design architect. Use the think tool to cross-confirm thoughts before preparing the final answer." + ), + tools=[think_tool], + model=OpenAIResponsesModel(model=MODEL_NAME, openai_client=client) +) + +async def main(): + # Pass the design question as the input to the agent. + result = await Runner.run(claude_agent_with_think_tool, input="Give me the guidelines on designing a multi-agent distributed system with the following constraints in mind: 1. compute costs minimal, 2. the system should be horizontally scalable, 3. the behavior should be deterministic.") + print("Final output:", result.final_output) + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/examples/open_responses_built_in_tools.py b/examples/open_responses_built_in_tools.py new file mode 100644 index 000000000..73aa46368 --- /dev/null +++ b/examples/open_responses_built_in_tools.py @@ -0,0 +1,25 @@ +from dataclasses import dataclass +from agents.tool import FunctionTool +from agents.run_context import RunContextWrapper +""" +A generic, reusable tool class for Open Responses-based agents. All built-in tools can be passed in the request with their tool_name. +""" + +@dataclass(init=False) +class OpenResponsesBuiltInTools(FunctionTool): + + tool_name: str + precomputed_result: str + + def __init__(self, tool_name: str): + # Store the provided tool name. + self.tool_name = tool_name + self.name = tool_name + self.description = tool_name + # Leave the parameters schema empty. + self.params_json_schema = {} + # Set a fixed, precomputed result. + self.precomputed_result = "Nothing to return" + # Set the on_invoke_tool callback to always return the fixed result.
+ self.on_invoke_tool = lambda ctx, input: self.precomputed_result + self.strict_json_schema = True \ No newline at end of file From e885368fa6c1261b3b78ccf5eb331715ed6689fb Mon Sep 17 00:00:00 2001 From: JB Date: Tue, 25 Mar 2025 12:41:37 +0530 Subject: [PATCH 07/14] Added README for Open Responses API examples --- examples/open_responses/README.md | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 examples/open_responses/README.md diff --git a/examples/open_responses/README.md b/examples/open_responses/README.md new file mode 100644 index 000000000..c83d4c1cf --- /dev/null +++ b/examples/open_responses/README.md @@ -0,0 +1,3 @@ +## Examples Built with OpenAI Agent SDK To Use Open Responses API Built In Tools +1. For SDK setup, see [Get Started](http://github.com/masaic-ai-platform/openai-agents-python?tab=readme-ov-file#get-started) +2. For detailed instructions to run examples, refer to [Running Agent Examples](https://github.com/masaic-ai-platform/api-draft/blob/main/docs/Quickstart.md#6-running-agent-examples-built-with-openai-agent-sdk-to-use-open-responses-api-built-in-tools) \ No newline at end of file From 2313655f030e60bda040a322b4f53807153589f9 Mon Sep 17 00:00:00 2001 From: JB Date: Tue, 25 Mar 2025 12:43:46 +0530 Subject: [PATCH 08/14] Updated format --- examples/open_responses/README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/examples/open_responses/README.md b/examples/open_responses/README.md index c83d4c1cf..50c4094fe 100644 --- a/examples/open_responses/README.md +++ b/examples/open_responses/README.md @@ -1,3 +1,7 @@ + ## Examples Built with OpenAI Agent SDK To Use Open Responses API Built In Tools -1. For SDK setup, see [Get Started](http://github.com/masaic-ai-platform/openai-agents-python?tab=readme-ov-file#get-started) -2. For detailed instructions to run examples, refer to [Running Agent Examples](https://github.com/masaic-ai-platform/api-draft/blob/main/docs/Quickstart.md#6-running-agent-examples-built-with-openai-agent-sdk-to-use-open-responses-api-built-in-tools) \ No newline at end of file + +1. For SDK setup, see Get Started +2. For detailed instructions to run examples, refer to Running Agent Examples + +--- \ No newline at end of file From 7b43cc5437d2e0c4d292aa17e9e7979603ed991e Mon Sep 17 00:00:00 2001 From: JB Date: Tue, 25 Mar 2025 15:27:15 +0530 Subject: [PATCH 09/14] Updated Hyperlink. --- examples/open_responses/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/open_responses/README.md b/examples/open_responses/README.md index 50c4094fe..08d0dc317 100644 --- a/examples/open_responses/README.md +++ b/examples/open_responses/README.md @@ -2,6 +2,6 @@ ## Examples Built with OpenAI Agent SDK To Use Open Responses API Built In Tools 1. For SDK setup, see Get Started -2.
For detailed instructions to run examples, refer to Running Agent Examples +2. For detailed instructions to run examples, refer to Running Agent Examples --- \ No newline at end of file From 6a610b3424ce35372e23113423c379d38af2f183 Mon Sep 17 00:00:00 2001 From: amant Date: Tue, 1 Apr 2025 18:13:42 +0530 Subject: [PATCH 10/14] conversation state example --- examples/open_responses/conversation_state.py | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 examples/open_responses/conversation_state.py diff --git a/examples/open_responses/conversation_state.py b/examples/open_responses/conversation_state.py new file mode 100644 index 000000000..5f1cf9938 --- /dev/null +++ b/examples/open_responses/conversation_state.py @@ -0,0 +1,44 @@ + +from openai import OpenAI +import os + +# Set custom parameters directly +BASE_URL = os.getenv("OPEN_RESPONSES_URL") or "http://localhost:8080/v1" #Either set OPEN_RESPONSES_URL in environment variable or put it directly here. +API_KEY = os.getenv("GROQ_API_KEY") or "" #Either set GROQ_API_KEY in environment variable or put it directly here. +MODEL_NAME = "qwen-2.5-32b" + +# Define custom headers explicitly +custom_headers = { + "Authorization": f"Bearer {API_KEY}" +} + +# Create a custom OpenAI client with the custom URL, API key, and explicit headers via default_headers. +client = OpenAI( + base_url=BASE_URL, + api_key=API_KEY, + default_headers=custom_headers +) + +history = [ + { + "role": "user", + "content": "tell me a joke" + } +] + +response = client.responses.create( + model=MODEL_NAME, + input=history, + store=True +) + +print(response.output_text) + +# Continue the conversation by referencing the previous response id + +second_response = client.responses.create( + model=MODEL_NAME, + previous_response_id=response.id, + input=[{"role": "user", "content": "explain why this is funny."}], +) +print(second_response.output_text) \ No newline at end of file From be70934eef2dcb1682ded3a7d5bffd09454dc118 Mon Sep 17 00:00:00 2001 From: JB Date: Sat, 5 Apr 2025 15:02:52 +0530 Subject: [PATCH 11/14] Fixed typo from GROK to GROQ. Also, added OpenAI SDK and Hello World Agent SDK examples. --- examples/open_responses/agent_hands_off.py | 2 +- .../brave_search_agent_with_groq.py | 2 +- .../brave_search_agent_with_groq_stream.py | 2 +- examples/open_responses/hello_world_agent.py | 25 +++++++++++++++ examples/open_responses/openai-sdk-example.py | 10 ++++++ .../think_tool_agent_with_claude.py | 2 +- 6 files changed, 39 insertions(+), 4 deletions(-) create mode 100644 examples/open_responses/hello_world_agent.py create mode 100644 examples/open_responses/openai-sdk-example.py diff --git a/examples/open_responses/agent_hands_off.py b/examples/open_responses/agent_hands_off.py index 2292294cc..6e6e24466 100644 --- a/examples/open_responses/agent_hands_off.py +++ b/examples/open_responses/agent_hands_off.py @@ -12,7 +12,7 @@ # Set custom parameters directly BASE_URL = os.getenv("OPEN_RESPONSES_URL") or "http://localhost:8080/v1" #Either set OPEN_RESPONSES_URL in environment variable or put it directly here. -API_KEY = os.getenv("GROK_API_KEY") or "" #Either set GROK_API_KEY in environment variable or put it directly here. +API_KEY = os.getenv("GROQ_API_KEY") or "" #Either set GROQ_API_KEY in environment variable or put it directly here.
MODEL_NAME = "qwen-2.5-32b" # Define custom headers explicitly diff --git a/examples/open_responses/brave_search_agent_with_groq.py b/examples/open_responses/brave_search_agent_with_groq.py index e2ccc6e78..08d71182f 100644 --- a/examples/open_responses/brave_search_agent_with_groq.py +++ b/examples/open_responses/brave_search_agent_with_groq.py @@ -11,7 +11,7 @@ """ BASE_URL = os.getenv("OPEN_RESPONSES_URL") or "http://localhost:8080/v1" #Either set OPEN_RESPONSES_URL in environment variable or put it directly here. -API_KEY = os.getenv("GROK_API_KEY") or "" #Either set GROK_API_KEY in environment variable or put it directly here. +API_KEY = os.getenv("GROQ_API_KEY") or "" #Either set GROQ_API_KEY in environment variable or put it directly here. MODEL_NAME = "qwen-2.5-32b" custom_headers = { diff --git a/examples/open_responses/brave_search_agent_with_groq_stream.py b/examples/open_responses/brave_search_agent_with_groq_stream.py index 868d1b23c..c3b3cba7d 100644 --- a/examples/open_responses/brave_search_agent_with_groq_stream.py +++ b/examples/open_responses/brave_search_agent_with_groq_stream.py @@ -12,7 +12,7 @@ """ BASE_URL = os.getenv("OPEN_RESPONSES_URL") or "http://localhost:8080/v1" #Either set OPEN_RESPONSES_URL in environment variable or put it directly here. -API_KEY = os.getenv("GROK_API_KEY") or "" #Either set GROK_API_KEY in environment variable or put it directly here. +API_KEY = os.getenv("GROQ_API_KEY") or "" #Either set GROQ_API_KEY in environment variable or put it directly here. MODEL_NAME = "qwen-2.5-32b" custom_headers = { diff --git a/examples/open_responses/hello_world_agent.py b/examples/open_responses/hello_world_agent.py new file mode 100644 index 000000000..e8dac7252 --- /dev/null +++ b/examples/open_responses/hello_world_agent.py @@ -0,0 +1,25 @@ +import os +import asyncio +from openai import AsyncOpenAI + +from agents import Agent, Runner +from agents.models.openai_responses import OpenAIResponsesModel + + +client = AsyncOpenAI(base_url="http://localhost:8080/v1", api_key=os.getenv("OPENAI_API_KEY"), default_headers={'x-model-provider': 'openai'}) +async def main(): + agent = Agent( + name="Assistant", + instructions="You are a humorous poet who can write funny poems of 4 lines.", + model=OpenAIResponsesModel(model="gpt-4o-mini", openai_client=client) + ) + + result = await Runner.run(agent, "Write a poem on Masaic.") + print(result.final_output) + # Function calls itself, + # Looping in smaller pieces, + # Endless by design. + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/open_responses/openai-sdk-example.py b/examples/open_responses/openai-sdk-example.py new file mode 100644 index 000000000..cc112806c --- /dev/null +++ b/examples/open_responses/openai-sdk-example.py @@ -0,0 +1,10 @@ +import os +from openai import OpenAI + +openai_client = OpenAI(base_url="http://localhost:8080/v1", api_key=os.getenv("OPENAI_API_KEY"), default_headers={'x-model-provider': 'openai'}) + +response = openai_client.responses.create( + model="gpt-4o-mini", + input="Write a poem on Masaic" +) +print("Generated response:", response.output[0].content[0].text) \ No newline at end of file diff --git a/examples/open_responses/think_tool_agent_with_claude.py b/examples/open_responses/think_tool_agent_with_claude.py index 662e371ef..c870e1cb3 100644 --- a/examples/open_responses/think_tool_agent_with_claude.py +++ b/examples/open_responses/think_tool_agent_with_claude.py @@ -13,7 +13,7 @@ # Set custom parameters. 
BASE_URL = os.getenv("OPEN_RESPONSES_URL") or "http://localhost:8080/v1" #Either set OPEN_RESPONSES_URL in environment variable or put it directly here. -API_KEY = os.getenv("CLAUDE_API_KEY") or "" #Either set GROK_API_KEY in environment variable or put it directly here. +API_KEY = os.getenv("CLAUDE_API_KEY") or "" #Either set CLAUDE_API_KEY in environment variable or put it directly here. MODEL_NAME = "claude-3-7-sonnet-20250219" # Define custom headers. From 08cca72ec6c9fb4f6cfbb295044c2bb292a08de2 Mon Sep 17 00:00:00 2001 From: JB Date: Sat, 5 Apr 2025 19:23:58 +0530 Subject: [PATCH 12/14] Added load generation python programs for agent_hands_off and brave_search_agent use cases. --- .../agent_hands_off_for_load_generation.py | 148 +++++++++++++++++ .../brave_search_agent_for_load_generation.py | 149 ++++++++++++++++++ 2 files changed, 297 insertions(+) create mode 100644 examples/open_responses/agent_hands_off_for_load_generation.py create mode 100644 examples/open_responses/brave_search_agent_for_load_generation.py diff --git a/examples/open_responses/agent_hands_off_for_load_generation.py b/examples/open_responses/agent_hands_off_for_load_generation.py new file mode 100644 index 000000000..56474fe8c --- /dev/null +++ b/examples/open_responses/agent_hands_off_for_load_generation.py @@ -0,0 +1,148 @@ +import asyncio +import os +from openai import AsyncOpenAI + +from agents import Agent, Runner, set_tracing_disabled +from agents.models.openai_responses import OpenAIResponsesModel + +""" +This program runs the agent handoff example for multiple iterations. +It allows selecting different model providers (groq, openai, claude) and runs the test +for a specified number of iterations (default: 10) with parallel execution (default: 5 concurrent runs). +""" + +# Base URL for all providers +BASE_URL = os.getenv("OPEN_RESPONSES_URL") or "http://localhost:8080/v1" + +# Model mapping for different providers +MODEL_MAPPING = { + "groq": { + "name": "qwen-2.5-32b", + "api_key_env": "GROQ_API_KEY", + "headers": lambda api_key: { + "Authorization": f"Bearer {api_key}" + } + }, + "openai": { + "name": "gpt-4o", + "api_key_env": "OPENAI_API_KEY", + "headers": lambda api_key: { + "Authorization": f"Bearer {api_key}", + "x-model-provider": "openai" + } + }, + "claude": { + "name": "claude-3-7-sonnet-20250219", + "api_key_env": "CLAUDE_API_KEY", + "headers": lambda api_key: { + "Content-Type": "application/json", + "Authorization": f"Bearer {api_key}", + "x-model-provider": "claude" + } + } +} + +def get_client_for_provider(provider): + """Create an OpenAI client configured for the specified provider.""" + provider_config = MODEL_MAPPING.get(provider) + if not provider_config: + raise ValueError(f"Unknown provider: {provider}. Available providers: {', '.join(MODEL_MAPPING.keys())}") + + api_key_env = provider_config["api_key_env"] + api_key = os.getenv(api_key_env) or "" + + if not api_key: + raise ValueError(f"API key for {provider} not found.
Please set {api_key_env} environment variable.") + + custom_headers = provider_config["headers"](api_key) + + return AsyncOpenAI( + base_url=BASE_URL, + api_key=api_key, + default_headers=custom_headers + ) + +async def run_single_iteration(iteration, triage_agent, input_text): + """Run a single iteration of the handoff test.""" + try: + result = await Runner.run(triage_agent, input=input_text) + print(f"Iteration {iteration+1} completed with result: {result.final_output}") + return result.final_output + except Exception as e: + error_message = f"ERROR: {e}" + print(f"Iteration {iteration+1} failed with error: {error_message}") + return error_message + +async def run_handoff_test(provider="groq", num_iterations=10, concurrency=5, input_text="Hola, ¿cómo estás?"): + """Run the handoff test for the specified number of iterations with parallel execution.""" + client = get_client_for_provider(provider) + model_name = MODEL_MAPPING[provider]["name"] + + # Disable tracing to reduce output noise during load testing + set_tracing_disabled(disabled=True) + + # Create agents + spanish_agent = Agent( + name="Spanish agent", + instructions="You only speak Spanish.", + model=OpenAIResponsesModel(model=model_name, openai_client=client) + ) + + english_agent = Agent( + name="English agent", + instructions="You only speak English", + model=OpenAIResponsesModel(model=model_name, openai_client=client) + ) + + triage_agent = Agent( + name="Triage agent", + instructions="Handoff to the appropriate agent based on the language of the request.", + handoffs=[spanish_agent, english_agent], + model=OpenAIResponsesModel(model=model_name, openai_client=client) + ) + + print(f"\nRunning {num_iterations} iterations with concurrency of {concurrency}...\n") + + # Create tasks for all iterations + tasks = [] + for i in range(num_iterations): + tasks.append(run_single_iteration(i, triage_agent, input_text)) + + # Run tasks with semaphore to limit concurrency + semaphore = asyncio.Semaphore(concurrency) + + async def run_with_semaphore(task): + async with semaphore: + return await task + + # Execute tasks with limited concurrency + results = await asyncio.gather(*[run_with_semaphore(task) for task in tasks]) + + # Print summary + print("\n===== SUMMARY =====") + print(f"Total iterations: {num_iterations}") + print(f"Successful responses: {len([r for r in results if not r.startswith('ERROR')])}") + print(f"Failed responses: {len([r for r in results if r.startswith('ERROR')])}") + + return results + +async def main(): + # Ask for model provider + print("Available model providers: groq, openai, claude") + provider = input("Enter model provider (default: groq): ").lower() or "groq" + + # Ask for number of iterations + iterations_input = input("Enter number of iterations (default: 10): ") + iterations = int(iterations_input) if iterations_input.strip() else 10 + + # Hard-coded concurrency level of 5 + concurrency = 5 + + # Ask for input text + input_text = input("Enter input text (default: 'Hola, ¿cómo estás?'): ") or "Hola, ¿cómo estás?" 
+ + # Run the test + await run_handoff_test(provider, iterations, concurrency, input_text) + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/examples/open_responses/brave_search_agent_for_load_generation.py b/examples/open_responses/brave_search_agent_for_load_generation.py new file mode 100644 index 000000000..2e71bf551 --- /dev/null +++ b/examples/open_responses/brave_search_agent_for_load_generation.py @@ -0,0 +1,149 @@ +import asyncio +import os +from openai import AsyncOpenAI +from agents import Agent, Runner, set_tracing_disabled +from agents.models.openai_responses import OpenAIResponsesModel +from examples.open_responses_built_in_tools import OpenResponsesBuiltInTools + +""" +This program runs the brave search agent example for multiple iterations. +It allows selecting different model providers (groq, openai, claude) and runs the test +for a specified number of iterations (default: 10) with parallel execution (fixed: 5 concurrent runs). +""" + +# Base URL for all providers +BASE_URL = os.getenv("OPEN_RESPONSES_URL") or "http://localhost:8080/v1" + +# Model mapping for different providers +MODEL_MAPPING = { + "groq": { + "name": "qwen-2.5-32b", + "api_key_env": "GROQ_API_KEY", + "headers": lambda api_key: { + "Authorization": f"Bearer {api_key}" + } + }, + "openai": { + "name": "gpt-4o", + "api_key_env": "OPENAI_API_KEY", + "headers": lambda api_key: { + "Authorization": f"Bearer {api_key}", + "x-model-provider": "openai" + } + }, + "claude": { + "name": "claude-3-7-sonnet-20250219", + "api_key_env": "CLAUDE_API_KEY", + "headers": lambda api_key: { + "Content-Type": "application/json", + "Authorization": f"Bearer {api_key}", + "x-model-provider": "claude" + } + } +} + +def get_client_for_provider(provider): + """Create an OpenAI client configured for the specified provider.""" + provider_config = MODEL_MAPPING.get(provider) + if not provider_config: + raise ValueError(f"Unknown provider: {provider}. Available providers: {', '.join(MODEL_MAPPING.keys())}") + + api_key_env = provider_config["api_key_env"] + api_key = os.getenv(api_key_env) or "" + + if not api_key: + raise ValueError(f"API key for {provider} not found. 
Please set {api_key_env} environment variable.") + + custom_headers = provider_config["headers"](api_key) + + return AsyncOpenAI( + base_url=BASE_URL, + api_key=api_key, + default_headers=custom_headers + ) + +async def run_single_iteration(iteration, search_agent, input_text): + """Run a single iteration of the brave search test.""" + try: + result = await Runner.run(search_agent, input=input_text) + print(f"Iteration {iteration+1} completed with result: {result.final_output[:150]}...") # Truncate long results + return result.final_output + except Exception as e: + error_message = f"ERROR: {e}" + print(f"Iteration {iteration+1} failed with error: {error_message}") + return error_message + +async def run_brave_search_test(provider="groq", num_iterations=10, concurrency=5, input_text="Where did NVIDIA GTC happen in 2025 and what were the major announcements?"): + """Run the brave search test for the specified number of iterations with parallel execution.""" + client = get_client_for_provider(provider) + model_name = MODEL_MAPPING[provider]["name"] + + # Disable tracing to reduce output noise during load testing + set_tracing_disabled(disabled=True) + + # Create the brave search tool + brave_search_tool = OpenResponsesBuiltInTools(tool_name="brave_web_search") + + # Create the search agent + search_agent = Agent( + name="Brave Search Agent", + instructions=( + "You are a research assistant that uses Brave web search. " + "When given a query, perform a web search using Brave and provide a concise summary." + ), + tools=[brave_search_tool], + model=OpenAIResponsesModel(model=model_name, openai_client=client) + ) + + print(f"\nRunning {num_iterations} iterations with concurrency of {concurrency}...\n") + + # Create tasks for all iterations + tasks = [] + for i in range(num_iterations): + tasks.append(run_single_iteration(i, search_agent, input_text)) + + # Run tasks with semaphore to limit concurrency + semaphore = asyncio.Semaphore(concurrency) + + async def run_with_semaphore(task): + async with semaphore: + return await task + + # Execute tasks with limited concurrency + results = await asyncio.gather(*[run_with_semaphore(task) for task in tasks]) + + # Print summary + print("\n===== SUMMARY =====") + print(f"Total iterations: {num_iterations}") + print(f"Successful responses: {len([r for r in results if not r.startswith('ERROR')])}") + print(f"Failed responses: {len([r for r in results if r.startswith('ERROR')])}") + + return results + +async def main(): + # Ask for model provider + print("Available model providers: groq, openai, claude") + provider = input("Enter model provider (default: groq): ").lower() or "groq" + + # Ask for number of iterations + iterations_input = input("Enter number of iterations (default: 10): ") + iterations = int(iterations_input) if iterations_input.strip() else 10 + + # Hard-coded concurrency level of 5 + concurrency = 5 + + # Ask for input text + default_query = "Where did NVIDIA GTC happen in 2025 and what were the major announcements?" + input_text = input(f"Enter search query (default: '{default_query}'): ") or default_query + + # Run the test + await run_brave_search_test(provider, iterations, concurrency, input_text) + +if __name__ == "__main__": + # Apply common patches for OpenResponsesBuiltInTools + try: + from examples.open_responses import common_patches + except ImportError: + print("Warning: Could not import common_patches. 
Brave search may not work correctly.") + + asyncio.run(main()) \ No newline at end of file From 0602a50341c02fe9dd0a2784d45787d4c2b9cc20 Mon Sep 17 00:00:00 2001 From: JB Date: Mon, 7 Apr 2025 16:08:13 +0530 Subject: [PATCH 13/14] Updated example instruction --- examples/open_responses/hello_world_agent.py | 2 +- examples/open_responses/openai-sdk-example.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/open_responses/hello_world_agent.py b/examples/open_responses/hello_world_agent.py index e8dac7252..b678f0bb3 100644 --- a/examples/open_responses/hello_world_agent.py +++ b/examples/open_responses/hello_world_agent.py @@ -14,7 +14,7 @@ async def main(): model=OpenAIResponsesModel(model="gpt-4o-mini", openai_client=client) ) - result = await Runner.run(agent, "Write a poem on Masaic.") + result = await Runner.run(agent, "Tell me a joke") print(result.final_output) # Function calls itself, # Looping in smaller pieces, diff --git a/examples/open_responses/openai-sdk-example.py b/examples/open_responses/openai-sdk-example.py index cc112806c..689f7d549 100644 --- a/examples/open_responses/openai-sdk-example.py +++ b/examples/open_responses/openai-sdk-example.py @@ -5,6 +5,6 @@ response = openai_client.responses.create( model="gpt-4o-mini", - input="Write a poem on Masaic" + input="Tell me a joke" ) print("Generated response:", response.output[0].content[0].text) \ No newline at end of file From ea02544e0c618cc51e18e05bdb0b306e6cdda863 Mon Sep 17 00:00:00 2001 From: amant Date: Fri, 25 Apr 2025 15:42:37 +0530 Subject: [PATCH 14/14] rag, file and agentic search tool example --- examples/open_responses/rag.py | 379 ++++++++++++++++++++ examples/open_responses/sample_document.txt | 78 ++++ 2 files changed, 457 insertions(+) create mode 100644 examples/open_responses/rag.py create mode 100644 examples/open_responses/sample_document.txt diff --git a/examples/open_responses/rag.py b/examples/open_responses/rag.py new file mode 100644 index 000000000..84e6d5e5b --- /dev/null +++ b/examples/open_responses/rag.py @@ -0,0 +1,379 @@ +import asyncio +import os +import json +import requests +from openai import AsyncOpenAI +from dataclasses import dataclass +from agents import Agent, Runner, set_tracing_disabled +from agents.models.openai_responses import OpenAIResponsesModel, Converter +from agents.tool import FunctionTool, FileSearchTool +from typing import Dict, Any + +""" +This example demonstrates how to create an agent that uses the built-in agentic_search tool +to perform RAG-based search queries using the OpenResponses API. 
+""" + +# Patch the Converter's _convert_tool method to handle our custom tool type +_original_convert_tool = Converter._convert_tool + +def patched_convert_tool(tool): + if isinstance(tool, AgenticSearchTool): + # Create a tool definition in the format expected by the API + tool_def = { + "type": "agentic_search", + "vector_store_ids": tool.vector_store_ids, + "max_num_results": tool.max_num_results, + "max_iterations": tool.max_iterations, + "seed_strategy": tool.seed_strategy, + "alpha": tool.alpha, + "initial_seed_multiplier": tool.initial_seed_multiplier, + "filters": tool.filters + } + return tool_def, None + return _original_convert_tool(tool) + +# Apply the patch +Converter._convert_tool = patched_convert_tool + +# Define a local version of the tool to avoid import issues +@dataclass(init=False) +class AgenticSearchTool(FunctionTool): + tool_name: str + vector_store_ids: list + max_num_results: int + max_iterations: int + seed_strategy: str + alpha: float + initial_seed_multiplier: int + filters: dict + + def __init__(self, tool_name, **kwargs): + # Store the provided tool name and attributes + self.tool_name = tool_name + self.name = tool_name + self.description = tool_name + # Leave the parameters schema empty + self.params_json_schema = {} + # Set a fixed, precomputed result + self.precomputed_result = "Nothing to return" + # Set the on_invoke_tool callback to always return the fixed result + self.on_invoke_tool = lambda ctx, input: self.precomputed_result + self.strict_json_schema = True + + # Set any additional attributes passed as kwargs + for key, value in kwargs.items(): + setattr(self, key, value) + +BASE_URL = os.getenv("OPEN_RESPONSES_URL") or "http://localhost:8080/v1" +API_KEY = os.getenv("OPENAI_API_KEY") # throw error if not set +if not API_KEY: + raise ValueError("Please set the OPENAI_API_KEY environment variable.") +MODEL_NAME = "openai@gpt-4.1-mini" # You can change this to your preferred model + +custom_headers = { + "Authorization": f"Bearer {API_KEY}" +} + +client = AsyncOpenAI( + base_url=BASE_URL, + api_key=API_KEY, + default_headers=custom_headers +) + +set_tracing_disabled(disabled=False) + +# Create the agentic_search tool with additional configuration parameters +agentic_search_tool = AgenticSearchTool( + tool_name="agentic_search", + vector_store_ids=[], + max_num_results=5, + max_iterations=10, + seed_strategy="hybrid", + alpha=0.7, + initial_seed_multiplier=3, + filters={ + "type": "and", + "filters": [ + { + "type": "eq", + "key": "category", + "value": "documentation" + }, + { + "type": "eq", + "key": "language", + "value": "en" + } + ] + } +) + +rag_agent = Agent( + name="RAG Search Agent", + instructions=( + "You are a research assistant that uses RAG-based search. " + "When given a query, perform a search against the document vector store and provide a comprehensive analysis " + "based on the retrieved information." + ), + tools=[agentic_search_tool], + model=OpenAIResponsesModel(model=MODEL_NAME, openai_client=client) +) + +def upload_file(file_path): + """ + Upload a file to the OpenResponses API. 
+ + Args: + file_path: Path to the file to upload + + Returns: + file_id: ID of the uploaded file + """ + url = f"{BASE_URL}/files" + headers = {"Authorization": f"Bearer {API_KEY}"} + + with open(file_path, "rb") as f: + files = {"file": f} + data = {"purpose": "user_data"} + + response = requests.post(url, headers=headers, files=files, data=data) + response_json = response.json() + + print(f"File uploaded: {response_json}") + return response_json.get("id") + +def create_vector_store(name): + """ + Create a vector store in the OpenResponses API. + + Args: + name: Name of the vector store + + Returns: + vector_store_id: ID of the created vector store + """ + url = f"{BASE_URL}/vector_stores" + headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {API_KEY}" + } + + data = json.dumps({"name": name}) + + response = requests.post(url, headers=headers, data=data) + response_json = response.json() + + print(f"Vector store created: {response_json}") + return response_json.get("id") + +def add_file_to_vector_store(vector_store_id, file_id, category="documentation", language="en"): + """ + Add a file to a vector store in the OpenResponses API. + + Args: + vector_store_id: ID of the vector store + file_id: ID of the file to add + category: Category attribute for the file + language: Language attribute for the file + """ + url = f"{BASE_URL}/vector_stores/{vector_store_id}/files" + headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {API_KEY}" + } + + data = { + "file_id": file_id, + "chunking_strategy": { + "type": "static", + "static": { + "max_chunk_size_tokens": 1000, + "chunk_overlap_tokens": 200 + } + }, + "attributes": { + "category": category, + "language": language + } + } + + response = requests.post(url, headers=headers, data=json.dumps(data)) + print(f"File added to vector store: {response.json()}") + +def setup_rag_system(file_path, vector_store_name): + """ + Set up the RAG system by uploading a file, creating a vector store, and adding the file to it. + + Args: + file_path: Path to the file to upload + vector_store_name: Name for the vector store + + Returns: + vector_store_id: ID of the created vector store + """ + # Upload file + file_id = upload_file(file_path) + + # Create vector store + vector_store_id = create_vector_store(vector_store_name) + + # Add file to vector store + add_file_to_vector_store(vector_store_id, file_id) + + return vector_store_id + +async def call_direct_api(vector_store_id, query): + """ + Demonstrate calling the agentic_search API directly without the agent framework + + Args: + vector_store_id: ID of the vector store to search + query: The search query + + Returns: + The response from the API + """ + url = f"{BASE_URL}/responses" + headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {API_KEY}" + } + + payload = { + "model": MODEL_NAME, + "tools": [{ + "type": "agentic_search", + "vector_store_ids": [vector_store_id], + "max_num_results": 5, + "max_iterations": 10, + "seed_strategy": "hybrid", + "alpha": 0.7, + "initial_seed_multiplier": 3, + "filters": { + "type": "and", + "filters": [ + { + "type": "eq", + "key": "category", + "value": "documentation" + }, + { + "type": "eq", + "key": "language", + "value": "en" + } + ] + } + }], + "input": query, + "instructions": "Search for the answer to the query using the agentic_search tool." 
+    }
+
+    response = requests.post(url, headers=headers, data=json.dumps(payload))
+    return response.json()
+
+async def main():
+    # Set up the RAG system with our sample document
+    file_path = os.path.join(os.path.dirname(__file__), "sample_document.txt")
+    vector_store_name = "ml-documentation"
+
+    print("Setting up new vector store...")
+    vector_store_id = setup_rag_system(file_path, vector_store_name)
+    print(f"Created vector store with ID: {vector_store_id}")
+    print("Update VECTOR_STORE_ID in the script with this value for future runs.")
+
+    # Option 1: Use the agent framework with our custom AgenticSearchTool
+    # Rebuild the agent's tool with the new vector store ID
+    global agentic_search_tool
+    agentic_search_tool = AgenticSearchTool(
+        tool_name="agentic_search",
+        vector_store_ids=[vector_store_id],
+        max_num_results=5,
+        max_iterations=10,
+        seed_strategy="hybrid",
+        alpha=0.7,
+        initial_seed_multiplier=3,
+        filters={
+            "type": "and",
+            "filters": [
+                {
+                    "type": "eq",
+                    "key": "category",
+                    "value": "documentation"
+                },
+                {
+                    "type": "eq",
+                    "key": "language",
+                    "value": "en"
+                }
+            ]
+        }
+    )
+
+    global rag_agent
+    rag_agent = Agent(
+        name="RAG Search Agent",
+        instructions=(
+            "You are a research assistant that uses RAG-based search. "
+            "When given a query, perform a search against the document vector store and provide a comprehensive analysis "
+            "based on the retrieved information."
+        ),
+        tools=[agentic_search_tool],
+        model=OpenAIResponsesModel(model=MODEL_NAME, openai_client=client)
+    )
+
+    # Run a query using the RAG agent with AgenticSearchTool
+    print("\nOption 1: Running agentic search query using agent framework...")
+    query = "What are the three types of machine learning and their key differences?"
+    result = await Runner.run(rag_agent, input=query)
+    print("\nFinal output:", result.final_output)
+
+    # Option 2: Use a direct API call
+    print("\nOption 2: Running agentic search query using direct API call...")
+    api_result = await call_direct_api(vector_store_id, query)
+    print("\nDirect API response:", api_result.get("output", "No output returned"))
+
+    # Option 3: Use the built-in FileSearchTool
+    # Create a FileSearchTool instance
+    file_search_tool = FileSearchTool(
+        vector_store_ids=[vector_store_id],
+        max_num_results=5,
+        include_search_results=True,
+        filters={
+            "type": "and",
+            "filters": [
+                {
+                    "type": "eq",
+                    "key": "category",
+                    "value": "documentation"
+                },
+                {
+                    "type": "eq",
+                    "key": "language",
+                    "value": "en"
+                }
+            ]
+        }
+    )
+
+    # Create an agent with the FileSearchTool
+    file_search_agent = Agent(
+        name="File Search Agent",
+        instructions=(
+            "You are a research assistant that uses file search. "
+            "When given a query, perform a search against the document vector store and provide a comprehensive analysis "
+            "based on the retrieved information."
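+            # Unlike the custom AgenticSearchTool above, FileSearchTool is the
+            # SDK's built-in file-search tool, so no Converter patch is needed.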
+ ), + tools=[file_search_tool], + model=OpenAIResponsesModel(model=MODEL_NAME, openai_client=client) + ) + + # Run a query using the file search agent + print("\nOption 3: Running search query using built-in FileSearchTool...") + file_search_result = await Runner.run(file_search_agent, input=query) + print("\nFileSearchTool output:", file_search_result.final_output) + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/examples/open_responses/sample_document.txt b/examples/open_responses/sample_document.txt new file mode 100644 index 000000000..07cb6bd96 --- /dev/null +++ b/examples/open_responses/sample_document.txt @@ -0,0 +1,78 @@ +# Machine Learning Fundamentals +## Introduction + +Machine learning is a branch of artificial intelligence (AI) that focuses on building systems that learn from data. Unlike traditional programming where explicit instructions are provided, machine learning algorithms use statistical methods to enable machines to improve with experience. + +## Types of Machine Learning + +### 1. Supervised Learning + +Supervised learning algorithms learn from labeled training data, making predictions or decisions based on past examples. The algorithm receives a set of inputs along with the corresponding correct outputs, and it learns by comparing its actual output with correct outputs to find errors and modify the model accordingly. + +Common supervised learning algorithms include: +- Linear Regression +- Logistic Regression +- Support Vector Machines (SVM) +- Decision Trees and Random Forests +- Neural Networks +- K-Nearest Neighbors (KNN) + +### 2. Unsupervised Learning + +Unsupervised learning algorithms learn from unlabeled data. These algorithms try to find patterns or intrinsic structures in the input data without being explicitly taught what to look for. + +Common unsupervised learning algorithms include: +- K-means Clustering +- Hierarchical Clustering +- Principal Component Analysis (PCA) +- Autoencoders +- Generative Adversarial Networks (GANs) + +### 3. Reinforcement Learning + +Reinforcement learning is about taking suitable actions to maximize reward in a particular situation. It differs from supervised learning in that correct input/output pairs are never presented, nor incorrect actions explicitly corrected. + +Common reinforcement learning algorithms include: +- Q-Learning +- Deep Q Network (DQN) +- Proximal Policy Optimization (PPO) +- Actor-Critic Methods + +## The Machine Learning Process + +1. **Data Collection**: Gathering relevant data from various sources. +2. **Data Preprocessing**: Cleaning, normalizing, and transforming raw data into a suitable format. +3. **Feature Engineering**: Selecting and transforming the most relevant variables. +4. **Model Selection**: Choosing appropriate algorithms based on the problem type. +5. **Training**: Teaching the model using the prepared dataset. +6. **Evaluation**: Assessing model performance using metrics like accuracy, precision, and recall. +7. **Deployment**: Implementing the model in a production environment. +8. **Monitoring**: Continuously tracking the model's performance and updating as needed. + +## Common Challenges in Machine Learning + +- **Overfitting**: When a model learns the training data too well, including noise and outliers, leading to poor performance on new data. +- **Underfitting**: When a model is too simple to capture the underlying pattern of the data. +- **Data Quality**: Insufficient, biased, or noisy data can lead to poor model performance. 
+- **Feature Selection**: Choosing the right features is crucial for model performance. +- **Interpretability vs. Performance**: More complex models often perform better but are harder to interpret. + +## Emerging Trends + +- **Deep Learning**: Neural networks with many layers that can learn complex patterns. +- **Transfer Learning**: Applying knowledge from one trained model to another related task. +- **Federated Learning**: Training models across multiple devices without exchanging data. +- **AutoML**: Automating the end-to-end process of applying machine learning. +- **Explainable AI**: Developing methods to understand and interpret complex model decisions. + +## Ethical Considerations + +As machine learning becomes more prevalent, it's crucial to address ethical concerns: +- **Bias and Fairness**: Ensuring models don't perpetuate or amplify existing biases. +- **Privacy**: Protecting sensitive data used for training. +- **Transparency**: Making model decisions understandable to users. +- **Accountability**: Determining responsibility when automated systems make mistakes. + +## Conclusion + +Machine learning continues to evolve rapidly, offering powerful tools for solving complex problems across various domains. Understanding its fundamentals, processes, and ethical implications is essential for developing responsible and effective AI systems. \ No newline at end of file