🔒[private user] yes, sharing the code snippet:

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient
import asyncio
from phoenix.otel import register
from openinference.instrumentation.openai import OpenAIInstrumentor
import os
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
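# Example .env contents (illustrative values only; replace with your own):
#   PHOENIX_API_KEY=your-phoenix-api-key
#   PHOENIX_COLLECTOR_ENDPOINT=https://app.phoenix.arize.com
#   OPENAI_API_KEY=sk-...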
# Fail fast if the Phoenix credentials are missing; register() also reads these from the env.
PHOENIX_API_KEY = os.environ["PHOENIX_API_KEY"]
PHOENIX_COLLECTOR_ENDPOINT = os.environ["PHOENIX_COLLECTOR_ENDPOINT"]
tracer_provider = register(
    project_name="autogen-test",  # Traces are grouped under this project name in Phoenix.
    auto_instrument=True,         # Auto-instrument any installed OpenInference instrumentors.
)
# Explicit OpenAI instrumentation; redundant when auto_instrument=True above, but harmless.
OpenAIInstrumentor().instrument(tracer_provider=tracer_provider)
# Define a model client. You can use any other model client that implements
# the `ChatCompletionClient` interface.
model_client = OpenAIChatCompletionClient(
    model="gpt-4o-mini",
    api_key=os.getenv("OPENAI_API_KEY"),
)
# Define a simple function tool that the agent can use.
# For this example, we use a fake weather tool for demonstration purposes.
async def get_weather(city: str) -> str:
    """Get the weather for a given city."""
    return f"The weather in {city} is 73 degrees and Sunny."
# Define an AssistantAgent with the model, tool, system message, and reflection enabled.
# The system message instructs the agent via natural language.
agent = AssistantAgent(
    name="weather_agent",
    model_client=model_client,
    tools=[get_weather],
    system_message="You are a helpful assistant.",
    reflect_on_tool_use=True,   # Have the model summarize tool results instead of returning them raw.
    model_client_stream=True,   # Enable streaming tokens from the model client.
)
# Run the agent and stream the messages to the console.
async def main() -> None:
    await Console(agent.run_stream(task="What is the weather in New York?"))
    # Close the connection to the model client.
    await model_client.close()
# NOTE: in a notebook, call `await main()` directly; in a plain Python script, use asyncio.run().
asyncio.run(main())
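# After a run, traces should appear in the Phoenix UI under the "autogen-test" project.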