Thanks, Roger Y. I'm trying to execute the following code, but my Phoenix instance at http://localhost:6006/ doesn't track anything. If I use the Python code it works. Can you help me, please?
ASP.NET Core code:
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
using OpenTelemetry.Exporter;
using Microsoft.AspNetCore.Builder;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using System.Diagnostics;
var builder = WebApplication.CreateBuilder(args);
builder.Logging.ClearProviders();

// The custom ActivitySource name. It MUST be registered with the tracer
// provider via .AddSource(...) below — without that registration the
// OpenTelemetry SDK has no listener for the source, StartActivity returns
// null, and no spans ever reach Phoenix. This is the most likely reason
// the Python version works but this one exports nothing.
const string LlmActivitySourceName = "SimulateLLMCall";

builder.Services.AddOpenTelemetryTracing(tracerProviderBuilder =>
{
    tracerProviderBuilder
        .SetResourceBuilder(
            ResourceBuilder.CreateDefault()
                .AddService(builder.Environment.ApplicationName))
        // Subscribe the SDK to the custom source so manually created
        // activities are sampled and exported.
        .AddSource(LlmActivitySourceName)
        .AddAspNetCoreInstrumentation()
        .AddHttpClientInstrumentation()
        .AddOtlpExporter(options =>
        {
            // Phoenix accepts OTLP over HTTP/protobuf at /v1/traces
            // (same endpoint the working Python script uses).
            options.Endpoint = new Uri("http://127.0.0.1:6006/v1/traces");
            options.Protocol = OtlpExportProtocol.HttpProtobuf;
        });
});

var app = builder.Build();

// Create the ActivitySource once and reuse it across requests; the original
// created (and disposed) a new source per request, which is wasteful and can
// race with activity export.
var llmActivitySource = new ActivitySource(LlmActivitySourceName);

app.MapGet("/", () => "Hello from OpenTelemetry Tracing!");

// Simulated LLM call endpoint: starts a client span and tags it with
// LLM-style attributes so it shows up in Phoenix.
app.MapGet("/simulate-llm-call", () =>
{
    using var activity = llmActivitySource.StartActivity("LLM Call Simulation", ActivityKind.Client);
    activity?.SetTag("llm.model_name", "gpt-3.5-turbo");
    activity?.SetTag("llm.function_call", "{function_name: 'generate_text', args: ['Hello, world!']}");
    activity?.SetTag("llm.response", "Hello, this is a response from a simulated LLM call.");
    return "LLM call simulated and traced!";
});

app.Run();
Python code (this one works):
import openai
from openinference.instrumentation.openai import OpenAIInstrumentor
from opentelemetry import trace as trace_api
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk import trace as trace_sdk
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
endpoint = "http://127.0.0.1:6006/v1/traces"

# Wire up an SDK tracer provider that exports spans both to Phoenix
# (OTLP over HTTP) and to the console for local debugging.
provider = trace_sdk.TracerProvider()
for exporter in (OTLPSpanExporter(endpoint), ConsoleSpanExporter()):
    provider.add_span_processor(SimpleSpanProcessor(exporter))
trace_api.set_tracer_provider(provider)

# Auto-instrument the OpenAI client so every completion call emits a span.
OpenAIInstrumentor().instrument()

if __name__ == "__main__":
    client = openai.OpenAI()
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "write a haiku."}],
        max_tokens=20,
    )
    print(response.choices[0].message.content)