Hi
I'm currently building a LangChain application with tracing on Phoenix.
When I use

from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-pro-001",
    temperature=0,
    max_tokens=None,
    max_retries=6,
    stop=None,
)

my token usage gets traced in Phoenix.
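The usage is also visible on the raw response, which I assume is the same data the instrumentor picks up (usage_metadata is the standard LangChain field; the numbers below are just an example):

# Sanity check: the chat model attaches usage_metadata to the AIMessage,
# and Phoenix shows matching counts on the LLM span.
msg = llm.invoke("ping")
print(msg.usage_metadata)
# e.g. {'input_tokens': 1, 'output_tokens': 5, 'total_tokens': 6}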
But when I switch to

from langchain_google_vertexai import VertexAI

model = VertexAI(
    model="gemini-1.5-pro-002",
    temperature=0,
    max_tokens=None,
    timeout=None,
    max_retries=2,
    safety_settings=safety_settings,  # safety_settings is defined elsewhere in my code
)
I'm not able to trace the token usage anymore.
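To see whether the usage data even reaches LangChain outside of Phoenix, I compared the two Vertex wrappers (my assumption being that the instrumentor can only report what the wrapper puts on the response):

from langchain_google_vertexai import ChatVertexAI, VertexAI

# The chat wrapper returns an AIMessage that carries usage_metadata...
chat = ChatVertexAI(model="gemini-1.5-pro-002", temperature=0)
print(chat.invoke("ping").usage_metadata)

# ...while the plain LLM wrapper returns a bare string, so any usage info
# would have to be in generation_info on the LLMResult, if it's there at all.
result = VertexAI(model="gemini-1.5-pro-002", temperature=0).generate(["ping"])
print(result.generations[0][0].generation_info)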
Here is my tracing setup:

import getpass
import os

from google.cloud.bigquery import opentelemetry_tracing
from openinference.instrumentation.langchain import LangChainInstrumentor
from phoenix.otel import register

# Disable BigQuery's built-in OpenTelemetry tracing so it doesn't interfere
opentelemetry_tracing.HAS_OPENTELEMETRY = False

if "PHOENIX_CLIENT_HEADERS" not in os.environ:
    try:
        # phoenix_api_path points to a file holding the full header value,
        # i.e. "api_key=<my key>"
        with open(phoenix_api_path, "r") as file:
            os.environ["PHOENIX_CLIENT_HEADERS"] = file.read().strip()
    except OSError:
        api_key = getpass.getpass("Enter your Arize Phoenix API key: ")
        os.environ["PHOENIX_CLIENT_HEADERS"] = f"api_key={api_key}"

tracer_provider = register(
    project_name="demo",  # default is "default"
    endpoint="https://app.phoenix.arize.com/v1/traces",
)
LangChainInstrumentor().instrument(tracer_provider=tracer_provider)
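After instrumenting, I run a single call so there's a fresh span to inspect (the prompt is just a placeholder):

# One traced call to check in Phoenix: the span shows up under "demo",
# but the token count fields stay empty with VertexAI.
model.invoke("Hello, just testing tracing.")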
Am I missing any parameter?