Using `using_project` with LlamaTrace: Setup and Troubleshooting Guide
How do I use `using_project` with LlamaTrace? I was using it, but now the library isn't being recognized:
! pip install "llama-index-core>=0.10.43" "openinference-instrumentation-llama-index>=2" "opentelemetry-proto>=1.12.0"
! pip install "arize-phoenix[evals,llama-index]"
# Azure-specific dependencies
! pip install azure-search-documents==11.5.1
! pip install llama-index-vector-stores-azureaisearch
! pip install llama-index-embeddings-azure-openai
! pip install llama-index-llms-azure-openai
# Additional tools and utilities
! pip install python-dotenv
from tqdm import tqdm
import json
from openinference.instrumentation import using_metadata
from phoenix.trace import using_project
# Load all evaluation questions from eval/queries.jsonl (one JSON object per line).
# Each object is expected to carry at least a "query" field; the full object is
# kept so downstream code can read optional fields like "query_classification".
eval_questions = []
with open("eval/queries.jsonl", "r", encoding="utf-8") as file:
    for line in file:
        line = line.strip()
        # Tolerate blank lines (e.g. a trailing newline at end of file),
        # which would otherwise make json.loads raise JSONDecodeError.
        if not line:
            continue
        eval_questions.append(json.loads(line))
# List of query engines and their respective project names
# Pairs of (engine, project_name): each engine's traces are routed to its own
# Phoenix/LlamaTrace project via using_project(project_name) in the loop below.
# NOTE(review): the three engine objects are defined elsewhere in the notebook
# (presumably keyword / hybrid / semantic-hybrid Azure AI Search query engines) —
# confirm against the cell that constructs them.
query_engines = [
(keyword_query_engine, "Keyword"),
(hybrid_query_engine, "Hybrid"),
(semantic_hybrid_query_engine, "Semantic_Hybrid"),
]
# Run every evaluation question against each engine. using_project routes the
# resulting trace into the named Phoenix (LlamaTrace) project, and
# using_metadata attaches the query classification as trace metadata so runs
# can later be sliced by query type.
for query_data in tqdm(eval_questions):
    query = query_data["query"]
    # Default to 'undefined' if the classification field is not present.
    query_classification = query_data.get("query_classification", "undefined")
    for engine, project_name in query_engines:
        try:
            with using_project(project_name), using_metadata(query_classification):
                # The query method expects a string query; the response is
                # discarded here — only the emitted trace matters.
                engine.query(query)
        except Exception as e:
            # Best-effort: report the failure and keep evaluating the rest.
            print(f"Error querying {project_name} for query '{query}': {e}")