# Integrating Tool Outputs in Trace Logs for Enhanced Debugging
import logging
import ast
from langchain.tools import Tool
from main.agents.tools import primary_tool_functions
from opentelemetry.trace import Status, StatusCode
# Define log levels
# Configure root logging once at import time; INFO and above are emitted.
logging.basicConfig(level=logging.INFO)
def pick_tools(requested_tools: str,
               app_id: str,
               app_configs: dict, tracer):
    """
    Return the list of Tool objects requested for the given app.

    Parameters:
        requested_tools (str): String-encoded Python list of tool names,
            e.g. "['Search VectorDB', 'Create Graphs']".
        app_id (str): Current app id; key into ``app_configs``.
        app_configs (dict): App-wise pre-initialised objects. Assumed layout
            per app (from the lambda bindings below): index 0 embeddings,
            1 pinecone, 2 sql agent, 3 sql prompt, 4 llm instance —
            TODO confirm against the code that populates app_configs.
        tracer: OpenTelemetry tracer; this call is recorded as a
            "pick_tools" span and each tool call is traced beneath it.

    Returns:
        list: Tool objects in the order they were requested.

    Raises:
        KeyError: Unknown ``app_id`` or unsupported tool name.
        ValueError, SyntaxError: ``requested_tools`` is not a valid
            Python literal list.
    """
    with tracer.start_as_current_span("pick_tools") as span:
        span.set_attribute("openinference.span.kind", "TOOL")
        span.set_attribute("input.requested_tools", requested_tools)
        span.set_attribute("input.app_id", app_id)
        try:
            init_object_list = app_configs[app_id]
            # literal_eval only parses Python literals (never executes code),
            # so it is safe for the string-encoded tool list, unlike eval().
            requested_tools = ast.literal_eval(requested_tools)
            # Binding the init objects as lambda defaults freezes their
            # current values, avoiding the late-binding closure pitfall.
            tools_dict = {
                "Search VectorDB": Tool(
                    name="Search VectorDB",
                    func=lambda query, init_pinecone=init_object_list[1], init_embeddings=init_object_list[0]:
                    primary_tool_functions.doc_search(query=query, pinecone=init_pinecone, embedding_instance=init_embeddings,
                                                      tracer=tracer),
                    description="""
                    useful for when assistant needs to query through vector DB and pick top most similar documents to the
                    given query
                    """
                ),
                "Query SQL DB": Tool(
                    name="Query SQL DB",
                    func=lambda query, init_sql=init_object_list[2], sql_prompt=init_object_list[3]:
                    primary_tool_functions.query_sql(query=query, sql_agent=init_sql, sql_agent_prompt=sql_prompt, tracer=tracer),
                    description="""
                    useful when assistant needs to query from SQL database for a given statistical question and return data
                    from SQL database
                    """
                ),
                "Create Graphs": Tool(
                    name="Create Graphs",
                    func=lambda query, init_llm_obj=init_object_list[4]:
                    primary_tool_functions.create_graphs(query=query, llm_instance=init_llm_obj, tracer=tracer),
                    description="""
                    useful when assistant needs to create graphs after retrieving the output from 'Query SQL DB' tool
                    """
                )
            }
            # to return requested tools (unknown names raise KeyError and are
            # reported on the span via the except path below)
            tool_list = [tools_dict[tool] for tool in requested_tools]
            span.set_attribute("output.value", str(tool_list))
            # Mark success explicitly, consistent with the tool functions.
            span.set_status(Status(StatusCode.OK))
            return tool_list
        except Exception as e:
            span.set_status(Status(StatusCode.ERROR, str(e)))
            raise  # bare raise keeps the original traceback
import ast
import opentelemetry.trace as trace
from opentelemetry.trace.status import Status, StatusCode
# Tool 1
def doc_search(query, pinecone, embedding_instance, tracer):
    """Run a vector-similarity search for *query*, traced as a RETRIEVER span.

    The query is embedded via ``embedding_instance.embed`` and the top 4
    matches are fetched from ``pinecone.return_docs``. On failure the span
    is marked ERROR and the exception is re-raised to the caller.

    Returns:
        tuple: (retrieved_docs, source, topic, page_link) exactly as
        produced by ``pinecone.return_docs``.
    """
    with tracer.start_as_current_span("document-search") as span:
        try:
            span.set_attribute("openinference.span.kind", "RETRIEVER")
            span.set_attribute("input.query", query)
            query_vector = embedding_instance.embed(query)
            docs, doc_source, doc_topic, link = pinecone.return_docs(query_vector, 4)
            span.set_attribute("output.retrieved_docs", str(docs))
            span.set_status(Status(StatusCode.OK))
        except Exception as err:
            span.set_status(Status(StatusCode.ERROR, str(err)))
            raise
        return docs, doc_source, doc_topic, link
# Tool 2
def query_sql(query, sql_agent, sql_agent_prompt, tracer):
    """Generate a SQL query for *query* via the SQL agent, traced as a TOOL span.

    Records the input question and the agent prompt on the span. On failure
    the span is marked ERROR and the exception is re-raised.

    Returns:
        The query produced by ``sql_agent.get_query``.
    """
    with tracer.start_as_current_span("query_sql") as span:
        try:
            span.set_attribute("openinference.span.kind", "TOOL")
            span.set_attribute("input.query", query)
            span.set_attribute("input.prompt", sql_agent_prompt)
            sql_statement = sql_agent.get_query(query, sql_agent_prompt)
            span.set_attribute("output.generated_query", sql_statement)
            span.set_status(Status(StatusCode.OK))
        except Exception as err:
            span.set_status(Status(StatusCode.ERROR, str(err)))
            raise
        return sql_statement
# Tool 3
def create_graphs(query, llm_instance, tracer):
    """Ask the LLM to turn a 2D list into a Mermaid.js chart script.

    Traced as a TOOL span. The LLM response is wrapped in a div the frontend
    renders as a Mermaid graph. On failure the span is marked ERROR and the
    exception is re-raised.

    Parameters:
        query: 2D list (or its string form) of labels and values to plot —
            expected to be the output of the 'Query SQL DB' tool.
        llm_instance: Object exposing ``invoke(prompt) -> str``.
        tracer: OpenTelemetry tracer.

    Returns:
        str: "<div class='mermaid-graph'>...</div>" wrapping the LLM output.
    """
    with tracer.start_as_current_span("create_graphs") as span:
        try:
            span.set_attribute("openinference.span.kind", "TOOL")
            span.set_attribute("input.query", query)
            _plotting_prompt = """
            Assistant will be provided with 2D list. What assistant has to do is create a Mermaid.js script to generate a
            bar graph or pie chart from the given 2D list. Assistant can choose what should be taked as X and Y axis.
            Output must be only a Mermaid.js script and nothin else rather than the Mermaid.js script.
            Here are some sample user queries and assistant outputs,
            ```
            User : [["amal", "kamal", "anura"], [2, 3, 4]]
            Assistant:
            bar
            title Users and their scores
            x-axis Users
            y-axis Scores
            "amal" : 2
            "kamal" : 3
            "anura" : 4
            ```
            """
            response = llm_instance.invoke(_plotting_prompt + f"\n 2D list : {query}")
            formatted_response = f"<div class='mermaid-graph'>{response}</div>"
            span.set_attribute("output.response", formatted_response)
            span.set_status(Status(StatusCode.OK))
            return formatted_response
        except Exception as e:
            span.set_status(Status(StatusCode.ERROR, str(e)))
            # BUG FIX: the original `return formatted_response` here raised a
            # NameError whenever the failure happened before formatted_response
            # was assigned (e.g. llm_instance.invoke raising), masking the real
            # error. Re-raise instead, consistent with doc_search/query_sql.
            raise
# NOTE(requirements): For the relevant tool picked, the trace should display
# the output of the selected tool's trace. For example, if the selected tool is
# 'Search VectorDB', the trace output should be the output of the doc_search()
# function nested within the 'pick_tools' span — i.e. under 'pick-tools',
# Phoenix should show the doc_search method's trace details.
