Yes Dustin N., Django requests usually run in a worker thread. So how do we work around that — any ideas?
Hello Mikyo and team, I'm working on a RAG based LLM. I've tested Phoenix for evaluation in a notebook environment. Now I'm trying to use it in my Django-React application. I'm using the Phoenix code in a function in views.py, after a request comes from the frontend, through the urls in django this function (phoenix_eval) is called. But at the llm_classify line, I encounter an error. How to workaround this? Error handler = _signal.signal(_enum_to_int(signalnum), _enum_to_int(handler)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ValueError: signal only works in main thread of the main interpreter Function import sqlalchemy import asyncio import nest_asyncio nest_asyncio.apply() import os from getpass import getpass import matplotlib.pyplot as plt import openai import pandas as pd from phoenix.experimental.evals import ( HALLUCINATION_PROMPT_RAILS_MAP, HALLUCINATION_PROMPT_TEMPLATE, RAG_RELEVANCY_PROMPT_RAILS_MAP, RAG_RELEVANCY_PROMPT_TEMPLATE, TOXICITY_PROMPT_RAILS_MAP, TOXICITY_PROMPT_TEMPLATE, RAG_RELEVANCY_PROMPT_RAILS_MAP, RAG_RELEVANCY_PROMPT_TEMPLATE, OpenAIModel, download_benchmark_dataset, llm_classify) # import phoenix.experimental.evals.templates.default_templates as templates # from pycm import ConfusionMatrix from sklearn.metrics import classification_report from chatbot import test @api_view(['GET']) def phoenix_eval(request): if request.method == "GET": database="postgres" user="postgres" password="pg1234" host="localhost" port= '5432' server = 'postgresql' conn = sqlalchemy.create_engine(server + "://" + database + ":" + password + "@localhost:" + port + "/" + database) fetch_query = ''' SELECT * FROM chat_history1 ''' df = pd.read_sql_query(fetch_query, conn) #To fetch table df = df[['question', 'answer','context']] #if columns name are different df.rename(columns={"question": "input", "answer": "output","context":"reference"},inplace=True) azure_model = OpenAIModel( model_name="qkgpt4", temperature=0.0, 
api_key="1c0d153fba6945bd833d9cb03f1b67a3", azure_endpoint="https://qkgenai.openai.azure.com/", api_version="2023-05-15", ) print("Enter") print(__name__) rails = list(HALLUCINATION_PROMPT_RAILS_MAP.values()) hallucination_classifications = llm_classify( dataframe=df, template=HALLUCINATION_PROMPT_TEMPLATE, model=azure_model, rails=rails, concurrency=20 )["label"].tolist() df["ph_hallucination"]=hallucination_classifications print("Exit") df.rename(columns={"input":"question", "output":"answer","reference":"context"},inplace=True) print(df) df.to_sql(name='Eval_score', con=conn, if_exists='append', index=False) print('Done') json_response = df.to_json(orient='records') return HttpResponse(json_response, content_type='application/json')
