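# RAG experiments over pdf manuals: build or load chroma / faiss / neo4j stores,
# chat with tables extracted from the documents, and benchmark several llms
# through the unify api.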

import os, sys, json, re
import requests
import pandas as pd
import numpy as np

# make the kotoba packages importable from the local source tree
os.environ['LAV_DIR'] = '/home/gmare/lav/'
dL = os.listdir(os.environ['LAV_DIR']+'/src/')
sys.path = list(set(sys.path + [os.environ['LAV_DIR']+'/src/'+x for x in dL]))

import kotoba.chatbot_utils as c_t
import kotoba.chatbot_unify as c_u
import kotoba.chatbot_prompt as c_p
import kotoba.pdf_tools as p_t
import kotoba.table_chat as t_c
import importlib

importlib.reload(c_t)
importlib.reload(c_p)

baseDir = os.environ['HOME'] + '/lav/soft/raw/'
# candidate documents, each with its own header hierarchy; the last assignment wins
pdf_doc = baseDir + 'Policies.pdf'
pdf_doc = baseDir + 'data_proc.pdf'
headers_split = [("####","Chapter"),("######","Section"),("########","Subsection")]
pdf_doc = baseDir + 'panasonic_airconditioner_manual.pdf'
headers_split = [("#","Title"),("##","Chapter"),("###","Section"),("####","Subsection")]
pdf_doc = baseDir + 'leipzig/kompendium.pdf'
#pdf_doc = baseDir + 'BaroneLamberto2.pdf'
collN = re.sub(r"\.pdf$","",pdf_doc).split("/")[-1] # collection name from the file name

with open(baseDir + 'leipzig/kompendium.md','r') as f:
    md_text = f.read()

importlib.reload(p_t)
docL = p_t.split_text(md_text,headers_split)
print(len(docL))
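
# docL is presumably a list of langchain-style Documents, one per markdown
# section, with the matched header levels kept in each document's metadata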

text = docL[2].page_content
llm = c_t.get_llm_bedrock()
insD = t_c.md2df(text,llm)
respL = []
# the questions are in German to match the document; in English:
# "What is insured?", "What is insured in the compact tariff but not in the
# classic tariff?", "What is insured in the classic tariff but not in the compact tariff?"
qL = ["Was ist versichert?","Was ist in compact Tariff versichert dass nicht in classic Tariff versichert ist?","Was ist in classic Tariff versichert dass nicht in compact Tariff versichert ist?"]
for q in qL:
    resp = insD.chat(q)
    respL.append({"question":q,"answer":resp})
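
# md2df presumably parses the markdown table in `text` into a dataframe and
# returns a chat wrapper, so the tariff questions are answered from table
# lookups rather than free-text retrieval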

if False: # build the text collection and vector stores
    importlib.reload(c_t)
    docL = c_t.pdf2md(pdf_doc,headers_split)
    collT = c_t.create_collection(docL,collN,baseDir)
    vectT = c_t.faiss_vector_storage(docL,collN,baseDir)
    vectT = c_t.create_neo4j(docL,collN,baseDir,os.environ['NEO4J_PASS']) # note: overwrites the faiss handle
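
# the same sections feed three backends (chroma collection, faiss index,
# neo4j graph), so the retrieval strategies below can be compared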

if False: # caption images
    importlib.reload(c_t)
    fL = os.listdir(baseDir + collN)
    imgL = c_t.image_description(baseDir + collN,fL)
    collI = c_t.create_collection(imgL,collN + "_img",baseDir)

if False: # create per-section summaries
    importlib.reload(c_t)
    llm = c_t.get_llm_bedrock()
    summL = c_t.section_summary(docL,llm)
    collS = c_t.create_collection(summL,collN + "_summary",baseDir)
    vectS = c_t.faiss_vector_storage(summL,collN + "_summary",baseDir)
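
# summaries live in a separate "_summary" collection, so retrieval can run on
# condensed sections instead of the full text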

if False: # rebuild the collections from the pdf
    #docL = c_t.pdf_page([pdf_doc])
    #docL = c_t.pdf2tree(pdf_doc)
    docL = c_t.pdf2md(pdf_doc)
    collT, collS = c_t.create_collection_summary(docL,collN,baseDir,llm)
else: # load the existing collections
    importlib.reload(c_t)
    c_t.list_collection(baseDir)
    collT = c_t.load_chroma(collN,baseDir)
    collS = c_t.load_chroma(collN + "_summary",baseDir)
    collI = c_t.load_chroma(collN + "_img",baseDir)
    vectT = c_t.get_vectorstore(collN,baseDir)
    retrT = c_t.get_chroma_retriever(collN,baseDir)
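
# collT/collS/collI are raw chroma collections; vectT and retrT presumably wrap
# the same text collection as a langchain vector store and retriever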

q = "Where is the error code table?"
resL = vectT.similarity_search(query=q,k=5)
print("\n".join([str(x.metadata) for x in resL]))
resL = retrT.invoke(q)
print("\n".join([str(x.metadata) for x in resL]))
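
# both calls query the same collection: similarity_search ranks purely by
# embedding distance, while the retriever applies whatever search strategy
# get_chroma_retriever configured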

llm = c_t.get_llm_bedrock()
res = c_t.search_neo4j(q,llm,collN,os.environ['NEO4J_PASS'])

for doc in resL: # print the retrieved sections
    print(f"* {doc.page_content} [{doc.metadata}]")

retriever = vectT.as_retriever(search_type="mmr", search_kwargs={"k": 1, "fetch_k": 5})
#retriever.invoke("Error code 53", filter={"source": "news"})
print(retriever.invoke("Error code 53"))
# index = c_t.load_faiss(pdf_doc,baseDir)
# query_engine = index.as_query_engine()
# response = query_engine.query(q)
# print(response.response)
# n = response.source_nodes[0]
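
# `audD` is used below but never defined in this snippet; it is presumably a
# pandas DataFrame with one audit question per row. A minimal sketch of how it
# might be loaded (the file name is hypothetical, the columns are the ones used below):
audD = pd.read_csv(baseDir + 'audit_questions.csv') # hypothetical file
# expected columns: 'audit_question_en', 'exp_reference_en',
# 'Content of BAIT Chapter (all)', 'exp_result'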

if False: # langchain chain with confidence scores
    importlib.reload(c_p)
    importlib.reload(c_t)
    llm = c_t.get_llm()
    chain = c_t.get_chain_confidence(llm,collN,baseDir)
    resL = []
    for i, aud in audD.iterrows():
        print("%0.2f" % (100.*i/audD.shape[0]),end="\r") # progress
        q = aud['audit_question_en']
        if q == '' or q != q: # skip empty or NaN questions
            continue
        try:
            ans = c_t.format_confidence(chain.invoke(q))
        except Exception:
            continue
        res = {}
        res['question'] = q
        res['pred_answer'] = ans['answer']
        res['pred_justification'] = ans['confidence']
        res['pred_context'] = ''
        res["ref_justification"] = aud['exp_reference_en']
        res['ref_context'] = aud['Content of BAIT Chapter (all)']
        res['ref_answer'] = aud['exp_result']
        resL.append(res)

    modN = "langchain" # hypothetical label for the output file
    evalDf = pd.DataFrame(resL)
    evalDf.to_csv(baseDir + "pred_" + modN + ".csv",index=False)

resp = requests.get('https://api.unify.ai/v0/models',headers={"Authorization":"Bearer " + os.environ['UNIFY_KEY']})
modL = resp.text
# the live model listing above is only informative; the benchmark runs on a fixed list
modL = ["gpt-4o@openai","gpt-3.5-turbo@openai","mixtral-8x7b-instruct-v0.1@aws-bedrock","claude-3-haiku@anthropic","claude-3-opus@anthropic","claude-3-sonnet@anthropic"]

#selL = collT.get(include=[],limit=5,offset=1)
db = c_t.get_vectorstore(collN,baseDir)
importlib.reload(c_u)
for j, m in enumerate(modL): # benchmark every model through unify
    try:
        unify = c_u.get_unify(m)
    except Exception:
        continue
    modN = m.split("@")[0]
    print(modN)
    resL = []
    for i, aud in audD.iterrows():
        print("%0.2f" % (100.*i/audD.shape[0]),end="\r") # progress
        q = aud['audit_question_en']
        if q == '' or q != q: # skip empty or NaN questions
            continue
        retL = db.similarity_search_with_relevance_scores(q)
        retS = "\n".join([x[0].metadata['s'] for x in retL])
        ansS = c_u.ask_rag(q,retS,unify)
        res = {}
        yes = False
        try:
            # parse the dict-like '"key": "value"' answer the prompt asks for
            ansD = eval("{"+ansS+"}")
            if re.search(c_u.yesRe,ansD['Answer'].split(",")[0]):
                yes = True
        except Exception:
            # fall back to a regex over the raw answer string
            if re.search(c_u.yesRe,ansS):
                yes = True
        res['pred_answer'] = yes
        res['pred_justification'] = ansS
        res['pred_context'] = retS
        res['question'] = q
        res["ref_justification"] = aud['exp_reference_en']
        res['ref_context'] = aud['Content of BAIT Chapter (all)']
        res['ref_answer'] = aud['exp_result']
        resL.append(res)

    evalDf = pd.DataFrame(resL)
    evalDf.to_csv(baseDir + "pred_" + modN + ".csv",index=False)
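
# one pred_<model>.csv per model, with predictions next to the reference
# answers, ready for later scoring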