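"""Scratch script for building and evaluating a RAG pipeline over German insurance
documents (Wohngebaeudeversicherung / Zutrittsschutz). Each stage sits behind an
`if False:` switch so it can be run selectively from a REPL: split a markdown export
of the PDF into sections, index them in Chroma/FAISS (plus keyword, image-caption and
summary side collections), wire the index and a DuckDuckGo search tool into a
LangChain tool-calling agent on AWS Bedrock, then run question/answer evaluations
and dump the predictions to CSV."""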
import os, sys, json, re
import requests
import pandas as pd
import numpy as np
os.environ['LAV_DIR'] = '/home/gmare/lav/'
dL = os.listdir(os.environ['LAV_DIR']+'/src/')
sys.path = list(set(sys.path + [os.environ['LAV_DIR']+'/src/'+x for x in dL]))
import kotoba.chatbot_utils as c_t
import kotoba.chatbot_unify as c_u
import kotoba.chatbot_prompt as c_p
import kotoba.pdf_tools as p_t
import kotoba.table_chat as t_c
import kotoba.model_call as m_c
import kotoba.text_clean as t_l
import importlib


importlib.reload(c_t)
importlib.reload(c_p)
importlib.reload(p_t)
# os.environ['AWS_DEFAULT_PROFILE'] = 'default'
os.environ['AWS_DEFAULT_PROFILE'] = 'leipziger'
modL = ['amazon.titan-tg1-large', 'amazon.titan-text-premier-v1:0', 'amazon.titan-text-lite-v1', 'amazon.titan-text-express-v1', 'anthropic.claude-instant-v1', 'anthropic.claude-v2:1', 'anthropic.claude-v2', 'anthropic.claude-3-sonnet-20240229-v1:0', 'anthropic.claude-3-haiku-20240307-v1:0', 'anthropic.claude-3-5-sonnet-20240620-v1:0', 'meta.llama3-8b-instruct-v1:0', 'meta.llama3-70b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mixtral-8x7b-instruct-v0:1', 'mistral.mistral-large-2402-v1:0', 'mistral.mistral-small-2402-v1:0']
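# models are picked by index below: modL[8] = claude-3-haiku, modL[3] = titan-text-express, modL[0] = titan-tg1-large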
# modL = c_t.test_modelList()
baseDir = os.environ['HOME'] + '/lav/soft/raw/'
headers_split = [("####","Chapter"),("######","Section"),('########','Subsection')]
pdf_doc = baseDir + 'panasonic_airconditioner_manual.pdf'
headers_split = [('#',"Title"),("##","Kapitel"),("####","Section")]
pdf_doc = baseDir + 'leipzig/Wohngebaeudeversicherung.pdf'
pdf_doc = baseDir + 'leipzig/Zutrittsschutz.pdf'
collN = re.sub(".pdf","",pdf_doc).split("/")[-1]
qaD = pd.read_csv(baseDir + "leipzig/frage_antwort2.csv")
with open(baseDir + 'leipzig/Zutrittsschutz.md','r') as f:
    md_text = f.read()
docL = p_t.split_text(md_text,headers_split)
print(len(docL))
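# p_t.split_text presumably splits the markdown on the header levels listed in
# headers_split; a minimal sketch of the same idea, assuming the (marker, name)
# tuples match LangChain's splitter format:
# from langchain_text_splitters import MarkdownHeaderTextSplitter
# splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_split)
# docL_alt = splitter.split_text(md_text)  # Documents with header metadata attached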
from langchain_openai import ChatOpenAI
from langchain.agents import Tool, AgentExecutor, create_tool_calling_agent
from langchain_community.tools import DuckDuckGoSearchRun
from langchain_core.prompts import ChatPromptTemplate
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import ( SimpleDirectoryReader,VectorStoreIndex,StorageContext, load_index_from_storage, Settings)
from llama_index.core.tools import QueryEngineTool, ToolMetadata
llm = c_t.get_llm_bedrock(modL[8])
embed_model = c_t.get_embeddings_bedrock()
querT = c_t.get_chroma_query(collN,baseDir,model_id=modL[8])
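# get_chroma_query presumably wraps a persisted Chroma collection in a llama_index
# query engine; a rough equivalent, assuming a chroma database under baseDir:
# import chromadb
# from llama_index.vector_stores.chroma import ChromaVectorStore
# client = chromadb.PersistentClient(path=baseDir + "chroma")
# vstore = ChromaVectorStore(chroma_collection=client.get_collection(collN))
# querT_alt = VectorStoreIndex.from_vector_store(vstore, embed_model=embed_model).as_query_engine(llm=llm)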
coll_desc = "Die Wohngebäudeversicherung ist eine wichtige Absicherung für Hausbesitzer und Mieter. Sie schützt das Gebäude und den Inhalt vor Schäden durch Feuer, Wasser, Sturm und andere Ereignisse. Die Versicherung deckt auch Schäden durch Vandalismus und Diebstahl ab. Die Höhe der Versicherungssumme sollte auf den Wert des Gebäudes und des Inhalts abgestimmt sein, um den Wert der Immobilie zu schützen. Die Prämie für die Wohngebäudeversicherung hängt von verschiedenen Faktoren ab, wie der Größe des Gebäudes, der Art des Gebäudes"
query_engine_tools = [QueryEngineTool(query_engine=querT,metadata=ToolMetadata(name=collN,description=(coll_desc)))]
retriever_tools = [t.to_langchain_tool() for t in query_engine_tools]
lang_tool = QueryEngineTool(query_engine=querT,metadata=ToolMetadata(name=collN,description=(coll_desc))).to_langchain_tool() # single-tool variant of retriever_tools, unused below
search = DuckDuckGoSearchRun()
duckduckgo_tool = Tool(name='DuckDuckGoSearch',func=search.run,description='Use when you need to perform an internet search to find information that another tool cannot provide.')
langchain_tools = [duckduckgo_tool]
tools = retriever_tools + langchain_tools
system_context = "Sie sind ein Versicherungsassistent und helfen Kunden, die Versicherungsbedingungen zu verstehen. Bitte halten Sie sich an die im Referenzdokument enthaltenen Informationen und beantworten Sie kurz Fragen zu den Versicherungsbedingungen für die Versicherung von Immobilien."
prompt = ChatPromptTemplate.from_messages([("system",system_context,),("placeholder", "{chat_history}"),("human", "{input}"),("placeholder", "{agent_scratchpad}"),])
agent = create_tool_calling_agent(llm, tools, prompt,)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, return_intermediate_steps=True, handle_parsing_errors=True, max_iterations=10) # pass the full tool list, not only the retriever tools, so the executor can dispatch the agent's search calls
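# minimal invocation sketch (the question string is illustrative):
# result = agent_executor.invoke({"input": "Welche Schäden sind versichert?", "chat_history": []})
# print(result["output"])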
if False: #document collection
    importlib.reload(c_t)
    #docL = c_t.pdf2md(pdf_doc,headers_split)
    collT = c_t.create_collection(docL,collN,baseDir)
    vectT = c_t.faiss_vector_storage(docL,collN,baseDir)
    #vectT = c_t.create_neo4j(docL,collN,baseDir,os.environ['NEO4J_PASS'])
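    # faiss_vector_storage presumably embeds the sections and persists a FAISS
    # index; a rough LangChain equivalent, assuming docL holds LangChain
    # Documents and embed_model is a LangChain embeddings object:
    # from langchain_community.vectorstores import FAISS
    # vs = FAISS.from_documents(docL, embed_model)
    # vs.save_local(baseDir + collN + "_faiss")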
if False:#keywords
    importlib.reload(t_l)
    importlib.reload(c_t)
    keyL = c_t.create_keywords(docL)
    collK = c_t.create_collection(keyL,collN + "_key",baseDir)
    vectK = c_t.faiss_vector_storage(keyL,collN + "_key",baseDir)
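    # the keyword documents live in their own "<collN>_key" collection so that
    # sparse keyword matches can complement dense retrieval (see search_keywords below)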
   
if False: #caption images
    importlib.reload(c_t)
    fL = os.listdir(baseDir + collN)
    imgL = c_t.image_description(baseDir + collN,fL)
    collI = c_t.create_collection(imgL,collN + "_img",baseDir)
if False: # create summaries
    importlib.reload(c_t)
    llm = c_t.get_llm_bedrock(modL[3])
    summL = c_t.section_summary(docL,llm,collN)
    collS = c_t.create_collection(summL,collN + "_summary",baseDir)
    vectS = c_t.faiss_vector_storage(summL,collN + "_summary",baseDir)
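    # summaries get a separate "<collN>_summary" collection, a common pattern for
    # retrieving a section by its gist rather than its literal wording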
if False:
    importlib.reload(t_l)
    keyL = t_l.extract_keyword(docL[3].page_content,n=40)
    # keyL = t_l.extract_keyword(q,n=3)  # q must hold a question string here
    retL = c_t.search_keywords(docL,["Erdsenkung"])
   
if False:
    llm = c_t.get_llm_bedrock(modL[0])
    query = "Please translate from German to English the following paragraph:\n"
    query += """
    """
    resp = llm.invoke(query)
    print(resp.content)
   
if False:
    importlib.reload(c_t)
    importlib.reload(p_t)
    importlib.reload(t_c)
    importlib.reload(m_c)
    c_t.list_collection(baseDir)
    faisT = c_t.load_faiss(collN,baseDir)
    #collI = c_t.load_chroma(collN + "_img",baseDir)
    importlib.reload(c_t)
    querT = c_t.get_chroma_query(collN,baseDir,model_id=modL[8])
    retrT = c_t.get_chroma_retriever(collN,baseDir)
    vectT = c_t.get_vectorstore(collN,baseDir)
    vectK = c_t.get_vectorstore(collN+"_key",baseDir)
    collS = c_t.load_chroma(collN + "_summary",baseDir)
    # tabS = docL[3].page_content  # alternative: a single section as table input
    tabS = md_text.strip()
   
    llm = c_t.get_llm_bedrock(modL[8])
    respL = []
    for i, row in qaD.iterrows():
        print(i)
        q = row['question']
        # response = querT.query(q)
        # citeS = ""
        # for n in response.source_nodes:
        #     citeS += n.text + "\n\n"
        resp = t_c.ask_table_langchain(llm,q,tabS)
        respL.append({"tab":resp})
    respL = pd.DataFrame(respL)
    respD = pd.concat([qaD,respL],axis=1)
    #respD.to_csv(baseDir + "leipzig/frage_antwort2.csv",index=False)
    retriever = vectT.as_retriever(search_type="mmr", search_kwargs={"k": 1, "fetch_k": 5})
    # index = c_t.load_faiss(pdf_doc,baseDir)
    # query_engine = index.as_query_engine()
    # response = query_engine.query(q)
    # print(response.response)
    # n = response.source_nodes[0]
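    # the MMR retriever built above can be queried directly, e.g.:
    # ctxL = retriever.invoke(q)  # returns k=1 Document chosen from 5 candidates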
if False: #langchain
    importlib.reload(c_p)
    importlib.reload(c_t)
    llm = c_t.get_llm()
    chain = c_t.get_chain_confidence(llm,collN,baseDir)
    resL = []
    for i, aud in audD.iterrows(): # audD: audit-question DataFrame, loaded elsewhere
        print("%0.2f" % (100.*i/audD.shape[0]),end="\r")
        q = aud['audit_question_en']
        if q == '' or q != q:
            continue
        try:
            ans = c_t.format_confidence(chain.invoke(q))
        except Exception:
            continue
        res = {}
        res['question'] = q
        res['pred_answer'] = ans['answer']
        res['pred_justification'] = ans['confidence']
        res['pred_context'] = ''
        res["ref_justification"] = aud['exp_reference_en']
        res['ref_context'] = aud['Content of BAIT Chapter (all)']
        res['ref_answer'] = aud['exp_result']
        resL.append(res)
    evalDf = pd.DataFrame(resL)
    modN = "langchain"  # hypothetical output label; no model name is set in this block
    evalDf.to_csv(baseDir + "pred_" + modN + ".csv",index=False)
if False:
    resp = requests.get('https://api.unify.ai/v0/models',headers={"Authorization":"Bearer " + os.environ['UNIFY_KEY']})
    # modL = resp.text  # raw model list from the API, superseded by the manual list below
    modL = ["gpt-4o@openai","gpt-3.5-turbo@openai","mixtral-8x7b-instruct-v0.1@aws-bedrock","claude-3-haiku@anthropic","claude-3-opus@anthropic","claude-3-sonnet@anthropic"]
    #selL = collT.get(include=[],limit=5,offset=1)
    db = c_t.get_vectorstore(collN,baseDir)
    importlib.reload(c_u)
    for j, m in enumerate(modL): # unify
        try:
            unify = c_u.get_unify(m)
        except Exception:
            continue
        modN = m.split("@")[0]
        print(modN)
        resL = []
        for i, aud in audD.iterrows(): # audD as above
            print("%0.2f" % (100.*i/audD.shape[0]),end="\r")
            q = aud['audit_question_en']
            if q == '' or q != q: # skip empty or NaN questions
                continue
            retL = db.similarity_search_with_relevance_scores(q)
            retS = "\n".join([x[0].metadata['s'] for x in retL])
            ansS = c_u.ask_rag(q,retS,unify)
            try:
                ansD = eval("{"+ansS+"}") # parse the model's dict-like answer
            except Exception:
                ansD = {}
            res = {}
            yes = False
            try:
                if re.search(c_u.yesRe,ansD['Answer'].split(",")[0]):
                    yes = True
            except Exception:
                if re.search(c_u.yesRe,ansS):
                    yes = True
            res['pred_answer'] = yes
            res['pred_justification'] = ansS
            res['pred_context'] = retS
            res['question'] = q
            res["ref_justification"] = aud['exp_reference_en']
            res['ref_context'] = aud['Content of BAIT Chapter (all)']
            res['ref_answer'] = aud['exp_result']
            resL.append(res)
        evalDf = pd.DataFrame(resL) # one prediction file per model
        evalDf.to_csv(baseDir + "pred_" + modN + ".csv",index=False)
       
print("te se qe te ve be te ne?")
#https://awsmanaged.softserveinc.com/
