import os, sys, json, re
import pandas as pd
import numpy as np
import boto3      # needed by get_bedrock() below
import requests   # needed for the unify model listing below
import langchain as lc
import camelot
import pandasai
import markdown
from bs4 import BeautifulSoup
# import pdftotree  # requires tensorflow
os.environ['LAV_DIR'] = '/home/gmare/lav/'
dL = os.listdir(os.environ['LAV_DIR']+'/src/')
sys.path = list(set(sys.path + [os.environ['LAV_DIR']+'/src/'+x for x in dL]))
import kotoba.chatbot_utils as c_t
import kotoba.chatbot_unify as c_u
import kotoba.chatbot_prompt as c_p
import kotoba.pdf_tools as p_t
import kotoba.table_chat as t_c
import importlib
from pandasai.llm import BedrockClaude
from pandasai.llm import LLM
from pandasai.prompts import BasePrompt
from langchain import PromptTemplate
from langchain.chains import LLMChain


importlib.reload(c_t)
importlib.reload(c_p)
modL = ["gpt-4o@openai","gpt-4-turbo@openai","gpt-3.5-turbo@openai","mixtral-8x7b-instruct-v0.1@aws-bedrock","llama-2-70b-chat@aws-bedrock","codellama-34b-instruct@together-ai","gemma-7b-it@fireworks-ai","claude-3-haiku@anthropic","claude-3-opus@anthropic","claude-3-sonnet@anthropic","mistral-7b-instruct-v0.1@fireworks-ai","mistral-7b-instruct-v0.2@fireworks-ai"]
os.environ['OPENAI_MODEL_NAME'] = modL[0]
baseDir = os.environ['HOME'] + '/lav/soft/raw/'
system_message = "You are a Data Analyst and pandas expert. Your goal is to help people generate high quality and robust code."
model_params = {"do_sample": True,"top_p": 0.9,"top_k": 40,"temperature": 0.1,"max_new_tokens": 1024,"repetition_penalty": 1.03,"stop": ["</s>"]}
# alternative source documents (uncomment one pdf_doc/headers_split pair):
#pdf_doc = baseDir + 'Policies.pdf'
#pdf_doc = baseDir + 'data_proc.pdf'
#headers_split = [("####","Chapter"),("######","Section"),('########','Subsection')]
#pdf_doc = baseDir + 'panasonic_airconditioner_manual.pdf'
#pdf_doc = baseDir + 'BaroneLamberto2.pdf'
pdf_doc = baseDir + 'leipzig/kompendium.pdf'
headers_split = [('#',"Title"),("##","Chapter"),("####","Section"),("###","Subsection")]
collN = re.sub(r"\.pdf$","",pdf_doc).split("/")[-1]  # e.g. 'kompendium'


def html2df(fName,llm):
    """Read an HTML file, pick the table mentioning 'flexibility gradually'
    and wrap it into a pandasai SmartDataframe."""
    with open(fName) as fByte:
        html_text = fByte.read()
    soup = BeautifulSoup(html_text, 'html.parser')
    tableL = soup.find_all('table')
    tableS = "".join([str(t) for t in tableL])
    tabDf = pd.read_html(tableS)
    for tab in tableL:
        t = str(tab)
        if re.search("flexibility gradually",t):
            tabD = pd.read_html(t, header=[0,1])[0]
            break
    agent = pandasai.Agent(tabD, config={"llm": llm})
    df = pandasai.SmartDataframe(tabD, config={"llm": llm})
    return df


def md2df(text,llm):
    """Parse a markdown pipe table into a DataFrame and wrap it into a
    pandasai SmartDataframe."""
    lines = text.split("\n")
    header = lines[0].strip("|").split("|")
    data = []
    for line in lines[2:]:  # skip the header and the |---| separator row
        if not line.strip():
            break
        cols = line.strip("|").split("|")
        row = dict(zip(header, cols))
        data.append(row)
    df = pd.DataFrame(data)
    sdf = pandasai.SmartDataframe(df, config={"llm": llm})
    return sdf


with open(baseDir + 'leipzig/kompendium.md','r') as f:
    md_text = f.read()
importlib.reload(p_t)
docL = p_t.split_text(md_text,headers_split)
print(len(docL))
text = docL[2].page_content
llm = c_t.get_llm_bedrock()
insD = t_c.md2df(text,llm)  # kotoba's table_chat variant of md2df
respL = []
# German insurance questions: "What is insured?", "What does the compact
# tariff cover that the classic tariff does not?", and the reverse
qL = ["Was ist versichert?","Was ist in compact Tariff versichert dass nicht in classic Tariff versichert ist?","Was ist in classic Tariff versichert dass nicht in compact Tariff versichert ist?"]
for q in qL:
    resp = insD.chat(q)
    respL.append({"question":q,"answer":resp})
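# usage sketch for html2df; the HTML export path below is an assumption, not a
# file referenced elsewhere in this script
if False:
    tdf = html2df(baseDir + 'leipzig/kompendium.html', llm)  # hypothetical export
    print(tdf.chat("Which tariffs are listed?"))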


   


def get_local_llm():
    """LLM served by a local text-generation-inference endpoint."""
    from pandasai.llm import HuggingFaceTextGen
    llm = HuggingFaceTextGen(inference_server_url="http://127.0.0.1:8080")
    return llm


def get_bedrock():
    """Claude via the AWS Bedrock runtime."""
    bedrock_runtime_client = boto3.client('bedrock-runtime')
    llm = BedrockClaude(bedrock_runtime_client)
    return llm


if False: # parse the pdf and build the text collections
    importlib.reload(c_t)
    docL = c_t.pdf2md(pdf_doc,headers_split)
    collT = c_t.create_collection(docL,collN,baseDir)
    vectT = c_t.faiss_vector_storage(docL,collN,baseDir)
    vectT = c_t.create_neo4j(docL,collN,baseDir,os.environ['NEO4J_PASS'])

if False: # caption images
    importlib.reload(c_t)
    fL = os.listdir(baseDir + collN)
    imgL = c_t.image_description(baseDir + collN,fL)
    collI = c_t.create_collection(imgL,collN + "_img",baseDir)
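# quick sanity check of the Bedrock wrapper (a sketch; assumes AWS credentials
# are configured in the environment)
if False:
    llm = get_bedrock()
    demo = pandasai.SmartDataframe(pd.DataFrame({"a": [1, 2, 3]}), config={"llm": llm})
    print(demo.chat("What is the sum of column a?"))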


if False: # create summaries
    importlib.reload(c_t)
    llm = c_t.get_llm_bedrock()
    summL = c_t.section_summary(docL,llm)
    collS = c_t.create_collection(summL,collN + "_summary",baseDir)
    vectS = c_t.faiss_vector_storage(summL,collN + "_summary",baseDir)
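# query the summary collection directly (a sketch; assumes collS is a chroma
# collection as returned by c_t.create_collection)
if False:
    resS = collS.query(query_texts=["error codes"], n_results=3)
    print(resS['documents'])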
   
if False: # rebuild text and summary collections from scratch
    #docL = c_t.pdf_page([pdf_doc])
    #docL = c_t.pdf2tree(pdf_doc)
    docL = c_t.pdf2md(pdf_doc,headers_split)
    collT, collS = c_t.create_collection_summary(docL,collN,baseDir,llm)
else:
    importlib.reload(c_t)
    c_t.list_collection(baseDir)
    collT = c_t.load_chroma(collN,baseDir)
    collS = c_t.load_chroma(collN + "_summary",baseDir)
    collI = c_t.load_chroma(collN + "_img",baseDir)
    vectT = c_t.get_vectorstore(collN,baseDir)
    retrT = c_t.get_chroma_retriever(collN,baseDir)


    q = "Where is the error code table"
    resL = vectT.similarity_search(query=q,k=5)
    print("\n".join([str(x.metadata) for x in resL]))
    resL = retrT.invoke(q)
    print("\n".join([str(x.metadata) for x in resL]))


def numeric_qa(question,dataframe,model,qa_prompt,to_html=False):
    """
    Pass a prompt, a question and a table to the LLM.
    Optionally convert the data frame to HTML first.
    """
    if to_html:
        dataframe = dataframe.to_html()
    prompt_qa = PromptTemplate(template=qa_prompt, input_variables=["text", "table"])
    llm_chain = LLMChain(prompt=prompt_qa, llm=model)
    llm_reply = llm_chain.predict(text=question, table=dataframe)
    print(llm_reply)
    return llm_reply
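# usage sketch for numeric_qa; this prompt template is an assumption, the
# original numeric_qa_prompt is not defined in this file
if False:
    numeric_qa_prompt = "Answer the question using the table.\nQuestion: {text}\nTable:\n{table}"
    demo_df = pd.DataFrame({"month": ["Jan","Feb"], "sales": [100, 150]})
    numeric_qa("Which month had higher sales?", demo_df, model=get_bedrock(),
               qa_prompt=numeric_qa_prompt, to_html=True)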


 
if False: # pandasai smoke tests
    import seaborn as sns
    llm = c_t.get_llm_bedrock()
    iris = sns.load_dataset('iris')
    iris.head()
    agent = pandasai.Agent(iris, config={"llm": llm})
    resp = agent.chat('Which is the most common species?')
    sales_by_country = pd.DataFrame({
        "country": ["United States", "United Kingdom", "France", "Germany", "Italy", "Spain", "Canada", "Australia", "Japan", "China"],
        "sales": [5000, 3200, 2900, 4100, 2300, 2100, 2500, 2600, 4500, 7000]
    })
    agent = pandasai.Agent(sales_by_country, config={"llm": llm})
    resp = agent.chat('Which are the top 5 countries by sales?')

if False: # graph search and mmr retrieval
    res = c_t.search_neo4j(q,llm,collN,os.environ['NEO4J_PASS'])
    for doc in res:
        print(f"* {doc.page_content} [{doc.metadata}]")
    retriever = vectT.as_retriever(search_type="mmr", search_kwargs={"k": 1, "fetch_k": 5})
    #retriever.invoke("Error code 53", filter={"source": "news"})
    print(retriever.invoke("Error code 53"))
    # index = c_t.load_faiss(pdf_doc,baseDir)
    # query_engine = index.as_query_engine()
    # response = query_engine.query(q)
    # print(response.response)
    # n = response.source_nodes[0]
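# audD, used in both evaluation loops below, is the audit-question table; it
# is not loaded anywhere in this file. A hypothetical loader:
#audD = pd.read_csv(baseDir + 'audit_questions.csv')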
 
if False: # langchain
    importlib.reload(c_p)
    importlib.reload(c_t)
    llm = c_t.get_llm()
    chain = c_t.get_chain_confidence(llm,collN,baseDir)
    modN = modL[0].split("@")[0]  # model tag for the output file
    resL = []
    for i, aud in audD.iterrows():
        print("%0.2f" % (100.*i/audD.shape[0]),end="\r")
        q = aud['audit_question_en']
        if q == '' or q != q:  # skip empty questions and NaN (q != q)
            continue
        try:
            ans = c_t.format_confidence(chain.invoke(q))
        except:
            continue
        res = {}
        res['question'] = q
        res['pred_answer'] = ans['answer']
        res['pred_justification'] = ans['confidence']
        res['pred_context'] = ''
        res["ref_justification"] = aud['exp_reference_en']
        res['ref_context'] = aud['Content of BAIT Chapter (all)']
        res['ref_answer'] = aud['exp_result']
        resL.append(res)
    evalDf = pd.DataFrame(resL)
    evalDf.to_csv(baseDir + "pred_" + modN + ".csv",index=False)
 
 
resp = requests.get('https://api.unify.ai/v0/models',headers={"Authorization":"Bearer " + os.environ['UNIFY_KEY']})
modL = resp.text  # full catalogue (unused; overridden by the shortlist below)
modL = ["gpt-4o@openai","gpt-3.5-turbo@openai","mixtral-8x7b-instruct-v0.1@aws-bedrock","claude-3-haiku@anthropic","claude-3-opus@anthropic","claude-3-sonnet@anthropic"]
#selL = collT.get(include=[],limit=5,offset=1)
db = c_t.get_vectorstore(collN,baseDir)
importlib.reload(c_u)
for j, m in enumerate(modL): # unify
    try:
        unify = c_u.get_unify(modL[j])
    except:
        continue
    modN = modL[j].split("@")[0]
    print(modN)
    resL = []
    for i, aud in audD.iterrows():
        print("%0.2f" % (100.*i/audD.shape[0]),end="\r")
        q = aud['audit_question_en']
        if q == '' or q != q:  # skip empty questions and NaN (q != q)
            continue
        retL = db.similarity_search_with_relevance_scores(q)
        retS = "\n".join([x[0].metadata['s'] for x in retL])
        ansS = c_u.ask_rag(q,retS,unify)
        ansD = eval("{"+ansS+"}")
        res = {}
        yes = False
        try:
            if re.search(c_u.yesRe,ansD['Answer'].split(",")[0]):
                yes = True
        except:
            if re.search(c_u.yesRe,ansS):
                yes = True
        res['pred_answer'] = yes
        res['pred_justification'] = ansS
        res['pred_context'] = retS
        res['question'] = q
        res["ref_justification"] = aud['exp_reference_en']
        res['ref_context'] = aud['Content of BAIT Chapter (all)']
        res['ref_answer'] = aud['exp_result']
        resL.append(res)
    evalDf = pd.DataFrame(resL)
    evalDf.to_csv(baseDir + "pred_" + modN + ".csv",index=False)
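# hedged sketch: score the saved predictions, assuming ref_answer holds
# booleans comparable to pred_answer
if False:
    evalDf = pd.read_csv(baseDir + "pred_" + modN + ".csv")
    acc = (evalDf['pred_answer'] == evalDf['ref_answer']).mean()
    print("accuracy %0.2f" % acc)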
 
       
 
print("te se qe te ve be te ne?")
