import os, sys, json, re
import pandas as pd
import langchain as lc
import camelot
import markdown
from bs4 import BeautifulSoup
import kotoba.chatbot_utils as c_t
import importlib
import pandasai
from pandasai.llm import BedrockClaude
from pandasai.llm import LLM
from pandasai.prompts import BasePrompt
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from beautifultable import BeautifulTable
from typing import List


modL = ["gpt-4o@openai","gpt-4-turbo@openai","gpt-3.5-turbo@openai","mixtral-8x7b-instruct-v0.1@aws-bedrock","llama-2-70b-chat@aws-bedrock","codellama-34b-instruct@together-ai","gemma-7b-it@fireworks-ai","claude-3-haiku@anthropic","claude-3-opus@anthropic","claude-3-sonnet@anthropic","mistral-7b-instruct-v0.1@fireworks-ai","mistral-7b-instruct-v0.2@fireworks-ai"]
os.environ['OPENAI_MODEL_NAME'] = modL[0]
system_message = "You are a Data Analyst and pandas expert. Your goal is to help people generate high quality and robust code."
model_params = {"do_sample": True,"top_p": 0.9,"top_k": 40,"temperature": 0.1,"max_new_tokens": 1024,"repetition_penalty": 1.03,"stop": ["</s>"]}
promptS = """
[INST]Offer a thorough and accurate solution that directly addresses the Question outlined in the [Question].
### [Table Text]
{table_descriptions}
### [Table]
```
{table_in_csv}
```
### [Question]
{question}
### [Solution][/INST]
"""
tab_assistantS = """You are a customer service agent that helps a customer with answering questions.
Please answer the question based on the provided context below.
Make sure not to make any changes to the context, if possible, when preparing answers to provide accurate responses.
If the answer cannot be found in context, just say that you do not know, do not try to make up an answer."""
tab_assistantS = """
Sie sind ein Kundendienstmitarbeiter, der einem Kunden bei der Beantwortung von Fragen hilft.
Bitte beantworten Sie die Frage auf der Grundlage des unten angegebenen Kontexts.
Achten Sie darauf, den Kontext möglichst nicht zu verändern, wenn Sie die Antworten vorbereiten, um genaue Antworten zu geben.
Wenn die Antwort nicht im Kontext gefunden werden kann, sagen Sie einfach, dass Sie es nicht wissen, und versuchen Sie nicht, eine Antwort zu erfinden.
Bitte kurz und gezielt auf Deutsch antworten
"""
def get_tables(pdf_doc: str, pages: str) -> List[pd.DataFrame]:
    """Extract the tables of a pdf with camelot and return them as cleaned data frames."""
    tableL = camelot.read_pdf(pdf_doc, pages=pages)
    dfL = []
    for tab in range(tableL.n):
        tableD = tableL[tab].df
        # promote the first row to column headers
        tableD = (tableD.rename(columns=tableD.iloc[0]).drop(tableD.index[0]).reset_index(drop=True))
        # strip newlines from cells and normalize column names
        tableD = tableD.apply(lambda x: x.str.replace('\n', ''))
        tableD.columns = [col.replace('\n', ' ').replace(' ', '') for col in tableD.columns]
        tableD.columns = [col.replace('(', '').replace(')', '') for col in tableD.columns]
        dfL.append(tableD)
    return dfL
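# Hypothetical usage of get_tables; the pdf path and page range are placeholders.
if False:
    for tableD in get_tables("doc/report.pdf", pages="1-3"):
        print(tableD.head())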
def ask_table_langchain(llm, question: str, context: str, lang: str = "en"):
    """Answer a question on a table context with a langchain chat model."""
    promptS = tab_assistantS
    if lang == "de":
        promptS = tab_assistantS_de
    res = llm.invoke([{"role": "system", "content": promptS},
                      {"role": "user", "content": question},
                      {"role": "assistant", "content": context},
                      ])
    return res.content
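# Minimal sketch of ask_table_langchain with a langchain chat model; assumes the
# langchain-openai package and an OPENAI_API_KEY in the environment.
if False:
    from langchain_openai import ChatOpenAI
    chat = ChatOpenAI(model="gpt-4o")
    context = pd.DataFrame({"city": ["Berlin", "Rome"], "pop_mln": [3.7, 2.8]}).to_csv(index=False)
    print(ask_table_langchain(chat, "Which city is larger?", context))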
def ask_table(llm, question: str, context: str, lang: str = "en"):
    """Answer a question on a table context with an Azure OpenAI client."""
    promptS = tab_assistantS
    if lang == "de":
        promptS = tab_assistantS_de
    response = llm.chat.completions.create(model=os.getenv("AZURE_DEPLOYMENT"),
                                           messages=[{"role": "system", "content": promptS},
                                                     {"role": "user", "content": question},
                                                     {"role": "assistant", "content": context},
                                                     ])
    return response.choices[0].message.content
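# Sketch of ask_table with the plain Azure OpenAI client; endpoint, key and
# api_version are assumptions to be adapted to the deployment at hand.
if False:
    from openai import AzureOpenAI
    client = AzureOpenAI(azure_endpoint=os.environ["AZURE_ENDPOINT"],
                         api_key=os.environ["AZURE_API_KEY"],
                         api_version="2024-02-01")
    context = pd.DataFrame({"item": ["a", "b"], "price": [1.0, 2.5]}).to_csv(index=False)
    print(ask_table(client, "Was kostet Artikel b?", context, lang="de"))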
def run_question(llm, query: str, eval_df: pd.DataFrame):
    """Ask the same question for every row of the evaluation frame and collect the answers."""
    questions = []
    answers = []
    for index, row in eval_df.iterrows():
        questions.append(query)
        response = ask_table(llm, query, str(row['Data raw']))
        answers.append(response)
    eval_df['Question'] = questions
    eval_df['Answer'] = answers
    return eval_df
def BeautifulTableformat(query: str, results: pd.DataFrame, MaxWidth: int = 250):
    """Render the answers of an evaluation frame as a formatted terminal table."""
    table = BeautifulTable(maxwidth=MaxWidth, default_alignment=BeautifulTable.ALIGN_LEFT)
    table.columns.header = ["Data Format", "Query", "Answer"]
    for index, row in results.iterrows():
        table.rows.append([row['Data Format'], query, row['Answer']])
    return table
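# End-to-end sketch: run one question over a toy evaluation frame (with the
# 'Data raw' column expected by run_question) and render the answers.
if False:
    eval_df = pd.DataFrame({"Data Format": ["csv"], "Data raw": ["a,b\n1,2"]})
    query = "What is the value of b?"
    res_df = run_question(client, query, eval_df)  # client as in the sketch above
    print(BeautifulTableformat(query, res_df))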
def html2df(fName, llm):
    """Parse the tables of a html file, pick the matching one and wrap it in a SmartDataframe."""
    with open(fName) as fByte:
        html_text = fByte.read()
    soup = BeautifulSoup(html_text, 'html.parser')
    tableL = soup.find_all('table')
    tabD = None
    for tab in tableL:
        t = str(tab)
        if re.search("flexibility gradually", t):
            tabD = pd.read_html(t, header=[0, 1])[0]
            break
    if tabD is None:
        raise ValueError("no matching table found in " + fName)
    df = pandasai.SmartDataframe(tabD, config={"llm": llm})
    return df
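# Hypothetical usage of html2df; the file name is a placeholder and get_bedrock
# (defined below) supplies the pandasai llm.
if False:
    sdf = html2df("doc/tables.html", get_bedrock())
    print(sdf.chat("Which row has the highest value?"))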
def md2df(text, llm):
    """Parse a markdown pipe table into a data frame and wrap it in a SmartDataframe."""
    lines = text.split("\n")
    header = [h.strip() for h in lines[0].strip("|").split("|")]
    data = []
    for line in lines[2:]:  # skip the |---| separator row
        if not line.strip():
            break
        cols = [c.strip() for c in line.strip("|").split("|")]
        data.append(dict(zip(header, cols)))
    df = pd.DataFrame(data)
    sdf = pandasai.SmartDataframe(df, config={"llm": llm})
    return sdf
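# Small pipe table to illustrate md2df: header row, separator row, data rows;
# get_local_llm (defined below) supplies the pandasai llm.
if False:
    mdS = "| name | qty |\n|------|-----|\n| apple | 3 |\n| pear | 5 |"
    sdf = md2df(mdS, get_local_llm())
    print(sdf.chat("How many pears are there?"))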
def get_local_llm():
    from pandasai.llm import HuggingFaceTextGen
    llm = HuggingFaceTextGen(inference_server_url="http://127.0.0.1:8080")
    return llm
   
def get_bedrock():
    import boto3
    bedrock_runtime_client = boto3.client('bedrock-runtime')
    llm = BedrockClaude(bedrock_runtime_client)
    return llm
numeric_qa_prompt = """[INST] Your task is to answer user questions ONLY based on the provided data frame.
[EXAMPLE]For example:
User question: "How many products of category perfumaria are there?"
Answer: "There are 868 products of category perfumaria."
[/EXAMPLE]
Answer should be specific and precise, don't add anything else!
If you can't answer the question based on the provided data, say so, don't try to guess!
User question: {text}
Data frame: {table}
[/INST]"""
def numeric_qa(question, dataframe, llm, to_html=False):
    """
    Pass the prompt, question and table to the LLM.
    Optionally convert the data frame to HTML first.
    """
    if to_html:
        dataframe = dataframe.to_html()
    prompt_qa = PromptTemplate(template=numeric_qa_prompt, input_variables=["text", "table"])
    llm_chain = LLMChain(prompt=prompt_qa, llm=llm)
    llm_reply = llm_chain.predict(text=question, table=dataframe)
    return llm_reply
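# Sketch of numeric_qa on a toy frame mirroring the prompt's example; assumes a
# langchain chat model such as ChatOpenAI from langchain-openai.
if False:
    from langchain_openai import ChatOpenAI
    products = pd.DataFrame({"product_category_name": ["perfumaria"]*3 + ["bebes"]*2})
    print(numeric_qa("How many products of category perfumaria are there?",
                     products, ChatOpenAI(model="gpt-4o"), to_html=True))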
table_description = """The first table is 'products'.
It includes information about products.
The table includes columns:
- product_id (str) - unique key of a product,
- product_category_name (str) - name of product category in Spanish,
- product_name_lenght (float) - number of characters in a product name,
- product_description_length (float) - number of characters in product description,
- product_photos_qty (float) - number of product photos,
- product_weight_g (float) - weight of product in grams,
- product_length_cm (float) - product length in centimeters,
- product_height_cm (float) - product height in centimeters,
- product_width_cm (float) - product width in centimeters.
The second table is 'product_category_name_translation'. It contains mapping of English and Spanish names of products.
The columns are:
- product_category_name (str) - name of product category in Spanish,
- product_category_name_english (str) - name of product category in English.
The third table is 'order_items'. It contains information about orders.
The columns are:
- order_id (str) - unique key of an order,
- order_item_id (int) - item quantity,
- product_id (str) - key of an ordered product,
- seller_id (str) - key of a seller,
- shipping_limit_date (datetime) - date of shipping,
- price (float) - price of a product,
- freight_value (float) - freight value of a product.
"""
def text2sql(question, llm, sql_prompt, insight_prompt):
    """Translate a question into SQL, run it and let the LLM comment the result.
    The two prompt templates are passed in since they are not defined in this
    module; execute_query is expected to run the SQL against the data store."""
    prompt_sql = PromptTemplate(template=sql_prompt, input_variables=["text", "data_description"])
    llm_chain = LLMChain(prompt=prompt_sql, llm=llm)
    llm_reply = llm_chain.predict(text=question, data_description=table_description)
    print(llm_reply)
    json_reply = json.loads(llm_reply.replace('\n', ' '))
    sql_query = json_reply['sql_query']
    df_reply = execute_query(sql_query)
    print(df_reply)
    prompt_insight = PromptTemplate(template=insight_prompt, input_variables=["text", "sql_query", "table"])
    llm_chain = LLMChain(prompt=prompt_insight, llm=llm)
    llm_reply = llm_chain.predict(text=question, sql_query=sql_query, table=df_reply)
    return llm_reply
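# Hypothetical call of text2sql; the two templates are illustrative and
# execute_query must be provided by the surrounding module.
if False:
    from langchain_openai import ChatOpenAI
    sql_prompt = ("Given the schema below, reply with a JSON object whose only key "
                  "is 'sql_query'.\nSchema:\n{data_description}\nQuestion: {text}")
    insight_prompt = ("Question: {text}\nSQL: {sql_query}\nResult:\n{table}\n"
                      "Summarize the result in one sentence.")
    print(text2sql("How many products weigh more than 1000 g?",
                   ChatOpenAI(model="gpt-4o"), sql_prompt, insight_prompt))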
if False:  # pandasai usage examples; require a configured llm (e.g. get_bedrock())
    llm = get_bedrock()
    import seaborn as sns
    iris = sns.load_dataset('iris')
    iris.head()
    agent = pandasai.Agent(iris, config={"llm": llm})
    resp = agent.chat('Which is the most common species?')
    print(resp)
    sales = pd.DataFrame({
        "country": ["United States", "United Kingdom", "France", "Germany", "Italy", "Spain", "Canada", "Australia", "Japan", "China"],
        "sales": [5000, 3200, 2900, 4100, 2300, 2100, 2500, 2600, 4500, 7000]
    })
    agent = pandasai.Agent(sales, config={"llm": llm})
    resp = agent.chat('Which are the top 5 countries by sales?')
    print(resp)
    lake = pandasai.SmartDatalake([iris, sales], config={"llm": llm})
    response = lake.chat('Which are the 5 happiest countries')
    print(response)
