Kotoba
import os, sys, json, re
import pandas as pd
import langchain as lc
import camelot
import pandasai
import markdown
import boto3  # needed by get_bedrock()
from bs4 import BeautifulSoup
# import pdftotree  # requires tensorflow
import kotoba.chatbot_utils as c_t
import importlib
from pandasai.llm import BedrockClaude
from pandasai.llm import LLM
from pandasai.prompts import BasePrompt
from langchain import PromptTemplate
from langchain.chains import LLMChain
modL = [
    "gpt-4o@openai", "gpt-4-turbo@openai", "gpt-3.5-turbo@openai",
    "mixtral-8x7b-instruct-v0.1@aws-bedrock", "llama-2-70b-chat@aws-bedrock",
    "codellama-34b-instruct@together-ai", "gemma-7b-it@fireworks-ai",
    "claude-3-haiku@anthropic", "claude-3-opus@anthropic", "claude-3-sonnet@anthropic",
    "mistral-7b-instruct-v0.1@fireworks-ai", "mistral-7b-instruct-v0.2@fireworks-ai",
]
os.environ['OPENAI_MODEL_NAME'] = modL[0]
system_message = "You are a Data Analyst and pandas expert. Your goal is to help people generate high quality and robust code."
model_params = {  # HuggingFace text-generation sampling parameters
    "do_sample": True, "top_p": 0.9, "top_k": 40, "temperature": 0.1,
    "max_new_tokens": 1024, "repetition_penalty": 1.03, "stop": ["</s>"],
}
def html2df(fName, llm):
    """Extract the target table from an HTML file and wrap it in a SmartDataframe."""
    with open(fName) as fByte:
        html_text = fByte.read()
    soup = BeautifulSoup(html_text, 'html.parser')
    tableL = soup.find_all('table')
    tabD = None
    for tab in tableL:
        t = str(tab)
        # keep only the table whose text matches the marker string
        if re.search("flexibility gradually", t):
            tabD = pd.read_html(t, header=[0, 1])[0]
            break
    if tabD is None:
        raise ValueError("no matching table found in %s" % fName)
    df = pandasai.SmartDataframe(tabD, config={"llm": llm})
    return df
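# Usage sketch for html2df. Hedged: "tables.html" is a hypothetical input
# file containing the marker string "flexibility gradually"; it is not part
# of the original.
if False:
    sdf = html2df("tables.html", get_bedrock())
    resp = sdf.chat("Summarize the main trend across the columns")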
def md2df(text, llm):
    """Parse a markdown pipe table into a DataFrame and wrap it in a SmartDataframe."""
    lines = text.split("\n")
    header = [h.strip() for h in lines[0].strip("|").split("|")]
    data = []
    for line in lines[2:]:  # skip the |---| separator row
        if not line.strip():
            break
        cols = [c.strip() for c in line.strip("|").split("|")]
        data.append(dict(zip(header, cols)))
    df = pd.DataFrame(data)
    sdf = pandasai.SmartDataframe(df, config={"llm": llm})
    return sdf
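# Usage sketch for md2df on a small pipe table (the table content is
# illustrative only, not from the original):
if False:
    md_table = "| name | value |\n| --- | --- |\n| a | 1 |\n| b | 2 |"
    sdf = md2df(md_table, get_bedrock())
    resp = sdf.chat("Which row has the largest value?")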
def get_local_llm():
    """Connect to a local text-generation-inference server."""
    from pandasai.llm import HuggingFaceTextGen
    llm = HuggingFaceTextGen(inference_server_url="http://127.0.0.1:8080")
    return llm
def get_bedrock():
    """Build a Claude LLM backed by the AWS Bedrock runtime."""
    bedrock_runtime_client = boto3.client('bedrock-runtime')
    llm = BedrockClaude(bedrock_runtime_client)
    return llm
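# numeric_qa below fills a template with {text} and {table} placeholders;
# the original never defines numeric_qa_prompt, so this is an assumed
# minimal template:
numeric_qa_prompt = """Answer the question using only the table below.

Table:
{table}

Question: {text}
Answer:"""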
def numeric_qa(question, dataframe, model, qa_prompt=numeric_qa_prompt, to_html=False):
    """
    Pass a prompt, question and table to the LLM.
    Optionally converts the data frame to HTML first.
    """
    if to_html:
        dataframe = dataframe.to_html()
    prompt_qa = PromptTemplate(template=qa_prompt, input_variables=["text", "table"])
    llm_chain = LLMChain(prompt=prompt_qa, llm=model)
    llm_reply = llm_chain.predict(text=question, table=dataframe)
    return llm_reply
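# Usage sketch for numeric_qa. Hedged: the LLMChain needs a LangChain LLM,
# not a pandasai one; the Bedrock wrapper from langchain_community and the
# demo frame are illustrative assumptions, not part of the original.
if False:
    from langchain_community.llms import Bedrock
    lc_model = Bedrock(model_id="anthropic.claude-v2")
    demo_df = pd.DataFrame({"year": [2022, 2023], "revenue": [10, 12]})
    print(numeric_qa("How much did revenue grow between 2022 and 2023?",
                     demo_df, model=lc_model, to_html=True))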
if False:  # demo snippets, disabled by default
    llm = get_bedrock()
    import seaborn as sns
    iris = sns.load_dataset('iris')
    iris.head()
    agent = pandasai.Agent(iris, config={"llm": llm})
    resp = agent.chat('Which is the most common species?')
    sales_by_country = pd.DataFrame({
        "country": ["United States", "United Kingdom", "France", "Germany", "Italy", "Spain", "Canada", "Australia", "Japan", "China"],
        "sales": [5000, 3200, 2900, 4100, 2300, 2100, 2500, 2600, 4500, 7000]
    })
    agent = pandasai.Agent(sales_by_country, config={"llm": llm})
    resp = agent.chat('Which are the top 5 countries by sales?')