import os, io, sys, re, json, base64
import boto3
from ast import literal_eval
from operator import itemgetter

from langchain.agents import AgentExecutor, create_react_agent, Tool
from langchain_experimental.utilities import PythonREPL
from langchain_aws import ChatBedrock
# from langchain_community.chat_models import BedrockChat
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_community.document_loaders import UnstructuredExcelLoader

# from src.backend.llm.prompts import simple_extraction_prompt, complex_extraction_prompt, simple_or_complex_prompt, decomp_prompt, agent_prompt

from azure.identity import DefaultAzureCredential, ChainedTokenCredential, ManagedIdentityCredential, AzureCliCredential
from langchain_openai import AzureOpenAI as LangChainAzureOpenAI  # aliased to avoid shadowing the openai client class below
from openai import AzureOpenAI
import openai

# os.environ["OPENAI_API_TYPE"] = "azure_ad"
# os.environ["OPENAI_API_KEY"] = credential.get_token("https://cognitiveservices.azure.com/.default").token

client = AzureOpenAI(
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    api_version=os.getenv("AZURE_OPENAI_API_VERSION", "2024-05-01-preview"),
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
)

# Legacy Completions endpoint call; replace "<prompt>" with an actual prompt string.
completion = client.completions.create(model="gpt-4", prompt="<prompt>")

# credential = ChainedTokenCredential(ManagedIdentityCredential(), AzureCliCredential())
# llm = AzureOpenAI()
# llm.invoke("four plus four?")
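# The azure.identity imports and the commented-out token snippet above suggest Entra ID
# (keyless) auth as an alternative to API keys. A minimal sketch of that pattern, assuming
# the identity running the code has access to the Azure OpenAI resource:
#
# from azure.identity import DefaultAzureCredential, get_bearer_token_provider
# token_provider = get_bearer_token_provider(
#     DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
# )
# client = AzureOpenAI(
#     azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
#     azure_ad_token_provider=token_provider,
#     api_version=os.getenv("AZURE_OPENAI_API_VERSION", "2024-05-01-preview"),
# )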
import os
from openai import AzureOpenAI

endpoint = os.getenv("ENDPOINT_URL", "https://dsg-genai-playground-openai-eastus.openai.azure.com/")
deployment = os.getenv("DEPLOYMENT_NAME", "dsg-gpt-4-eastus")

client = AzureOpenAI(
    azure_endpoint=endpoint,
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    api_version="2024-05-01-preview",
)

completion = client.chat.completions.create(
    model=deployment,
    messages=[
        {"role": "system", "content": "You are an AI assistant that helps people find information."},
        {"role": "user", "content": "4+4?"},
    ],
    max_tokens=800,
    temperature=0.7,
    top_p=0.95,
    frequency_penalty=0,
    presence_penalty=0,
    stop=None,
    stream=False,
)

print(completion.to_json())
from promptflow.core import AzureOpenAIModelConfiguration

configuration = AzureOpenAIModelConfiguration(
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    api_version=os.getenv("AZURE_OPENAI_API_VERSION"),
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    azure_deployment="",
)
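# `azure_ai_project` is used by the content-safety evaluator and the evaluate() run below
# but is never defined in this listing. For promptflow-evals it is expected to be a dict
# scoping an Azure AI Studio project; a minimal sketch with placeholder values
# (substitute your own):
azure_ai_project = {
    "subscription_id": "<your-subscription-id>",
    "resource_group_name": "<your-resource-group>",
    "project_name": "<your-ai-studio-project>",
}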
from promptflow.evals.evaluators import (
    ContentSafetyEvaluator,
    RelevanceEvaluator,
    CoherenceEvaluator,
    GroundednessEvaluator,
    FluencyEvaluator,
    SimilarityEvaluator,
)

content_safety_evaluator = ContentSafetyEvaluator(project_scope=azure_ai_project)
relevance_evaluator = RelevanceEvaluator(model_config=configuration)
coherence_evaluator = CoherenceEvaluator(model_config=configuration)
groundedness_evaluator = GroundednessEvaluator(model_config=configuration)
fluency_evaluator = FluencyEvaluator(model_config=configuration)
similarity_evaluator = SimilarityEvaluator(model_config=configuration)
from app_target import ModelEndpoints
import pathlib
import random
from promptflow.evals.evaluate import evaluate

models = ["gpt4-0613", "gpt35-turbo", "mistral7b", "phi3_mini_serverless"]
path = str(pathlib.Path(pathlib.Path.cwd())) + "/data.jsonl"

for model in models:
    randomNum = random.randint(1111, 9999)
    results = evaluate(
        azure_ai_project=azure_ai_project,
        evaluation_name="Eval-Run-" + str(randomNum) + "-" + model.title(),
        data=path,
        target=ModelEndpoints(env_var, model),  # env_var is assumed to be defined elsewhere (endpoint configuration)
        evaluators={
            "content_safety": content_safety_evaluator,
            "coherence": coherence_evaluator,
            "relevance": relevance_evaluator,
            "groundedness": groundedness_evaluator,
            "fluency": fluency_evaluator,
            "similarity": similarity_evaluator,
        },
        evaluator_config={
            "content_safety": {
                "question": "${data.question}",
                "answer": "${target.answer}",
            },
            "coherence": {
                "answer": "${target.answer}",
                "question": "${data.question}",
            },
            "relevance": {
                "answer": "${target.answer}",
                "context": "${data.context}",
                "question": "${data.question}",
            },
            "groundedness": {
                "answer": "${target.answer}",
                "context": "${data.context}",
                "question": "${data.question}",
            },
            "fluency": {
                "answer": "${target.answer}",
                "context": "${data.context}",
                "question": "${data.question}",
            },
            "similarity": {
                "answer": "${target.answer}",
                "context": "${data.context}",
                "question": "${data.question}",
            },
        },
    )
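# The evaluator_config above maps evaluator inputs from columns of data.jsonl (and from the
# target's output). A hypothetical example of one line of that file, assuming it carries at
# least the "question" and "context" columns referenced via ${data.question} / ${data.context}:
#
# {"question": "What is the capital of France?", "context": "France is a country in Europe. Its capital is Paris."}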
# The two Bedrock examples below assume `client` is a boto3 "bedrock-runtime" client
# (like the `bedrock_runtime` client created further down), not the AzureOpenAI client above.

# InvokeModel with a Titan-style request body; `model_id` is assumed to be set to a
# Bedrock model ID that accepts the {"inputText": ...} format.
input_text = "Please recommend books with a theme similar to the movie 'Inception'."
native_request = {"inputText": input_text}
request = json.dumps(native_request)
response = client.invoke_model(modelId=model_id, body=request)
model_response = json.loads(response["body"].read())
print(model_response)

# Converse API with Claude 3 Haiku.
model_id = "anthropic.claude-3-haiku-20240307-v1:0"
user_message = "Describe the purpose of a 'hello world' program in one line."
conversation = [{"role": "user", "content": [{"text": user_message}]}]
response = client.converse(
    modelId=model_id,
    messages=conversation,
    inferenceConfig={"maxTokens": 512, "temperature": 0.5, "topP": 0.9},
)
response_text = response["output"]["message"]["content"][0]["text"]
print(response_text)
def load_data_to_query(question, data):
    return str(question) + ' Answer question based on following data: ' + str(data)


def read_message(message):
    return message.content


def lit_eval(text):
    try:
        return literal_eval(text)
    except SyntaxError:
        return text


def extract_dict(dictionary):
    return dictionary['extraction']


def extract_dictionary(message):
    # Scan the message text for the first balanced {...} block and parse it as a dict.
    text = message.content
    open_braces = 0
    in_dict = False
    start_index = 0
    for i, char in enumerate(text):
        if char == '{':
            if not in_dict:
                start_index = i
                in_dict = True
            open_braces += 1
        elif char == '}':
            open_braces -= 1
            if in_dict and open_braces == 0:
                dict_string = text[start_index:i + 1]
                try:
                    return literal_eval(dict_string)
                except ValueError as e:
                    print(f"Error parsing dictionary: {e}")
                    return None
    print("No dictionary found in the string.")
    return None
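# Quick illustration of extract_dictionary on a mock LLM message (hypothetical input, not
# from the original listing): it pulls the first balanced dict out of free-form text.
from types import SimpleNamespace

_mock = SimpleNamespace(content="Sure, here is the decomposition: {'steps': ['find row', 'sum column']} Hope that helps.")
print(extract_dictionary(_mock))  # -> {'steps': ['find row', 'sum column']}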
def get_table_from_test_set(image_file) -> str:
    # `ocr` is assumed to be defined elsewhere; it presumably converts the table image
    # to a spreadsheet file and returns its path.
    table_path = ocr('images', 'images', image_file)
    loader = UnstructuredExcelLoader(table_path, mode="elements")
    docs = loader.load()
    return docs[0]


def get_table_from_test_set_by_table_id(table_id: str) -> str:
    table_path = ocr('images', f"./test_png/{table_id}.png")
    loader = UnstructuredExcelLoader(table_path, mode="elements")
    docs = loader.load()
    return docs[0]


def process_question(self, question, image_file):
    # Method of the question-answering agent class used at the bottom of this listing;
    # `chain_main` is the entry-point chain (not shown here).
    table = get_table_from_test_set(image_file)
    output = chain_main.invoke({"question": question, "table": table})
    # _memory.save_context({"human_input": question}, {"context": output})
    return output


def route(self, info):
    if "simple" in str(info["question_type"]):
        return chain_simple_extraction
    else:
        return chain_complex
boto3_session = boto3.Session(region_name='us-east-1')
bedrock_runtime = boto3_session.client(service_name="bedrock-runtime")

llm = ChatBedrock(
    client=bedrock_runtime,
    model_id="anthropic.claude-3-sonnet-20240229-v1:0",
    model_kwargs={'temperature': 0},
    streaming=True,
)

python_repl = PythonREPL()
repl_tool = Tool(
    name="python_repl",
    description="A Python shell. Use this to execute python commands. "
                "Input should be a valid python command. If you want to see the output "
                "of a value, you should print it out with `print(...)`.",
    func=python_repl.run,
)
tools = [repl_tool]

# The prompt objects below (agent_prompt, simple_extraction_prompt, ...) come from the
# commented-out src.backend.llm.prompts import near the top of this listing.
agent = create_react_agent(llm, tools, agent_prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

chain_simple_extraction = (
    {"question": itemgetter("question"), "table": itemgetter("table")}
    | simple_extraction_prompt | llm | RunnableLambda(read_message) | lit_eval
)

chain_complex_extraction = (
    {"decomp_dict": itemgetter("decomp_dict") | RunnableLambda(extract_dict),
     "table": itemgetter("table")}
    | complex_extraction_prompt | llm | RunnableLambda(read_message)
)

chain_simple_or_complex = (
    {"question": itemgetter("question"), "table": itemgetter("table")}
    | simple_or_complex_prompt | llm | RunnableLambda(read_message)
)

chain_decompose = ({"question": itemgetter("question")} | decomp_prompt | llm | extract_dictionary)

chain_complex = (
    RunnablePassthrough.assign(decomp_dict=chain_decompose)
    | RunnablePassthrough.assign(data=chain_complex_extraction)
    | RunnablePassthrough.assign(query=lambda x: load_data_to_query(x["question"], x['data']))
    | {"input": itemgetter("query")}
    | (RunnablePassthrough.assign(response=agent_executor))
)

# `question` and `image` are assumed to be provided by the caller
# (the user's question and the table image filename).
qa_agent = QuestionAnsweringAgent()
output = qa_agent.process_question(question=question, image_file=image)
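# `chain_main` (used in process_question) and the QuestionAnsweringAgent class itself are not
# shown in this listing. A plausible wiring, sketched from the pieces above and the `route`
# method (an assumption, not the original implementation), would classify the question with
# chain_simple_or_complex and then dispatch with route, e.g. inside the class:
#
#     chain_main = (
#         RunnablePassthrough.assign(question_type=chain_simple_or_complex)
#         | RunnableLambda(self.route)
#     )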
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer, WordNetLemmatizer
# Download required corpora before loading them
nltk.download('stopwords')
nltk.download('wordnet')

# Initialize stemmer and lemmatizer
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
STOPWORDS = set(stopwords.words('english'))
def clean_text(text):
    # Original text
    # Example: "I won't go there! This is a Testing @username https://example.com <p>Paragraphs!</p> #happy :)"

    text = text.lower()  # Convert all characters in text to lowercase
    # Example after this step: "i won't go there! this is a testing @username https://example.com <p>paragraphs!</p> #happy :)"

    text = re.sub(r'https?://\S+|www\.\S+', '', text)  # Remove URLs
    # Example after this step: "i won't go there! this is a testing @username <p>paragraphs!</p> #happy :)"

    text = re.sub(r'<.*?>', '', text)  # Remove HTML tags
    # Example after this step: "i won't go there! this is a testing @username paragraphs! #happy :)"

    text = re.sub(r'@\w+', '', text)  # Remove mentions
    # Example after this step: "i won't go there! this is a testing paragraphs! #happy :)"

    text = re.sub(r'#\w+', '', text)  # Remove hashtags
    # Example after this step: "i won't go there! this is a testing paragraphs! :)"

    # Translate emoticons to their word equivalents
    emoticons = {':)': 'smile', ':-)': 'smile', ':(': 'sad', ':-(': 'sad'}
    words = text.split()
    words = [emoticons.get(word, word) for word in words]
    text = " ".join(words)
    # Example after this step: "i won't go there! this is a testing paragraphs! smile"

    text = re.sub(r'[^\w\s]', '', text)  # Remove punctuation (also drops apostrophes)
    # Example after this step: "i wont go there this is a testing paragraphs smile"

    text = re.sub(r'\s+[a-zA-Z]\s+', ' ', text)  # Remove standalone single alphabetical characters
    # Example after this step: "i wont go there this is testing paragraphs smile"

    text = re.sub(r'\s+', ' ', text, flags=re.I)  # Collapse multiple consecutive spaces into one
    # Example after this step: "i wont go there this is testing paragraphs smile"

    # Remove stopwords
    text = ' '.join(word for word in text.split() if word not in STOPWORDS)
    # Example after this step: "wont go testing paragraphs smile"

    # Stemming
    stemmer = PorterStemmer()
    text = ' '.join(stemmer.stem(word) for word in text.split())
    # Example after this step: "wont go test paragraph smile"

    # Lemmatization (flies --> fly, went --> go)
    lemmatizer = WordNetLemmatizer()
    text = ' '.join(lemmatizer.lemmatize(word) for word in text.split())
    return text
# Assuming docs is a list of objects (e.g. from a document loader) where each object
# has page_content and metadata attributes.
for doc in docs:
    original_content = doc.page_content  # Save the original page_content
    doc.page_content = clean_text(original_content)  # Update page_content with the cleaned text
    # metadata is a dictionary; store the original page_content under the key 'prompt'
    if doc.metadata is None:  # Check if metadata is None and initialize if necessary
        doc.metadata = {}
    doc.metadata['prompt'] = original_content

print(docs[0])