import os
import io
import sys
import re
import json
import base64
from ast import literal_eval
from operator import itemgetter

import boto3
from langchain.agents import AgentExecutor, Tool, create_react_agent
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_experimental.utilities import PythonREPL
from langchain_aws import ChatBedrock
# from langchain_community.chat_models import BedrockChat  # deprecated; superseded by ChatBedrock

from src.backend.llm.prompts import (
    simple_extraction_prompt,
    complex_extraction_prompt,
    simple_or_complex_prompt,
    decomp_prompt,
    agent_prompt,
)

from langchain_community.document_loaders import UnstructuredExcelLoader
from azure.identity import DefaultAzureCredential

  1. os.environ["OPENAI_API_TYPE"] = "azure_ad"
  2. os.environ["OPENAI_API_KEY"] = credential.get_token("https://cognitiveservices.azure.com/.default").token

from azure.identity import ChainedTokenCredential, ManagedIdentityCredential, AzureCliCredential from langchain_openai import AzureOpenAI from openai import AzureOpenAI import openai client = AzureOpenAI(api_key=os.getenv("AZURE_OPENAI_API_KEY"),api_version=os.getenv("AZURE_OPENAI_API_VERSION"),azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT")) client = AzureOpenAI(api_key=os.getenv("AZURE_OPENAI_API_KEY"),api_version="2024-05-01-preview",azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT")) completion = client.completions.create(model="gpt-4",prompt="<prompt>")

  1. credential = ChainedTokenCredential(ManagedIdentityCredential(),AzureCliCredential())
  2. llm = AzureOpenAI()
  3. llm.invoke("four plus four?")
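# The token written to OPENAI_API_KEY above expires (typically within about an
# hour). A sturdier pattern (a sketch, not part of the original snippet) hands
# the client a token provider so credentials refresh automatically:
from azure.identity import get_bearer_token_provider

token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default")
client = AzureOpenAI(
    azure_ad_token_provider=token_provider,
    api_version=os.getenv("AZURE_OPENAI_API_VERSION"),
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
)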


endpoint = os.getenv("ENDPOINT_URL", "https://dsg-genai-playground-openai-eastus.openai.azure.com/")
deployment = os.getenv("DEPLOYMENT_NAME", "dsg-gpt-4-eastus")

client = AzureOpenAI(
    azure_endpoint=endpoint,
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    api_version="2024-05-01-preview",
)
completion = client.chat.completions.create(
    model=deployment,
    messages=[
        {"role": "system", "content": "You are an AI assistant that helps people find information."},
        {"role": "user", "content": "4+4?"},
    ],
    max_tokens=800,
    temperature=0.7,
    top_p=0.95,
    frequency_penalty=0,
    presence_penalty=0,
    stop=None,
    stream=False,
)

print(completion.to_json())
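# To pull out just the assistant's reply rather than the full JSON payload:
print(completion.choices[0].message.content)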



from promptflow.core import AzureOpenAIModelConfiguration

configuration = AzureOpenAIModelConfiguration(
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    api_version=os.getenv("AZURE_OPENAI_API_VERSION"),
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    azure_deployment="",  # set to the name of your Azure OpenAI deployment
)


from promptflow.evals.evaluators import (
    ContentSafetyEvaluator,
    RelevanceEvaluator,
    CoherenceEvaluator,
    GroundednessEvaluator,
    FluencyEvaluator,
    SimilarityEvaluator,
)

# azure_ai_project (subscription, resource group, and project name) is assumed
# to be defined elsewhere; the content-safety evaluator requires it.
content_safety_evaluator = ContentSafetyEvaluator(project_scope=azure_ai_project)
relevance_evaluator = RelevanceEvaluator(model_config=configuration)
coherence_evaluator = CoherenceEvaluator(model_config=configuration)
groundedness_evaluator = GroundednessEvaluator(model_config=configuration)
fluency_evaluator = FluencyEvaluator(model_config=configuration)
similarity_evaluator = SimilarityEvaluator(model_config=configuration)
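# Evaluators can also be called directly on a single row -- handy as a sanity
# check before a full run (hypothetical inputs; the score key returned may
# vary by promptflow-evals version):
score = relevance_evaluator(
    question="What is the capital of France?",
    answer="Paris is the capital of France.",
    context="France's capital city is Paris.",
)
print(score)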


import pathlib
import random

from app_target import ModelEndpoints
from promptflow.evals.evaluate import evaluate

models = ["gpt4-0613", "gpt35-turbo", "mistral7b", "phi3_mini_serverless"]
path = str(pathlib.Path.cwd() / "data.jsonl")

for model in models:

   randomNum = random.randint(1111, 9999)
   results = evaluate(
       azure_ai_project=azure_ai_project, 
       evaluation_name="Eval-Run-"+str(randomNum)+"-"+model.title(), 
       data=path, 
       target=ModelEndpoints(env_var, model), 
       evaluators={ 
           "content_safety": content_safety_evaluator, 
           "coherence": coherence_evaluator, 
           "relevance": relevance_evaluator,
           "groundedness": groundedness_evaluator,
           "fluency": fluency_evaluator,
           "similarity": similarity_evaluator,
       }, 
       evaluator_config={ 
           "content_safety": { 
               "question": "${data.question}", 
               "answer": "${target.answer}"  
           }, 
           "coherence": { 
               "answer": "${target.answer}", 
               "question": "${data.question}"  
           }, 
           "relevance": { 
               "answer": "${target.answer}", 
               "context": "${data.context}", 
               "question": "${data.question}"  
           }, 
           "groundedness": { 
               "answer": "${target.answer}", 
               "context": "${data.context}", 
               "question": "${data.question}"  
           }, 
           "fluency": { 
               "answer": "${target.answer}", 
               "context": "${data.context}", 
               "question": "${data.question}"  
           }, 
           "similarity": { 
               "answer": "${target.answer}", 
               "context": "${data.context}", 
               "question": "${data.question}"  
           } 
       } 
   )
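    # `results` aggregates per-evaluator scores for the run; in recent
    # promptflow-evals versions the summary is exposed under a "metrics" key
    # (hedged: key names may differ across versions):
    # print(results["metrics"])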



input_text = "Please recommend books with a theme similar to the movie 'Inception'." native_request = {"inputText": input_text} request = json.dumps(native_request) response = client.invoke_model(modelId=model_id, body=request) model_response = json.loads(response["body"].read()) print(model_response) model_id = "anthropic.claude-3-haiku-20240307-v1:0" user_message = "Describe the purpose of a 'hello world' program in one line." conversation = [{"role": "user","content": [{"text": user_message}],}] response = client.converse(modelId=model_id,messages=conversation,inferenceConfig={"maxTokens": 512, "temperature": 0.5, "topP": 0.9},) response_text = response["output"]["message"]["content"][0]["text"] print(response_text)

def load_data_to_query(question, data):
    """Append the extracted table data to the question so the agent can use it."""
    return str(question) + ' Answer question based on following data: ' + str(data)

def read_message(message):
    """Return the text content of a LangChain message."""
    return message.content

def lit_eval(text):
    """Parse a Python literal out of text, returning the raw text on failure."""
    try:
        return literal_eval(text)
    except (ValueError, SyntaxError):  # literal_eval raises either for non-literal input
        return text

def extract_dict(dictionary):
    """Pull the 'extraction' field out of a decomposition result."""
    return dictionary['extraction']
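# Quick illustration of the helpers above (hypothetical inputs): lit_eval
# parses literals and falls back to returning the raw string otherwise.
assert lit_eval("{'a': 1}") == {'a': 1}
assert lit_eval("not a literal") == "not a literal"
assert extract_dict({'extraction': [1, 2]}) == [1, 2]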

def extract_dictionary(message):
    """Find and parse the first balanced {...} block in a message's text."""
    text = message.content
    open_braces = 0
    in_dict = False
    start_index = 0
    for i, char in enumerate(text):
        if char == '{':
            if not in_dict:
                start_index = i
                in_dict = True
            open_braces += 1  # count every opening brace, including nested ones
        elif char == '}':
            open_braces -= 1
            if in_dict and open_braces == 0:
                dict_string = text[start_index:i + 1]
                try:
                    return literal_eval(dict_string)
                except (ValueError, SyntaxError) as e:
                    print(f"Error parsing dictionary: {e}")
                    return None
    print("No dictionary found in the string.")
    return None
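# Sanity check (hypothetical message object) showing that nested braces are
# handled correctly by the balanced-brace scan:
class _Msg:
    def __init__(self, content):
        self.content = content

print(extract_dictionary(_Msg("Result: {'steps': {'a': 1, 'b': 2}} done")))
# -> {'steps': {'a': 1, 'b': 2}}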

def get_table_from_test_set(image_file):
    """OCR an image into a spreadsheet and load it as an unstructured document.

    `ocr` is assumed to be defined elsewhere in the project; it returns the
    path of the extracted table file.
    """
    table_path = ocr('images', 'images', image_file)
    loader = UnstructuredExcelLoader(table_path, mode="elements")
    docs = loader.load()
    return docs[0]

def get_table_from_test_set_by_table_id(table_id: str):
    table_path = ocr('images', f"./test_png/{table_id}.png")
    loader = UnstructuredExcelLoader(table_path, mode="elements")
    docs = loader.load()
    return docs[0]

class QuestionAnsweringAgent:
    """Reconstructed wrapper class: the methods below take `self`, and the
    usage at the bottom of this snippet instantiates QuestionAnsweringAgent,
    so they are assumed to be methods of this class."""

    def process_question(self, question, image_file):
        table = get_table_from_test_set(image_file)
        # `chain_main` is referenced here but not defined in this snippet; see
        # the sketch after the chain definitions below for one plausible composition.
        output = chain_main.invoke({"question": question, "table": table})
        # _memory.save_context({"human_input": question},{"context": output})
        return output

    def route(self, info):
        # Dispatch to the simple or complex extraction chain based on the
        # classifier's output.
        if "simple" in str(info["question_type"]):
            return chain_simple_extraction
        else:
            return chain_complex


boto3_session = boto3.Session(region_name='us-east-1')
bedrock_runtime = boto3_session.client(service_name="bedrock-runtime")
llm = ChatBedrock(
    client=bedrock_runtime,
    model_id="anthropic.claude-3-sonnet-20240229-v1:0",
    model_kwargs={'temperature': 0},
    streaming=True,
)

python_repl = PythonREPL()
repl_tool = Tool(
    name="python_repl",
    description="A Python shell. Use this to execute python commands. "
                "Input should be a valid python command. If you want to see the output "
                "of a value, you should print it out with `print(...)`.",
    func=python_repl.run,
)
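# Optional smoke test: the REPL tool executes arbitrary Python with full
# interpreter access, so sandbox it appropriately outside of experiments.
print(python_repl.run("print(21 * 2)"))  # prints 42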

tools = [repl_tool]
agent = create_react_agent(llm, tools, agent_prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

chain_simple_extraction = ({"question": itemgetter("question"), "table": itemgetter("table")}
                           | simple_extraction_prompt | llm | RunnableLambda(read_message) | lit_eval)

chain_complex_extraction = ({"decomp_dict": itemgetter("decomp_dict") | RunnableLambda(extract_dict),
                             "table": itemgetter("table")}
                            | complex_extraction_prompt | llm | RunnableLambda(read_message))

chain_simple_or_complex = ({"question": itemgetter("question"), "table": itemgetter("table")}
                           | simple_or_complex_prompt | llm | RunnableLambda(read_message))

chain_decompose = ({"question": itemgetter("question")} | decomp_prompt | llm | extract_dictionary)

chain_complex = (RunnablePassthrough.assign(decomp_dict=chain_decompose)
                 | RunnablePassthrough.assign(data=chain_complex_extraction)
                 | RunnablePassthrough.assign(query=lambda x: load_data_to_query(x["question"], x['data']))
                 | {"input": itemgetter("query")}
                 | RunnablePassthrough.assign(response=agent_executor))
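# `chain_main`, invoked by QuestionAnsweringAgent.process_question above, is
# never defined in this snippet. One plausible composition (an assumption, not
# the original code) classifies the question first, then routes to the matching
# chain; a RunnableLambda that returns a Runnable triggers LangChain's dynamic
# routing:
#
# _agent = QuestionAnsweringAgent()
# chain_main = (RunnablePassthrough.assign(question_type=chain_simple_or_complex)
#               | RunnableLambda(_agent.route))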


# `question` and `image` are assumed to be supplied by the surrounding code.
qa_agent = QuestionAnsweringAgent()
output = qa_agent.process_question(question=question, image_file=image)