import os, re, sys, json, base64, string
import kotoba.chatbot_prompt as c_p
import boto3
from langchain import hub
from langchain.text_splitter import RecursiveCharacterTextSplitter, MarkdownTextSplitter, MarkdownHeaderTextSplitter
from langchain_aws import ChatBedrock
from langchain.prompts import ChatPromptTemplate, PromptTemplate
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnablePassthrough, RunnableLambda
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.output_parsers import StrOutputParser
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_openai import OpenAIEmbeddings
from langchain_aws import BedrockEmbeddings
from chromadb.utils.embedding_functions import create_langchain_embedding
#from langchain.chat_models import ChatOpenAI
from langchain_community.chat_models import ChatOpenAI
#from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_core.documents import Document  # with .page_content
#from llama_index.core import Document  # with .text
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains.history_aware_retriever import create_history_aware_retriever
from langchain.chains.retrieval import create_retrieval_chain
# from langchain.chains import create_retrieval_chain
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain.agents import Tool, AgentExecutor, create_tool_calling_agent
from langchain_community.tools import DuckDuckGoSearchRun
from llama_index.core import (SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage, Settings)
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.node_parser import SimpleFileNodeParser, MarkdownElementNodeParser
from llama_parse import LlamaParse
from langchain_chroma import Chroma
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.embeddings.bedrock import BedrockEmbedding
from llama_index.llms.bedrock import Bedrock
import asyncio
import chromadb
import numpy as np
import pandas as pd  # used by translate_dataframe
import kotoba.pdf_tools as p_t
import kotoba.text_clean as t_l

#from langchain_pinecone import PineconeVectorStore

#--------------------------------------parse-pdf--------------------------------------------------

import pymupdf
from pymupdf4llm.helpers.get_text_lines import get_raw_lines, is_white
from pymupdf4llm.helpers.multi_column import column_boxes

def pdf2tree(pdf_doc):
    """Extract a section/table tree from a PDF via llmsherpa.

    Args:
        pdf_doc: path to a PDF document.

    Returns:
        list[Document]: one document per section and per table.
    """
    from llmsherpa.readers import LayoutPDFReader
    llmsherpa_api_url = "https://readers.llmsherpa.com/api/document/developer/parseDocument?renderFormat=all"
    pdf_reader = LayoutPDFReader(llmsherpa_api_url)
    doc = pdf_reader.read_pdf(pdf_doc)
    docL = []
    for s in doc.sections():
        sectS = ''
        for p in s.children:
            sectS += p.to_text()
        if sectS == '':
            sectS = '-'
        docL.append(Document(page_content=sectS, metadata={"sect": s.to_context_text(), "lev": s.level}))
    for t in doc.tables():
        docL.append(Document(page_content=t.to_text(), metadata={"table": t.block_idx, "lev": t.level}))
    return docL

def pdf2md(pdf_doc, headers_split=None):
    """Convert a PDF to markdown and split it on markdown headers.

    Args:
        pdf_doc: path to a PDF document.
        headers_split: optional list of (header marker, metadata key) tuples.

    Returns:
        list[Document]: one document per header-delimited chunk.
    """
    #from langchain_community.document_loaders import PyMuPDFLoader
    import pymupdf4llm
    import pymupdf
    # hdr_info=lambda s: ... to find the most popular font sizes and derive header levels based on them
    imgDir = pdf_doc.split(".")[0] + "/"
    collN = re.sub(".pdf", "", pdf_doc).split("/")[-1]
    hdr_info = p_t.IdentifyHeaders(pdf_doc)
    md_text = pymupdf4llm.to_markdown(pdf_doc, write_images=True, image_path=imgDir, page_chunks=False, hdr_info=hdr_info)
    # parser = LlamaParse(api_key="...",result_type="markdown")
    # documents = parser.load_data("./my_file.pdf")
    #single_sentences_list = re.split(r'(?<=[.?!])\s+', essay)
    if headers_split is None:
        # headers_split = [("#","Chapter"),("##","Section"),('###','Subsection')]
        headers_split = [("####", "Chapter"), ("######", "Section"), ('########', 'Subsection')]
    splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_split)  #,strip_headers=True,return_each_line=False,)
    docL = splitter.split_text(md_text)
    # for i,d in enumerate(docL):
    #     titleS = "Document: " + collN + "\n".join([x + ": " + d.metadata[x] for x in d.metadata.keys()])
    #     textS = titleS + "\n" + d.page_content
    #     docL[i].page_content = textS
    #splitter = RecursiveCharacterTextSplitter(chunk_size = 1000, chunk_overlap=200)
    #splitter = SentenceSplitter(chunk_size=200,chunk_overlap=15)
    #elements = partition_pdf(filename=pdf_doc,strategy="hi_res",infer_table_structure=True,model_name="yolox")
    return docL

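# Usage sketch (illustrative, not part of the original workflow): the file name
# "manual.pdf" is a placeholder; it shows how the header-based chunks and their
# metadata produced by pdf2md can be inspected.
def example_pdf2md_usage(pdf_doc="manual.pdf"):
    docL = pdf2md(pdf_doc)
    for d in docL[:3]:
        print(d.metadata, len(d.page_content))
    return docL
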
def pdf_llama(pdf_doc, collN):
    os.environ["LLAMA_CLOUD_API_KEY"] = "llx-"  # placeholder, set a real key in the environment
    llm = get_llm()
    parsing_instructions = '''The document describes IT security policies for audit. It contains many tables. Answer questions using the information in this article and be precise.'''
    documents = LlamaParse(result_type="markdown", parsing_instructions=parsing_instructions).load_data(pdf_doc)
    print(documents[0].text[:1000])
    node_parser = MarkdownElementNodeParser(llm=llm, num_workers=8)
    nodes = node_parser.get_nodes_from_documents(documents)
    base_nodes, objects = node_parser.get_nodes_and_objects(nodes)
    return base_nodes, objects

def pdf_page(pdf_docs, chunk_size=100, chunk_overlap=15):
    """Extract text from PDF documents page by page.

    Args:
        pdf_docs: a list of PDF documents.

    Returns:
        list[Document]: one document per page.
    """
    from PyPDF2 import PdfReader
    docL = []
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for i, page in enumerate(pdf_reader.pages):
            text = page.extract_text()
            docL.append(Document(page_content=text, metadata={"page": i}))
    # text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size,chunk_overlap=chunk_overlap)
    # text_chunks = text_splitter.split_text(textL)
    return docL

#--------------------------------------llm-operations--------------------------------------------------

modL = ['amazon.titan-tg1-large', 'amazon.titan-text-lite-v1', 'amazon.titan-text-express-v1', 'anthropic.claude-instant-v1', 'anthropic.claude-v2:1', 'anthropic.claude-v2', 'anthropic.claude-3-sonnet-20240229-v1:0', 'anthropic.claude-3-haiku-20240307-v1:0', 'anthropic.claude-3-5-sonnet-20240620-v1:0', 'meta.llama3-8b-instruct-v1:0', 'meta.llama3-70b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mixtral-8x7b-instruct-v0:1', 'mistral.mistral-large-2402-v1:0']

def create_summary(textL, llm):
    chain = ({"doc": lambda x: x}
             #| ChatPromptTemplate.from_template("Summarize the following document:\n\n{doc}")
             | ChatPromptTemplate.from_template("Fassen Sie folgendes Dokument zusammen:\n\n{doc}")
             # | ChatOpenAI(max_retries=0)
             | llm
             | StrOutputParser())
    summL = chain.batch(textL, {"max_concurrency": 5})
    return summL

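# Usage sketch (assumption: Bedrock credentials are configured and "manual.pdf" is a
# placeholder): batch-summarize the chunks produced by pdf2md with one model.
def example_create_summary(pdf_doc="manual.pdf"):
    llm = get_llm_bedrock()
    docL = pdf2md(pdf_doc)
    textL = [d.page_content for d in docL]
    summL = create_summary(textL, llm)
    return summL
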
def create_keywords(docL):
    keyL = []
    for doc in docL:
        d = doc.copy()
        d.page_content = t_l.extract_keyword(doc.page_content)
        keyL.append(d)
    return keyL

def ask_openai(q, retL):
    chain = ({"doc": lambda x: x}
             | ChatPromptTemplate.from_template("The following document answers " + q + ":\n\n{doc}\n\nState your confidence in the answer")
             | ChatOpenAI(max_retries=0)
             | StrOutputParser())
    summaries = chain.batch(retL, {"max_concurrency": 5})
    return summaries

async def async_generate_response(llm, prompt):
    return await llm.ainvoke(prompt)


async def call_async(llm, qL):
    tasks = [async_generate_response(llm, q) for q in qL]
    responses = await asyncio.gather(*tasks)
    for idx, response in enumerate(responses):
        print(f"User {idx + 1} Response:", response)
    return responses


def collect_async(llm, qL):
    respL = asyncio.run(call_async(llm, qL))
    return respL

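# Usage sketch (illustrative questions): fan out several prompts concurrently against
# one model via the async helpers above.
def example_async_questions():
    llm = get_llm_bedrock()
    qL = ["What is a data processing agreement?",
          "Summarize ISO 27001 in one sentence."]
    return collect_async(llm, qL)
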
def ask_bedrock_image(f, baseDir):
    client = boto3.client("bedrock-runtime")
    with open(baseDir + "/" + f, 'rb') as image_file:
        encoded_image = base64.b64encode(image_file.read()).decode()

    model_id = "anthropic.claude-3-haiku-20240307-v1:0"
    payload = {"messages": [{"role": "user", "content": [{"type": "image", "source": {"type": "base64", "media_type": "image/jpeg", "data": encoded_image}}, {"type": "text", "text": "Describe the content of this image"}]}], "max_tokens": 1000, "anthropic_version": "bedrock-2023-05-31"}
    response = client.invoke_model(modelId=model_id, contentType="application/json", body=json.dumps(payload))
    output_binary = response["body"].read()
    output_json = json.loads(output_binary)
    output = output_json["content"][0]["text"]
    return output

def image_description(baseDir, fL):
    imgL = []
    for f in fL:
        print(f)
        caption = ask_bedrock_image(f, baseDir)
        imgL.append(Document(page_content=caption, metadata={"image_file": f}))
    return imgL

def rank_answers(llm, resL):
    doc = ".".join([str(i) + ") " + x for i, x in enumerate(resL)])
    chain = ({"doc": lambda x: x}
             | ChatPromptTemplate.from_template("What answer is the most confident in the following series:\n\n{doc}")
             # | ChatOpenAI(max_retries=0)
             | llm
             | StrOutputParser())
    ansL = chain.batch([doc], {"max_concurrency": 1})
    return ansL

def summarize_answers(llm, q, resL):
    doc = ".".join([str(i) + ") " + x for i, x in enumerate(resL)])
    chain = ({"doc": lambda x: x}
             #| ChatPromptTemplate.from_template("Please write one consistent paragraph summarizing the content of each answer discarding the non confident answers:\n\n{doc}")
             | ChatPromptTemplate.from_template("Bitte schreiben Sie einen zusammenhängenden Absatz, der den Inhalt jeder Antwort zusammenfasst, und lassen Sie die unsicheren Antworten weg:\n\n{doc}")
             # | ChatOpenAI(max_retries=0)
             | llm
             | StrOutputParser())
    ansL = chain.batch([doc], {"max_concurrency": 1})
    return ansL

def get_llm():
    llm = ChatOpenAI()
    return llm

def get_modelList():
    boto3_session = boto3.Session()
    bedrock = boto3_session.client(service_name="bedrock")
    modD = bedrock.list_foundation_models()['modelSummaries']
    modL = [x['modelId'] for x in modD if x['modelLifecycle']['status'] == 'ACTIVE']
    return modL

def test_modelList():
    modL1 = get_modelList()
    modL = []
    for l in modL1:
        try:
            llm = get_llm_bedrock(model_id=l)
            llm.invoke("2+2?")
            modL.append(l)
        except:
            print("no " + str(l))
    return modL

def get_llm_bedrock(model_id="anthropic.claude-3-sonnet-20240229-v1:0"):
    params = {"max_tokens_to_sample": 4096, "temperature": 0, "top_k": 0, "top_p": 0}  # currently unused, kept for reference
    boto3_session = boto3.Session()
    bedrock_runtime = boto3_session.client(service_name="bedrock-runtime")
    llm = ChatBedrock(client=bedrock_runtime, model_id=model_id,
                      model_kwargs={'temperature': 0}, streaming=True,)
    return llm

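# Minimal smoke test (illustrative prompt): instantiate a Bedrock chat model and
# invoke it once; any model id from modL can be passed in.
def example_bedrock_invoke(model_id="anthropic.claude-3-haiku-20240307-v1:0"):
    llm = get_llm_bedrock(model_id=model_id)
    res = llm.invoke("Reply with a single word: ready?")
    return res.content
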
def get_embeddings_bedrock(model_id="anthropic.claude-3-5-sonnet-20240620-v1:0"):
    # bedrock_client = boto3.client(service_name='bedrock-runtime')
    # embeddings = BedrockEmbeddings(model_id=model_id,client=bedrock_client)
    embeddings = BedrockEmbeddings()
    return embeddings

def get_embeddings_openai():
    from chromadb.utils import embedding_functions
    openai_ef = embedding_functions.OpenAIEmbeddingFunction(model_name="text-embedding-ada-002", api_key=os.environ['OPENAI_API_KEY'])
    return openai_ef

def get_embeddings_hugging():
    langchain_embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
    embeddings = create_langchain_embedding(langchain_embeddings)
    return embeddings

def get_embeddings():
    """pointer to the preferred option"""
    return get_embeddings_bedrock()
    #return get_embeddings_hugging()

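# Sketch (illustrative sentences): embed two strings with the preferred backend and
# compare them with a cosine similarity computed via numpy.
def example_embedding_similarity():
    embeddings = get_embeddings()
    v1, v2 = embeddings.embed_documents(["data protection", "Datenschutz"])
    v1, v2 = np.array(v1), np.array(v2)
    return float(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))
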
def get_chat_history(retriever):
    rephrase_prompt = hub.pull("langchain-ai/chat-langchain-rephrase")
    llm = ChatOpenAI()
    chain = create_history_aware_retriever(llm, retriever, rephrase_prompt)
    #chain.invoke({"input": "...", "chat_history": })
    return chain

def get_chat_message() -> BaseChatMessageHistory:
    return ChatMessageHistory()


#--------------------------------------vector-storage--------------------------------------------------

def embed_text(docL):
    try:
        textL = [x.page_content for x in docL]
    except:
        textL = [x.text for x in docL]
    embeddings = get_embeddings()
    embdL = embeddings.embed_documents(textL)
    return embdL

def create_collection(docL, collN, baseDir):
    """Create a persistent Chroma collection from a list of documents.

    Args:
        docL: document list
        collN: collection name
        baseDir: directory for persistent storage

    Returns:
        collT: collection of texts
    """
    #from langchain.vectorstores import Chroma
    #from langchain_community.vectorstores import Chroma
    from chromadb.utils import embedding_functions
    from chromadb import Documents, EmbeddingFunction, Embeddings
    # Chroma expects a chromadb embedding function here, not a langchain one
    session = boto3.Session()
    embeddings = embedding_functions.AmazonBedrockEmbeddingFunction(session=session)
    idL = ["%06d" % x for x in range(len(docL))]
    try:
        textL = [x.page_content for x in docL]
    except:
        textL = [x.text for x in docL]
    metaL = [x.metadata for x in docL]
    for i in range(len(docL)):
        metaL[i]['id'] = idL[i]
    client = chromadb.PersistentClient(path=baseDir + "/chroma")
    #embdL = embeddings.embed_documents(textL)
    try:
        client.delete_collection(name=collN)
    except:
        pass
    collT = client.create_collection(name=collN, metadata={"hnsw:space": "cosine"}, embedding_function=embeddings)
    #collT.add(embeddings=embdL,documents=textL,metadatas=metaL,ids=idL)
    collT.add(documents=textL, metadatas=metaL, ids=idL)
    return collT

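# End-to-end sketch (file name, directory and query are illustrative placeholders):
# chunk a PDF, store the chunks in a persistent Chroma collection and run a first
# similarity query against it.
def example_ingest_and_query(pdf_doc="manual.pdf", baseDir="./data/"):
    docL = pdf2md(pdf_doc)
    collN = re.sub(".pdf", "", pdf_doc).split("/")[-1]
    collT = create_collection(docL, collN, baseDir)
    res = collT.query(query_texts=["retention period for audit logs"], n_results=3)
    return res["documents"]
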
def load_chroma(collN, baseDir):
    client = chromadb.PersistentClient(path=baseDir + "/chroma")
    collT = client.get_or_create_collection(name=collN, metadata={"hnsw:space": "cosine", "hnsw:M": 32})
    return collT

def get_chroma_retriever(collN, baseDir):
    client = chromadb.PersistentClient(path=baseDir + "chroma/")
    col = client.get_or_create_collection(collN)
    embeddings = get_embeddings()
    db = Chroma(client=client, collection_name=collN, embedding_function=embeddings)
    retriever = db.as_retriever()
    return retriever

def get_chroma_query(collN, baseDir, model_id="amazon.titan-text-express-v1"):
    embed_model = BedrockEmbedding()
    llm = Bedrock(model=model_id)
    db = chromadb.PersistentClient(path=baseDir + "chroma/")
    coll = db.get_or_create_collection(collN)
    vector_store = ChromaVectorStore(chroma_collection=coll)
    storage_context = StorageContext.from_defaults(vector_store=vector_store)
    index = VectorStoreIndex.from_vector_store(vector_store, storage_context=storage_context, embed_model=embed_model, llm=llm)
    query_engine = index.as_query_engine(llm=llm)
    return query_engine

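# Sketch (collection name, directory and question are placeholders): query an
# existing Chroma collection through the llama_index query engine built above.
def example_chroma_query(collN="manual", baseDir="./data/"):
    query_engine = get_chroma_query(collN, baseDir)
    response = query_engine.query("Which roles may approve exceptions?")
    return str(response)
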
def list_collection(baseDir):
    client = chromadb.PersistentClient(path=baseDir + "chroma/")
    collL = [c.name for c in client.list_collections()]
    print(collL)
    return collL

def translate_dataframe(df, llm, prompt="Please translate from German to English the following paragraph:\n"):
    rowL = []
    for i, row in df.replace(np.nan, '').iterrows():
        print(i)
        colL = []
        for col in row:
            query = prompt + "\n" + col
            res = llm.invoke(query)
            colL.append(res.content)
        rowL.append(colL)
    rowL = np.array(rowL)
    transD = pd.DataFrame(rowL)
    transD.columns = list(df.columns)
    transD = transD.replace("Here is the translation from German to English:", "", regex=True)
    transD = transD.replace("Here is the English translation of the German paragraph:", "", regex=True)
    transD = transD.replace("\n\n", "", regex=True)
    return transD

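# Sketch (illustrative data): translate a small German dataframe cell by cell with a
# Bedrock chat model.
def example_translate():
    llm = get_llm_bedrock()
    df = pd.DataFrame({"text": ["Guten Morgen", "Bitte um Rückmeldung"]})
    return translate_dataframe(df, llm)
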
def create_neo4j(docL, collN, baseDir, neopass):
    from neo4j import GraphDatabase
    from neo4j_graphrag.indexes import create_vector_index
    from neo4j_graphrag.indexes import upsert_vector
    driver = GraphDatabase.driver("neo4j://localhost:7687", auth=("neo4j", neopass))
    create_vector_index(driver, collN, label="Chunk", embedding_property="embedding", dimensions=3072, similarity_fn="euclidean")
    try:
        textL = [x.page_content for x in docL]
    except:
        textL = [x.text for x in docL]
    metaL = [x.metadata for x in docL]
    embeddings = get_embeddings()
    embdL = embeddings.embed_documents(textL)
    # upsert one vector per chunk instead of a single node holding the whole list
    for i, embd in enumerate(embdL):
        upsert_vector(driver, node_id=i, embedding_property="embedding", vector=embd)
    driver.close()

def search_neo4j(q, llm, collN, neopass):
    from neo4j import GraphDatabase
    from neo4j_graphrag.generation import GraphRAG
    from neo4j_graphrag.retrievers import VectorRetriever
    driver = GraphDatabase.driver("neo4j://localhost:7687", auth=("neo4j", neopass))
    embeddings = get_embeddings()
    retriever = VectorRetriever(driver, collN, embeddings)
    rag = GraphRAG(retriever=retriever, llm=llm)
    #qV = embeddings.embed_documents(q)
    response = rag.search(query_text=q, retriever_config={"top_k": 5})
    driver.close()
    return response

def faiss_vector_storage(docL, collN, baseDir):
    """Create a FAISS vector store from the given documents.

    Args:
        docL: document list
        collN: collection name
        baseDir: directory for persistent storage

    Returns:
        FAISS: a FAISS vector store persisted under baseDir.
    """
    import faiss
    from llama_index.vector_stores.faiss import FaissVectorStore
    from langchain_community.vectorstores import FAISS
    # from langchain.vectorstores import FAISS
    # from langchain.indexes.vectorstore import VectorStoreIndexWrapper
    try:
        textL = [x.text for x in docL]
    except:
        textL = [x.page_content for x in docL]
    metaL = [x.metadata for x in docL]
    embeddings = get_embeddings()
    # vectorstore_faiss = FAISS.from_documents(docs,bedrock_embeddings)
    # Store the Faiss index to a file
    # faiss.write_index(vectorstore_faiss.index, "../../data/index/prompt_embeddings.index")
    vector_store = FAISS.from_texts(textL, embedding=embeddings, metadatas=metaL)
    vector_store.save_local(baseDir + "faiss/" + collN)
    # llama_index variant:
    #faiss_index = faiss.IndexFlatL2(1536) # dimensions of text-ada-embedding-002
    #vector_store = FaissVectorStore(faiss_index=faiss_index)
    #storage_context = StorageContext.from_defaults(vector_store=vector_store)
    #index = VectorStoreIndex.from_documents(docL, storage_context=storage_context)
    #index.storage_context.persist(persist_dir=baseDir+"./faiss")
    #return index
    return vector_store

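# Sketch (file name, directory and query are placeholders): build a FAISS store from
# the chunks of one PDF and run a similarity search over it.
def example_faiss_search(pdf_doc="manual.pdf", baseDir="./data/"):
    docL = pdf2md(pdf_doc)
    collN = re.sub(".pdf", "", pdf_doc).split("/")[-1]
    vector_store = faiss_vector_storage(docL, collN, baseDir)
    return vector_store.similarity_search("incident response times", k=3)
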
def load_faiss(collN, baseDir):
    import faiss
    from langchain_community.vectorstores import FAISS
    from llama_index.vector_stores.faiss import FaissVectorStore
    embeddings = get_embeddings()
    # load the store saved by faiss_vector_storage (langchain format)
    vector_store = FAISS.load_local(baseDir + "faiss/" + collN, embeddings, allow_dangerous_deserialization=True)
    return vector_store
    # llama_index variant, for indices persisted with StorageContext:
    # vector_store = FaissVectorStore.from_persist_dir(baseDir+"faiss/"+collN)
    # storage_context = StorageContext.from_defaults(vector_store=vector_store, persist_dir=baseDir+"faiss/"+collN)
    # index = load_index_from_storage(storage_context=storage_context)
    # return index

def search_keywords(docL, keyL):
    retL = []
    for d in docL:
        for k in keyL:
            if re.search(k, d.page_content):
                retL.append(d)
                break
    return retL

def qdrant_vector_storage(docL, collN, baseDir):
    """Create a Qdrant vector store from the given documents.

    Args:
        docL: document list
        collN: collection name
        baseDir: directory for persistent storage

    Returns:
       A Qdrant client with the populated collection.
    """
    from qdrant_client import QdrantClient
    from qdrant_client.models import PointStruct, VectorParams, Distance
    client = QdrantClient(host="localhost", port=6333)
    embdL = embed_text(docL)
    if not client.collection_exists(collN):
        client.create_collection(collection_name=collN, vectors_config=VectorParams(size=len(embdL[0]), distance=Distance.COSINE))
    # payload carries the chunk metadata
    pointL = [PointStruct(id=idx, vector=vector, payload=docL[idx].metadata) for idx, vector in enumerate(embdL)]
    client.upsert(collection_name=collN, points=pointL)
    #hits = client.search(collection_name=collN,query_vector=query_vector,limit=5)
    return client

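# Sketch (assumption: a local Qdrant instance on port 6333; collection name and
# question are placeholders): query the collection with an embedded question,
# mirroring the commented search call above.
def example_qdrant_search(collN="manual", q="Who owns the asset inventory?"):
    from qdrant_client import QdrantClient
    embeddings = get_embeddings()
    client = QdrantClient(host="localhost", port=6333)
    hits = client.search(collection_name=collN, query_vector=embeddings.embed_query(q), limit=5)
    return hits
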
def elastic_vector_storage(docL, collN, baseDir):
    """Create an Elasticsearch vector store from the given documents.

    Args:
        docL: document list (llama_index nodes)
        collN: index name
        baseDir: unused, kept for a uniform signature

    Returns:
        an Elasticsearch-backed vector store index.
    """
    from llama_index.vector_stores.elasticsearch import ElasticsearchStore, AsyncDenseVectorStrategy
    from llama_index.core import StorageContext, VectorStoreIndex
    vector_store = ElasticsearchStore(index_name=collN, es_url="http://localhost:9200", retrieval_strategy=AsyncDenseVectorStrategy())
    storage_context = StorageContext.from_defaults(vector_store=vector_store)
    index = VectorStoreIndex(docL, storage_context=storage_context)
    # retriever = index.as_retriever()
    # results = retriever.retrieve(query)
    # query_engine = index.as_query_engine()
    # response = query_engine.query(query)
    return index

def pinecone_vector_storage(text_chunks, baseDir):
    """Create a Pinecone vector store from the given text chunks.

    Args:
        text_chunks: a list of text chunks to be vectorized.

    Returns:
        PineconeVectorStore: a Pinecone vector store.
    """
    import streamlit as st  # the Pinecone settings come from the Streamlit session state
    from langchain_pinecone import PineconeVectorStore
    vector_store = None
    os.environ['PINECONE_API_KEY'] = st.session_state.pinecone_api_key
    if st.session_state.embedding_model == "HuggingFaceEmbeddings":
        embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
        try:
            # Clear existing index data if there's any
            PineconeVectorStore.from_existing_index(
                index_name=st.session_state.pinecone_index,
                embedding=embeddings
            ).delete(delete_all=True)
        except Exception as e:
            print("The index is empty")
        finally:
            vector_store = PineconeVectorStore.from_texts(
                text_chunks,
                embedding=embeddings,
                index_name=st.session_state.pinecone_index
            )
    return vector_store

#--------------------------------------chains--------------------------------------------------

def section_summary(docL, llm, collN):
    """Summarize each chunk of a document.

    Args:
        docL: document list
        llm: the chat model used for summarization
        collN: collection name, prepended to each chunk

    Returns:
        sumL: list of summary documents carrying the original metadata
    """
    textL = []
    for i, d in enumerate(docL):
        titleS = "Document: " + collN + "\n" + "\n".join([x + ": " + str(d.metadata[x]) for x in d.metadata.keys()])
        try:
            textS = titleS + "\n" + d.page_content
        except:
            textS = titleS + "\n" + d.text
        textL.append(textS)
    metaL = [x.metadata for x in docL]
    idL = ["%06d" % x for x in range(len(textL))]
    summL = create_summary(textL, llm)
    sumL = []
    for i, x in enumerate(summL):
        sumL.append(Document(page_content=x, metadata=metaL[i]))
    return sumL

def format_docL(docs):
    """Return the retrieved documents as a plain list."""
    return [doc for doc in docs]


def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)

def get_vectorstore(collN, baseDir):
    embeddings = get_embeddings()
    # vectorstore = Chroma.from_documents(documents, openai)
    client = chromadb.PersistentClient(path=baseDir + "/chroma")
    db = Chroma(client=client, embedding_function=embeddings, collection_name=collN, collection_metadata={"hnsw:space": "cosine"})
    #con = db.similarity_search_with_relevance_scores(q)
    return db

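# Sketch (collection name, directory and question are placeholders): retrieve scored
# passages for a question from the Chroma store, as hinted at in the comment above.
def example_scored_retrieval(collN="manual", baseDir="./data/", q="What is a data process agreement?"):
    db = get_vectorstore(collN, baseDir)
    return db.similarity_search_with_relevance_scores(q, k=5)
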
def get_retrieval_qa(collN, baseDir):
    from langchain.chains import RetrievalQA
    from langchain_openai import OpenAI
    db = get_vectorstore(collN, baseDir)
    qa = RetrievalQA.from_chain_type(llm=OpenAI(temperature=0), chain_type="stuff", retriever=db.as_retriever(), return_source_documents=True,)
    return qa

def get_chain_confidence(llm, collN, baseDir):
    prompt = PromptTemplate(input_variables=["question", "context"], template=c_p.promptConf)
    db = get_vectorstore(collN, baseDir)
    chain = ({'context': db.as_retriever(search_kwargs={'k': 5}) | format_docs, "question": RunnablePassthrough()} | prompt | llm | c_p.parserS)
    # chain = ({'context': db.as_retriever(search_kwargs={'k':3}) | format_docs, "question": RunnablePassthrough()} | prompt | llm)
    return chain

def format_confidence(res):
    try:
        res['answer'] = bool(c_p.yesRe.match(res['answer']))
        res['confidence'] = float(res['confidence'])
    except:
        pass
    return res

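# Sketch (collection name, directory and question are placeholders): run the
# confidence chain for one question and normalize the parsed answer.
def example_confidence(collN="manual", baseDir="./data/", q="Is remote access allowed?"):
    llm = get_llm_bedrock()
    chain = get_chain_confidence(llm, collN, baseDir)
    res = chain.invoke(q)
    return format_confidence(res)
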
def chain_inspect(model, retriever, question):
    def inspect(state):
        """Print the state passed between Runnables in a langchain and pass it on"""
        print(state)
        return state

    template = """Answer the question based only on the following context:
    {context}
    Question: {question}
    """
    prompt = ChatPromptTemplate.from_template(template)
    chain = (
        {"context": retriever, "question": RunnablePassthrough()}
        | RunnableLambda(inspect)  # Add the inspector here to print the intermediate results
        | prompt
        | model
        | StrOutputParser()
    )
    resp = chain.invoke(question)
    return resp

def create_conversational_rag_chain(model, retriever, get_history, agentDef=None):
    """
    Creates a conversational RAG chain: a question-answering (QA) system that takes the chat history into account.

    Parameters:
    model: the chat model selected by the user.
    retriever: the retriever to use for fetching relevant documents.
    get_history: callable returning the message history for a session id.

    Returns:
    RunnableWithMessageHistory: the conversational chain that generates the answer to the query.
    """
    contextualize_q_system_prompt = """Given a chat history and the latest user question \
    which might reference context in the chat history, formulate a standalone question \
    which can be understood without the chat history. Do NOT answer the question, \
    just reformulate it if needed and otherwise return it as is."""
    contextualize_q_prompt = ChatPromptTemplate.from_messages([("system", contextualize_q_system_prompt), MessagesPlaceholder("chat_history"), ("human", "{input}"),])
    history_aware_retriever = create_history_aware_retriever(model, retriever | format_docL, contextualize_q_prompt)
    if agentDef is None:
        agentDef = "You are an assistant for question-answering tasks. \n"
    qa_system_prompt = (agentDef + "Use the following pieces of retrieved context to answer the question. "
                        "If you don't know the answer, say that you don't know. "
                        # "Use three sentences maximum and keep the answer concise."
                        "\n\n"
                        "{context}")
    #prompt = ChatPromptTemplate.from_messages([("system", qa_system_prompt),("human", "{input}"),])
    qa_prompt = ChatPromptTemplate.from_messages([("system", qa_system_prompt), MessagesPlaceholder("chat_history"), ("human", "{input}"),])
    question_answer_chain = create_stuff_documents_chain(model, qa_prompt)
    # rag_chain = create_retrieval_chain(retriever, question_answer_chain)
    rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
    conversational_rag_chain = RunnableWithMessageHistory(rag_chain, get_history, input_messages_key="input", history_messages_key="chat_history", output_messages_key="answer",)
    return conversational_rag_chain

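# Sketch (collection name, directory, session id and question are placeholders): wire
# the conversational chain to a simple per-session in-memory history and ask one question.
def example_conversational_rag(collN="manual", baseDir="./data/"):
    model = get_llm_bedrock()
    retriever = get_chroma_retriever(collN, baseDir)
    storeD = {}

    def get_history(session_id: str) -> BaseChatMessageHistory:
        if session_id not in storeD:
            storeD[session_id] = ChatMessageHistory()
        return storeD[session_id]

    chain = create_conversational_rag_chain(model, retriever, get_history)
    res = chain.invoke({"input": "What is a data process agreement?"},
                       config={"configurable": {"session_id": "demo"}})
    return res["answer"]
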
def create_qa_chain(model, retriever, agentDef=None):
    """
    Creates a question-answering (QA) chain for a chatbot without considering historical context.

    Parameters:
    model: the chat model selected by the user.
    retriever: the retriever to use for fetching relevant documents.

    Returns:
    chain: takes a user's query as input and produces the chatbot's response as output.
    """
    if agentDef is None:
        agentDef = "You are an assistant for question-answering tasks. \n"
    qa_system_prompt = agentDef + """Use the following pieces of retrieved context to answer the question. \
    If you don't know the answer, just say that you don't know. \
    {context}"""
    qa_prompt_no_memory = ChatPromptTemplate.from_messages([("system", qa_system_prompt), ("human", "{input}"),])
    question_answer_chain = create_stuff_documents_chain(model, qa_prompt_no_memory)
    chain = create_retrieval_chain(retriever, question_answer_chain)
    return chain

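# Sketch (collection name, directory and question are placeholders): stateless
# question answering over one collection with the chain built above.
def example_qa(collN="manual", baseDir="./data/", q="Which systems are in scope for the audit?"):
    model = get_llm_bedrock()
    retriever = get_chroma_retriever(collN, baseDir)
    chain = create_qa_chain(model, retriever)
    res = chain.invoke({"input": q})
    return res["answer"]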