import os, re, sys, json, base64, string
import kotoba.chatbot_prompt as c_p
import boto3
from langchain import hub
from langchain.text_splitter import RecursiveCharacterTextSplitter, MarkdownTextSplitter, MarkdownHeaderTextSplitter
from langchain_aws import ChatBedrock
from langchain.prompts import ChatPromptTemplate, PromptTemplate
from langchain_core.runnables import RunnablePassthrough, RunnableLambda
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.output_parsers import StrOutputParser
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_openai import OpenAIEmbeddings
from langchain.embeddings import BedrockEmbeddings
#from chromadb.utils.embedding_functions import create_langchain_embedding
#from langchain.chat_models import ChatOpenAI
from langchain_community.chat_models import ChatOpenAI
#from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_core.documents import Document # with .page_content
#from llama_index.core import Document # with .text
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains.history_aware_retriever import create_history_aware_retriever
from langchain.chains.retrieval import create_retrieval_chain
# from langchain.chains import create_retrieval_chain
from langchain_community.chat_message_histories import ChatMessageHistory
from llama_index.core.node_parser import SimpleFileNodeParser, MarkdownElementNodeParser
from llama_parse import LlamaParse
from llama_index.core import SimpleDirectoryReader, load_index_from_storage, VectorStoreIndex, StorageContext
import chromadb
import kotoba.pdf_tools as p_t
#from langchain_pinecone import PineconeVectorStore
#--------------------------------------parse-pdf--------------------------------------------------


try:
    import pymupdf as fitz  # available with v1.24.3
except ImportError:
    import fitz
from pymupdf4llm.helpers.get_text_lines import get_raw_lines, is_white
from pymupdf4llm.helpers.multi_column import column_boxes


def pdf2tree(pdf_doc):
    """Parses a PDF into per-section documents using LayoutPDFReader.
    Args:
        pdf_doc: A PDF document.
    Returns:
        list: one Document per section, plus one per table.
    """
    from llmsherpa.readers import LayoutPDFReader
    llmsherpa_api_url = "https://readers.llmsherpa.com/api/document/developer/parseDocument?renderFormat=all"
    pdf_reader = LayoutPDFReader(llmsherpa_api_url)
    doc = pdf_reader.read_pdf(pdf_doc)
    docL = []
    for s in doc.sections():
        sectS = ''
        for p in s.children:
            sectS += p.to_text()
        if sectS == '':
            sectS = '-'
        docL.append(Document(page_content=sectS,metadata={"sect":s.to_context_text(),"lev":s.level}))
    for t in doc.tables():
        docL.append(Document(page_content=t.to_text(),metadata={"table":t.block_idx,"lev":t.level}))
    return docL


def pdf2md(pdf_doc,headers_split=None):
    """Converts a PDF to markdown and splits it on headers.
    Args:
        pdf_doc: A PDF document.
        headers_split: optional list of (header, name) tuples for the splitter.
    Returns:
        list: one Document per header section.
    """
    #from langchain_community.document_loaders import PyMuPDFLoader
    import pymupdf4llm
    import pymupdf
    # hdr_info=lambda s: ... to find the most popular font sizes and derive header levels based on them
    imgDir = pdf_doc.split(".")[0] + "/"
    collN = re.sub(".pdf","",pdf_doc).split("/")[-1]
    hdr_info = p_t.IdentifyHeaders(pdf_doc)
    md_text = pymupdf4llm.to_markdown(pdf_doc,write_images=True,image_path=imgDir,page_chunks=False,hdr_info=hdr_info)
    # parser = LlamaParse(api_key="...",result_type="markdown")
    # documents = parser.load_data("./my_file.pdf")
    #single_sentences_list = re.split(r'(?<=[.?!])\s+', essay)
    if headers_split is None:
        #headers_split = [("#","Chapter"),("##","Section"),('###','Subsection')]
        headers_split = [("####","Chapter"),("######","Section"),('########','Subsection')]
    splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_split)#,strip_headers=True,return_each_line=False,)
    docL = splitter.split_text(md_text)
    for i,d in enumerate(docL):
        titleS = "Document: " + collN + "\n" + "\n".join([x + ": " + d.metadata[x] for x in d.metadata.keys()])
        textS = titleS + "\n" + d.page_content
        docL[i].page_content = textS
    #splitter = RecursiveCharacterTextSplitter(chunk_size = 1000, chunk_overlap=200)
    #splitter = SentenceSplitter(chunk_size=200,chunk_overlap=15)
    #elements = partition_pdf(filename=pdf_doc,strategy="hi_res",infer_table_structure=True,model_name="yolox")
    return docL
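
# Minimal usage sketch (assumes "manual.pdf" exists; images from the PDF are
# written to a sibling "manual/" directory by pymupdf4llm):
# docL = pdf2md("manual.pdf")
# print(len(docL), docL[0].metadata)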


def pdf_llama(pdf_doc,collN):
    os.environ["LLAMA_CLOUD_API_KEY"] = "llx-"
    llm = get_llm()
    parsing_instructions = '''The document describes IT security policies for audit. It contains many tables. Answer questions using the information in this article and be precise.'''
    documents = LlamaParse(result_type="markdown", parsing_instructions=parsing_instructions).load_data(pdf_doc)
    print(documents[0].text[:1000])
    node_parser = MarkdownElementNodeParser(llm=llm, num_workers=8).from_defaults()
    nodes = node_parser.get_nodes_from_documents(documents)
    base_nodes, objects = node_parser.get_nodes_and_objects(nodes)
    return base_nodes, objects


def pdf_page(pdf_docs,chunk_size=100,chunk_overlap=15):
    """Extracts text from PDF documents, one document per page.
    Args:
        pdf_docs: A list of PDF documents.
    Returns:
        list: one Document per page, with the page number as metadata.
    """
    from PyPDF2 import PdfReader
    docL = []
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for i, page in enumerate(pdf_reader.pages):
            text = page.extract_text()
            docL.append(Document(page_content=text,metadata={"page":i}))
    # text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size,chunk_overlap=chunk_overlap)
    # text_chunks = text_splitter.split_text(textL)
    return docL
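
# Usage sketch (file name illustrative): one Document per page, with the page
# number kept in the metadata for later source citations.
# docL = pdf_page(["audit.pdf"])
# print(docL[0].metadata)  # {'page': 0}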


#--------------------------------------llm-operations--------------------------------------------------
   


def create_summary(textL,llm):
    chain = ({"doc": lambda x: x}
            | ChatPromptTemplate.from_template("Summarize the following document:\n\n{doc}")
            # | ChatOpenAI(max_retries=0)
            | llm
            | StrOutputParser())
    summL = chain.batch(textL, {"max_concurrency": 5})
    return summL


def ask_openai(q,retL):
    chain = ({"doc": lambda x: x}
            | ChatPromptTemplate.from_template("The following document answers "+q+":\n\n{doc} \n\n State your confidence")
            | ChatOpenAI(max_retries=0)
            | StrOutputParser())
    summaries = chain.batch(retL, {"max_concurrency": 5})
    return summaries


def ask_bedrock_image(f,baseDir):
    client = boto3.client("bedrock-runtime", region_name="us-east-1")
    model_id = "anthropic.claude-3-haiku-20240307-v1:0"  # multimodal model; the titan text model cannot describe images
    with open(baseDir + "/" + f, 'rb') as image_file:
        encoded_image = base64.b64encode(image_file.read()).decode()
    payload = {"messages": [{"role": "user","content": [{"type": "image","source": {"type": "base64","media_type": "image/jpeg","data": encoded_image}},{"type": "text","text": "Describe the content of this image"}]}],"max_tokens": 1000,"anthropic_version": "bedrock-2023-05-31"}
    response = client.invoke_model(modelId=model_id,contentType="application/json",body=json.dumps(payload))
    output_binary = response["body"].read()
    output_json = json.loads(output_binary)
    output = output_json["content"][0]["text"]
    return output


def image_description(baseDir,fL):
    imgL = []
    for f in fL:
        print(f)
        caption = ask_bedrock_image(f,baseDir)
        imgL.append(Document(page_content=caption,metadata={"image_file":f}))
    return imgL
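
# Sketch: caption all images extracted by pdf2md (directory is illustrative):
# imgL = image_description("manual/", os.listdir("manual/"))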


def rank_openai(resL):
    doc = ".".join([str(i) + ") " + x for i,x in enumerate(resL)])   
    chain = ({"doc": lambda x: x}
            | ChatPromptTemplate.from_template("What answer is the most confident in the following series:\n\n{doc}")
            | ChatOpenAI(max_retries=0)
            | StrOutputParser())
    summaries = chain.batch([doc], {"max_concurrency": 1})
    return summaries


def get_llm():
    llm = ChatOpenAI()
    return llm


def get_llm_bedrock(model_id="anthropic.claude-3-sonnet-20240229-v1:0"):
    boto3_session = boto3.Session(region_name='us-east-1')
    bedrock_runtime = boto3_session.client(service_name="bedrock-runtime")
    llm = ChatBedrock(client=bedrock_runtime,model_id=model_id,
                      model_kwargs={'temperature': 0},streaming=True,)
    return llm


def get_embeddings_bedrock():
    bedrock_client = boto3.client(service_name='bedrock-runtime',region_name='us-east-1')
    bedrock_embeddings = BedrockEmbeddings(model_id="amazon.titan-embed-text-v1",client=bedrock_client)
    return bedrock_embeddings


def get_embeddings_openai():
    from chromadb.utils import embedding_functions
    openai_ef = embedding_functions.OpenAIEmbeddingFunction(model_name="text-embedding-ada-002",api_key=os.environ['OPENAI_API_KEY'])
    return openai_ef


def get_embeddings_hugging():
    from chromadb.utils.embedding_functions import create_langchain_embedding
    langchain_embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
    embeddings = create_langchain_embedding(langchain_embeddings)
    return embeddings


def get_embeddings():
    """pointer to the preferred embedding option"""
    #return get_embeddings_bedrock()
    return get_embeddings_hugging()


def get_chat_history(retriever):
    rephrase_prompt = hub.pull("langchain-ai/chat-langchain-rephrase")
    llm = ChatOpenAI()
    chain = create_history_aware_retriever(llm, retriever, rephrase_prompt)
    #chain.invoke({"input": "...", "chat_history": })
    return chain
 
def get_chat_message() -> BaseChatMessageHistory:
    return ChatMessageHistory()
 
#--------------------------------------vector-storage--------------------------------------------------
 
def embed_text(docL):
    try:
        textL = [x.page_content for x in docL]
    except:
        textL = [x.text for x in docL]
    embeddings = get_embeddings()
    embdL = embeddings.embed_documents(textL)
    return embdL
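
# Sketch: embedding a couple of ad-hoc documents (contents are illustrative):
# embdL = embed_text([Document(page_content="hello"), Document(page_content="world")])
# print(len(embdL), len(embdL[0]))  # number of vectors, embedding dimension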
 
def create_collection(docL,collN,baseDir):
    """create two collections from a pdf.
    Args:
        pdf_doc: A PDF document.
    Returns:
        collT: collection of texts
    """
    #from langchain.vectorstores import Chroma
    #from langchain_community.vectorstores import Chroma
    from langchain_chroma import Chroma
    from chromadb.utils import embedding_functions
    idL = ["%06d" % x for x in range(len(docL))]
    try:
        textL = [x.page_content for x in docL]      
    except:
        textL = [x.text for x in docL]
    metaL = [x.metadata for x in docL]
    for i in range(len(docL)):
        metaL[i]['id'] = idL[i]
    client = chromadb.PersistentClient(path=baseDir + "/chroma")
    embeddings = get_embeddings()
    # embdL = embeddings.embed_documents(textL)
    try:
        client.delete_collection(name=collN)
    except:
        pass
    collT = client.create_collection(name=collN,metadata={"hnsw:space":"cosine"},embedding_function=embeddings)
    #collT.add(embeddings=embdL,documents=textL,metadatas=metaL,ids=idL)
    collT.add(documents=textL,metadatas=metaL,ids=idL)
    return collT
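
# Usage sketch chaining the parser and the collection builder (paths are
# illustrative; a persistent chroma store is created under ./data/chroma):
# docL = pdf2md("manual.pdf")
# collT = create_collection(docL, "manual", "./data")
# print(collT.count())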


def load_chroma(collN,baseDir):
    client = chromadb.PersistentClient(path=baseDir + "/chroma")
    collT = client.get_or_create_collection(name=collN,metadata={"hnsw:space":"cosine","hnsw:M": 32})
    return collT


def get_chroma_retriever(collN,baseDir):
    from langchain_chroma import Chroma
    client = chromadb.PersistentClient(path=baseDir + "chroma/")
    embeddings = get_embeddings()
    db = Chroma(client=client, collection_name=collN, embedding_function=embeddings)
    retriever = db.as_retriever()
    return retriever


def list_collection(baseDir):
    client = chromadb.PersistentClient(path=baseDir + "chroma/")
    collL = [c.name for c in client.list_collections()]
    print(collL)
    return collL


def create_neo4j(docL,collN,baseDir,neopass):
    from neo4j import GraphDatabase
    from neo4j_graphrag.indexes import create_vector_index
    from neo4j_graphrag.indexes import upsert_vector
    driver = GraphDatabase.driver("neo4j://localhost:7687", auth=("neo4j",neopass))
    create_vector_index(driver,collN,label="Chunk",embedding_property="embedding",dimensions=3072,similarity_fn="euclidean")
    try:
        textL = [x.page_content for x in docL]
    except:
        textL = [x.text for x in docL]
    metaL = [x.metadata for x in docL]
    embeddings = get_embeddings()
    embdL = embeddings.embed_documents(textL)
    upsert_vector(driver,node_id=0,embedding_property="embedding",vector=embdL,)
    driver.close()


def search_neo4j(q,llm,collN,neopass):
    from neo4j import GraphDatabase
    from neo4j_graphrag.generation import GraphRAG
    from neo4j_graphrag.retrievers import VectorRetriever
    driver = GraphDatabase.driver("neo4j://localhost:7687", auth=("neo4j",neopass))
    embeddings = get_embeddings()
    retriever = VectorRetriever(driver, collN, embeddings)
    rag = GraphRAG(retriever=retriever, llm=llm)
    #qV = embeddings.embed_documents(q)
    response = rag.search(query_text=q, retriever_config={"top_k": 5})
    driver.close()
    return response
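
# Sketch of a GraphRAG round trip (assumes a running Neo4j with the vector
# index created by create_neo4j and NEO4J_PASS set in the environment):
# llm = get_llm()
# resp = search_neo4j("what is a data process agreement?", llm, "manual", os.environ['NEO4J_PASS'])
# print(resp.answer)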
   
def faiss_vector_storage(docL,collN,baseDir):
    """Creates a FAISS vector store from the given text chunks.
    Args:
        text_chunks: A list of text chunks to be vectorized.
    Returns:
        FAISS: A FAISS vector store.
    """
    from llama_index.vector_stores.faiss import FaissVectorStore
    from langchain_community.vectorstores import FAISS
    # from langchain.vectorstores import FAISS
    # from langchain.indexes.vectorstore import VectorStoreIndexWrapper
    import faiss
    try:
        textL = [x.text for x in docL]
    except:
        textL = [x.page_content for x in docL]       
    metaL = [x.metadata for x in docL]
    #faiss_index = faiss.IndexFlatL2(1536) # dimensions of text-ada-embedding-002; unused, FAISS.from_texts builds its own index
    embeddings = get_embeddings()
    # vectorstore_faiss = FAISS.from_documents(docs,bedrock_embeddings)
    # Store the Faiss index to a file
    # faiss.write_index(vectorstore_faiss.index, "../../data/index/prompt_embeddings.index")
    vector_store = FAISS.from_texts(textL, embedding=embeddings)
    vector_store.save_local(baseDir + "faiss/" + collN)
    #vector_store = FaissVectorStore(faiss_index=faiss_index)
    #storage_context = StorageContext.from_defaults(vector_store=vector_store)
    #index = VectorStoreIndex.from_documents(docL, storage_context=storage_context)
    #index.storage_context.persist(persist_dir=baseDir+"./faiss")   
    #return index
    return vector_store
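
# Sketch: build the store once, reload it later (collection name illustrative):
# vs = faiss_vector_storage(docL, "manual", "./data/")
# vs = load_faiss("manual", "./data/")
# print(vs.similarity_search("data process agreement", k=3))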


def qdrant_vector_storage(docL,collN,baseDir):
    """Creates a Qdrant vector store from the given documents.
    Args:
        docL: list of embedding vectors to store
        collN: collection name
        baseDir: directory for persistent storage
    Returns:
        The Qdrant client with the populated collection.
    """
    from qdrant_client import QdrantClient
    from qdrant_client.models import PointStruct, VectorParams, Distance
    client = QdrantClient(host="localhost", port=6333)
    if not client.collection_exists(collN):
        client.create_collection(collection_name=collN,vectors_config=VectorParams(size=100, distance=Distance.COSINE))
    for idx, vector in enumerate(docL):
        pointL = [PointStruct(id=idx,vector=vector.tolist(),payload={"color": "red", "rand_number": idx % 10})]
        client.upsert(collection_name=collN,points=pointL)
    #hits = client.search(collection_name=collN,query_vector=query_vector,limit=5)
    return client
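
# Note: this function iterates docL as raw embedding vectors (it calls
# .tolist() on each item), so pass e.g. numpy rows, not Document objects:
# import numpy as np
# client = qdrant_vector_storage(np.array(embed_text(docL)), "manual", "./data/")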


def elastic_vector_storage(docL,collN,baseDir):
    """Creates a elasticsearch vector store from the given text chunks.
    Args:
        text_chunks: A list of text chunks to be vectorized.
    Returns:
        elastic search vector store.
    """
    from llama_index.vector_stores.elasticsearch import ElasticsearchStore, AsyncDenseVectorStrategy
    from llama_index.core import StorageContext, VectorStoreIndex
    vector_store = ElasticsearchStore(index_name=collN,es_url="http://localhost:9200",retrieval_strategy=AsyncDenseVectorStrategy())
    storage_context = StorageContext.from_defaults(vector_store=vector_store)
    index = VectorStoreIndex(docL, storage_context=storage_context)
    # retriever = index.as_retriever()
    # results = retriever.retrieve(query)
    # query_engine = index.as_query_engine()
    # response = query_engine.query(query)
    return index
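
# Sketch (assumes an Elasticsearch node on localhost:9200; note docL must
# contain llama-index documents/nodes, e.g. from pdf_llama):
# index = elastic_vector_storage(docL, "manual", "./data/")
# print(index.as_query_engine().query("what is a data process agreement?"))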


def load_faiss(collN,baseDir):
    from langchain_community.vectorstores import FAISS
    embeddings = get_embeddings()
    vector_store = FAISS.load_local(baseDir+"faiss/"+collN, embeddings, allow_dangerous_deserialization=True)
    # llama-index alternative to the langchain loader:
    #vector_store = FaissVectorStore.from_persist_dir(baseDir+"faiss/"+collN)
    #storage_context = StorageContext.from_defaults(vector_store=vector_store, persist_dir=baseDir+"faiss/"+collN)
    #index = load_index_from_storage(storage_context=storage_context)
    return vector_store




def pinecone_vector_storage(text_chunks,baseDir):
    """Creates a Pinecone vector store from the given text chunks.
    Args:
        text_chunks: A list of text chunks to be vectorized.
    Returns:
        PineconeVectorStore: A Pinecone vector store.
    """
    from langchain_pinecone import PineconeVectorStore
    import streamlit as st  # this helper reads its configuration from a streamlit session
    vector_store = None
    os.environ['PINECONE_API_KEY'] = st.session_state.pinecone_api_key
    if st.session_state.embedding_model == "HuggingFaceEmbeddings":
        embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
        try:
            # Clear existing index data if there's any
            PineconeVectorStore.from_existing_index(
                index_name=st.session_state.pinecone_index,
                embedding=embeddings
            ).delete(delete_all=True)
        except Exception as e:
            print("The index is empty")
        finally:
            vector_store = PineconeVectorStore.from_texts(
                text_chunks,
                embedding=embeddings,
                index_name=st.session_state.pinecone_index
            )
    return vector_store
   
#--------------------------------------chains--------------------------------------------------


def section_summary(docL,llm):
    """summarizes each document in the list, keeping its metadata.
    Args:
        docL: document list
        llm: the language model to use
    Returns:
        list: one summary Document per input document
    """
    try:
        textL = [x.page_content for x in docL]
    except:
        textL = [x.text for x in docL]
    metaL = [x.metadata for x in docL]
    summL = create_summary(textL,llm)
    sumL = []
    for i,x in enumerate(summL):
        sumL.append(Document(page_content=x,metadata=metaL[i]))
    return sumL


def format_docL(docs):
    """Returns the given documents as a list."""
    return [doc for doc in docs]


def format_docs(docs):
  return "\n\n".join(doc.page_content for doc in docs)


def get_vectorstore(collN,baseDir):
    from langchain_chroma import Chroma
    embeddings = get_embeddings()
    client = chromadb.PersistentClient(path=baseDir + "/chroma")
    db = Chroma(client=client,embedding_function=embeddings,collection_name=collN,collection_metadata={"hnsw:space":"cosine"})
    #con = db.similarity_search_with_relevance_scores(q)
    return db


def get_retrieval_qa(collN,baseDir):
    from langchain.chains import RetrievalQA
    from langchain_openai import OpenAI
    db = get_vectorstore(collN,baseDir)
    qa = RetrievalQA.from_chain_type(llm=OpenAI(temperature=0),chain_type="stuff",retriever=db.as_retriever(),return_source_documents=True,)
    return qa


def get_chain_confidence(llm,collN,baseDir):
    prompt = PromptTemplate(input_variables=["question","context"], template=c_p.promptConf)
    db = get_vectorstore(collN,baseDir)
    chain = ({'context': db.as_retriever(search_kwargs={'k':5}) | format_docs, "question": RunnablePassthrough()} | prompt | llm | c_p.parserS)
    # chain = ({'context': db.as_retriever(search_kwargs={'k':3}) | format_docs, "question": RunnablePassthrough()} | prompt | llm)
    return chain


def format_confidence(res):
    try:
        res['answer'] = bool(c_p.yesRe.match(res['answer']))
        res['confidence'] = float(res['confidence'])
    except:
        pass
    return res
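
# Sketch: the confidence chain returns a parsed dict which format_confidence
# normalizes into a boolean answer and a float confidence (paths illustrative):
# chain = get_chain_confidence(get_llm(), "manual", "./data/")
# res = format_confidence(chain.invoke("is personal data processed?"))
# print(res)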


def chain_inspect(model, retriever, question):
    def inspect(state):
        """Print the state passed between Runnables in a langchain and pass it on"""
        print(state)
        return state

    template = """Answer the question based only on the following context:
    {context}
    Question: {question}
    """
    prompt = ChatPromptTemplate.from_template(template)
    chain = (
        {"context": retriever, "question": RunnablePassthrough()}
        | RunnableLambda(inspect)  # Add the inspector here to print the intermediate results
        | prompt
        | model
        | StrOutputParser()
    )
    resp = chain.invoke(question)
    return resp


def create_conversational_rag_chain(model, retriever, get_history, agentDef=None):
dot.render("knowledge_graph.gv", view=False)
    """
dot.format = "png"
    Creates a conversational RAG chain. This is a question-answering (QA) system with the ability to consider historical context.
dot.render("static/knowledge_graph", view=False)
    Parameters:
png_url = f"{request.url_root}static/knowledge_graph.png"
    model: The model selected by the user.
    retriever: The retriever to use for fetching relevant documents.
    Returns:
    RunnableWithMessageHistory: The conversational chain that generates the answer to the query.
    """
    contextualize_q_system_prompt = """Given a chat history and the latest user question \
    which might reference context in the chat history, formulate a standalone question \
    which can be understood without the chat history. Do NOT answer the question, \
    just reformulate it if needed and otherwise return it as is."""
    contextualize_q_prompt = ChatPromptTemplate.from_messages([("system", contextualize_q_system_prompt),MessagesPlaceholder("chat_history"),("human", "{input}"),])
    history_aware_retriever = create_history_aware_retriever(model,retriever | format_docL, contextualize_q_prompt)
    if agentDef is None:
        agentDef = "You are an assistant for question-answering tasks. \n"
    qa_system_prompt = (agentDef + "Use the following pieces of retrieved context to answer the question. "
                    "If you don't know the answer, say that you don't know. "
                    # "Use three sentences maximum and keep the answer concise."
                    "\n\n"
                    "{context}")
    #prompt = ChatPromptTemplate.from_messages([("system", qa_system_prompt),("human", "{input}"),])
    qa_prompt = ChatPromptTemplate.from_messages([("system",qa_system_prompt),MessagesPlaceholder("chat_history"),("human", "{input}"),])
    question_answer_chain = create_stuff_documents_chain(model, qa_prompt)
    # rag_chain = create_retrieval_chain(retriever, question_answer_chain)
    rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
    conversational_rag_chain = RunnableWithMessageHistory(rag_chain,get_history,input_messages_key="input",history_messages_key="chat_history",output_messages_key="answer",)
    return conversational_rag_chain
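
# Sketch: wiring the chain to an in-memory history store keyed by session id
# (the store and the "demo" session id are illustrative):
# store = {}
# def get_history(session_id):
#     if session_id not in store:
#         store[session_id] = get_chat_message()
#     return store[session_id]
# chain = create_conversational_rag_chain(get_llm(), get_chroma_retriever("manual", "./data/"), get_history)
# chain.invoke({"input": "what is a data process agreement?"}, config={"configurable": {"session_id": "demo"}})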


def create_qa_chain(model, retriever, agentDef=None):
    """
    Creates a question-answering (QA) chain for a chatbot without considering historical context.
    Parameters:
    model: The model selected by the user.
    retriever: The retriever to use for fetching relevant documents.
    agentDef: An optional agent definition prepended to the system prompt.
    Returns:
    chain: it takes a user's query as input and produces a chatbot's response as output.
    """
    if agentDef is None:
        agentDef = "You are an assistant for question-answering tasks. \n"
    qa_system_prompt = agentDef + """Use the following pieces of retrieved context to answer the question. \
    If you don't know the answer, just say that you don't know. \
    {context}"""
    qa_prompt_no_memory = ChatPromptTemplate.from_messages([("system", qa_system_prompt),("human", "{input}"),])
    question_answer_chain = create_stuff_documents_chain(model, qa_prompt_no_memory)
    chain = create_retrieval_chain(retriever, question_answer_chain)
    return chain
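
# Sketch: history-free QA over a chroma retriever (collection name illustrative):
# retriever = get_chroma_retriever("manual", "./data/")
# chain = create_qa_chain(get_llm(), retriever)
# print(chain.invoke({"input": "what is a data process agreement?"})["answer"])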

#--------------------------------------knowledge-graph--------------------------------------------------

import re, json, os, sys
import instructor
import openai
import requests
import pandas as pd  # used below for the node/edge tables; missing from the original import list
from bs4 import BeautifulSoup  # used below for the html scraping snippet
from graphviz import Digraph
from langchain_community.graphs import Neo4jGraph
from neo4j import GraphDatabase

import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
from collections import defaultdict
import kotoba.knowledge_structure as k_s
import kotoba.chatbot_utils as c_t
import importlib
import networkx as nx

# import nxneo4j as nx

from graphdatascience import GraphDataScience
from langchain.chains import GraphCypherQAChain
from langchain_openai import ChatOpenAI

fUrl = "https://www.olympus-ims.com/en/rvi-products/iplex-nx/#!cms[focus]=cmsContent13653"
driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", os.environ['NEO4J_PASS']))
graph = Neo4jGraph("bolt://localhost:7687", "neo4j", os.environ['NEO4J_PASS'])
gds = GraphDataScience("bolt://localhost:7687", auth=("neo4j", os.environ['NEO4J_PASS']))

llm = c_t.get_llm()
chain = GraphCypherQAChain.from_llm(graph=graph, llm=llm, verbose=True)
response = chain.invoke({"query": "What was the cast of the Casino?"})

def neo4j_node(driver,G):
    nodeL = G.nodes
    nodeType = "Section {name: STRING, id: STRING}"
    queryS = "CREATE IF NOT EXISTS\n"
    for n in nodeL:
        g = G.nodes[n]
        sectS = ""
        for i in ['Chapter','Section','Subsection']:
            try:
                sectS += "%s: %s | " % (i,g[i])
            except:
                pass
        s = '(sum_' + str(n) + ': Section {name :"' + sectS + '"}),' + "\n"
        queryS += s
    queryS = queryS[:-2]
    driver.execute_query(queryS)
    gds.run_cypher(queryS)

def neo4j_edge(driver,G):
    edgeL = G.edges
    for (k,h) in edgeL:
        n = edgeL[(k,h)]
        #MATCH (k:Instruction {name: "Charlie Sheen"}), (oliver:Person {name: "Oliver Stone"})
        driver.execute_query('('+str(k)+')-[r:CONTAINS '+str(n)+']->('+str(h)+')')

def neo4j_graph(driver,collN):
    driver.execute_query("CREATE OR REPLACE DATABASE " + collN)
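
# Sketch: pushing a graph built by build_document_graph below into Neo4j
# (assumes the driver defined above and a populated nx graph G):
# neo4j_graph(driver, "manual")
# neo4j_node(driver, G)
# neo4j_edge(driver, G)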



gds.run_cypher("""

 CREATE
   (m: City {name: "Malmö"}),
   (l: City {name: "London"}),
   (s: City {name: "San Mateo"}),
   (m)-[:FLY_TO]->(l),
   (l)-[:FLY_TO]->(m),
   (l)-[:FLY_TO]->(s),
   (s)-[:FLY_TO]->(l)
 """)

res = gds.graph.project.estimate(["City"],"FLY_TO",readConcurrency=4)
G, result = gds.graph.project("offices",["City"],"FLY_TO",readConcurrency=4)
G = gds.graph.get("offices")
G.drop()
query = """MATCH (n)-->(m)
    RETURN gds.graph.project($graph_name, n, m, {sourceNodeLabels: $label,targetNodeLabels: $label,relationshipType: $rel_type})"""
G, result = gds.graph.cypher.project(query,database="neo4j",graph_name="offices",label="City",rel_type="FLY_TO")
n = G.node_count()
props = G.node_properties("City")
result = gds.degree.mutate(G, mutateProperty="degree")

nodeL = pd.read_csv(os.environ['HOME'] + '/lav/soft/raw/got-s1-nodes.csv')
edgeL = pd.read_csv(os.environ['HOME'] + '/lav/soft/raw/got-s1-edges.csv')


def pd2ndeo(nodeL=None,linkL=None):
    if nodeL is None:
        nodeL = pd.DataFrame({"nodeId": [0, 1, 2, 3],"labels": ["A", "B", "C", "A"],"prop1": [42, 1337, 8, 0],"otherProperty": [0.1, 0.2, 0.3, 0.4]})
    if linkL is None:
        linkL = pd.DataFrame({"sourceNodeId": [0, 1, 2, 3],"targetNodeId": [1, 2, 3, 0],"relationshipType": ["REL", "REL", "REL", "REL"],"weight": [0.0, 0.0, 0.1, 42.0]})
    G = gds.graph.construct("grid",nodeL,linkL)
    return G


def netx2neo(nx_G = None):
    if nx_G is None:
        nx_G = nx.DiGraph()
        nx_G.add_node(1, labels=["Person"], age=52)
        nx_G.add_node(42, labels=["Product", "Item"], cost=17.2)
        nx_G.add_edge(1, 42, relationshipType="BUYS", quantity=4)
    G = gds.graph.networkx.load(nx_G, "purchases")
    return G
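
# Sketch: both constructors return a GDS graph object that can be inspected
# and dropped like the projected "offices" example above:
# G = pd2ndeo()
# print(G.node_count())
# G.drop()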


importlib.reload(c_t)


def build_document_graph(summL,collN,baseDir):
    from collections import defaultdict
    def tree(): return defaultdict(tree)
    sL = ['Chapter', 'Section', 'Subsection','id']
    treeD = tree()
    for i in summL:
        d = dict(i.metadata)
        for s in sL:
            if s not in d:
                d[s] = ''
        m = {"page_content":i.page_content,"metadata":i.metadata}
        treeD[d[sL[0]]][d[sL[1]]][d[sL[2]]] = m

    G = nx.DiGraph(name="document_graph")
    # G = nx.DiGraph(driver)
    G.add_node(0,type="document")
    for k1 in treeD.keys():
        G.add_node(k1,type=sL[0])
        G.add_edge(0,k1)  # link each chapter to the document root
        for k2 in treeD[k1].keys():
            G.add_node(k2,type=sL[1])
            G.add_edge(k1,k2)
            for k3 in treeD[k1][k2].keys():
                G.add_node(k3,type=sL[2],text=treeD[k1][k2][k3]['page_content'])
                G.add_edge(k2,k3)

    if False:
        #nx.draw_kamada_kawai(G,with_labels = True)
        nx.draw_spring(G,with_labels = True)
        plt.show()
    nx.write_graphml(G,baseDir + collN + ".graphml")
    nx.pagerank(G)
    nx.betweenness_centrality(G)
    nx.closeness_centrality(G)
    some_dict = {'a': 1, 'b': 2}
    session = driver.session()
    session.run(query="CREATE (x) SET x = $dict_param",parameters={'dict_param': some_dict})
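
# Sketch: build the hierarchy graph from per-section summaries produced by the
# chatbot utils module (collection name and paths are illustrative):
# summL = c_t.section_summary(docL, c_t.get_llm())
# build_document_graph(summL, "manual", "./data/")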

def build_knowledge_graph(summL,collN,baseDir):
    embdL = c_t.embed_text(summL)
    kmeans = KMeans(init="random",n_clusters=15,n_init=10,max_iter=300,random_state=42)
    kmeans.fit(embdL)
    clustL = kmeans.labels_
    treeD = defaultdict(list)
    for i,j in enumerate(clustL):
        treeD[j].append(summL[i])
    print([len(treeD[x]) for x in treeD.keys()])
    treeL = []
    G = nx.DiGraph(name="knowledge_graph")
    # G = nx.DiGraph(driver)
    G.add_node("0",name="document",id="0",Chapter=collN)
    for k in treeD.keys():
        treeL.append("\n".join([x.page_content for x in treeD[k]]))
        G.add_node(k)  # one node per cluster
        G.add_edge('0',k)
        for x in treeD[k]:
            i = x.metadata['id']
            G.add_node(i,**x.metadata)
            G.add_edge(k,i)
    if False:
        nx.draw_kamada_kawai(G,with_labels = True)
        plt.show()
    nx.write_graphml(G,baseDir + collN + ".graphml")
    nx.pagerank(G)
    nx.betweenness_centrality(G)
    nx.closeness_centrality(G)
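
# Sketch: the clustering variant expects the same summary documents; the
# resulting graphml file can be opened in e.g. Gephi (paths illustrative):
# build_knowledge_graph(summL, "manual", "./data/")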


if False: #categorical metrics
    scores = defaultdict(list)
    scores["Homogeneity"].append(metrics.homogeneity_score(labels, kmeans.labels_))
    scores["Completeness"].append(metrics.completeness_score(labels, kmeans.labels_))
    scores["V-measure"].append(metrics.v_measure_score(labels, kmeans.labels_))
    scores["Adjusted Rand-Index"].append(metrics.adjusted_rand_score(labels, km.labels_))
    scores["Silhouette Coefficient"].append(metrics.silhouette_score(X, km.labels_, sample_size=2000))



# Import movie information

movies_query = """
LOAD CSV WITH HEADERS FROM
'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/movies_small.csv'
AS row
MERGE (m:Movie {id:row.movieId})
SET m.released = date(row.released),
    m.title = row.title,
    m.imdbRating = toFloat(row.imdbRating)
FOREACH (director in split(row.director, '|') |
    MERGE (p:Person {name:trim(director)})
    MERGE (p)-[:DIRECTED]->(m))
FOREACH (actor in split(row.actors, '|') |
    MERGE (p:Person {name:trim(actor)})
    MERGE (p)-[:ACTED_IN]->(m))
FOREACH (genre in split(row.genres, '|') |
    MERGE (g:Genre {name:trim(genre)})
    MERGE (m)-[:IN_GENRE]->(g))
"""

graph.query(movies_query)
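
# Quick sanity check for the loader above (sketch):
# print(graph.query("MATCH (m:Movie) RETURN count(m) AS n"))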


with open(baseDir + fName + '.html') as fByte:
    fString = fByte.read()

response = requests.get(fUrl)
soup = BeautifulSoup(response.text, "html.parser")
paragraphs = soup.find_all("p")
text = " ".join([p.get_text() for p in paragraphs])

user_input = "spark" openai.api_key = os.environ['OPENAI_API_KEY'] prompt = f"Help me understand following by describing as a detailed knowledge graph: {user_input}" completion: KnowledgeGraph = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k",messages=[{"role": "user","content": prompt,}],response_model=KnowledgeGraph,) response_data = completion.model_dump() edges = response_data["edges"] def _restore(e):

   e["from"] = e["from_"]
   return e

response_data["edges"] = [_restore(e) for e in edges] results = driver.get_response_data(response_data)

dot = Digraph(comment="Knowledge Graph")
response_dict = response_data
for node in response_dict.get("nodes", []):
    dot.node(node["id"], f"{node['label']} ({node['type']})")

for edge in response_dict.get("edges", []):
    dot.edge(edge["from"], edge["to"], label=edge["relationship"])

dot.render("knowledge_graph.gv", view=False) dot.format = "png" dot.render("static/knowledge_graph", view=False) png_url = f"{request.url_root}static/knowledge_graph.png"

(nodes, edges) = driver.get_graph_data()
response_dict = response_data
nodes = [
    {
        "data": {
            "id": node["id"],
            "label": node["label"],
            "color": node.get("color", "defaultColor"),
        }
    }
    for node in response_dict["nodes"]
]
edges = [
    {
        "data": {
            "source": edge["from"],
            "target": edge["to"],
            "label": edge["relationship"],
            "color": edge.get("color", "defaultColor"),
        }
    }
    for edge in response_dict["edges"]
]
graphD = jsonify({"elements": {"nodes": nodes, "edges": edges}})