import re, json, os, sys
import instructor
import openai
import requests
import pandas as pd
from bs4 import BeautifulSoup
from flask import request, jsonify  # used by the Flask-served graph endpoints below
from graphviz import Digraph
from langchain_community.graphs import Neo4jGraph
from neo4j import GraphDatabase
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
from collections import defaultdict
import kotoba.knowledge_structure as k_s
import kotoba.chatbot_utils as c_t
import importlib
import networkx as nx
# import nxneo4j as nx  # alternative: nxneo4j exposes a networkx-like API backed by Neo4j
from graphdatascience import GraphDataScience
from langchain.chains import GraphCypherQAChain
from langchain_openai import ChatOpenAI
fUrl = "https://www.olympus-ims.com/en/rvi-products/iplex-nx/#!cms[focus]=cmsContent13653"
driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", os.environ['NEO4J_PASS']))
graph = Neo4jGraph("bolt://localhost:7687", "neo4j", os.environ['NEO4J_PASS'])
gds = GraphDataScience("bolt://localhost:7687", auth=("neo4j", os.environ['NEO4J_PASS']))
llm = c_t.get_llm()
chain = GraphCypherQAChain.from_llm(graph=graph, llm=llm, verbose=True)
response = chain.invoke({"query": "What was the cast of the Casino?"})
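# A minimal connectivity check for the driver created above, assuming the Neo4j
# instance is reachable; verify_connectivity() raises if the bolt endpoint is not up.
driver.verify_connectivity()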
def neo4j_node(driver, G):
    """Create one Neo4j Section node per node of the networkx graph G."""
    nodeL = G.nodes
    # target schema: Section {name: STRING, id: STRING}
    queryS = "CREATE\n"
    for n in nodeL:
        g = G.nodes[n]
        sectS = ""
        for i in ['Chapter', 'Section', 'Subsection']:
            try:
                sectS += "%s: %s | " % (i, g[i])
            except KeyError:
                pass
        queryS += '(sum_%s: Section {name: "%s", id: "%s"}),\n' % (str(n), sectS, str(n))
    queryS = queryS[:-2]  # drop the trailing ",\n"
    driver.execute_query(queryS)
    # gds.run_cypher(queryS)  # equivalent call through the GDS client; avoid running both
def neo4j_edge(driver, G):
    """Create a CONTAINS relationship in Neo4j for every edge of the networkx graph G."""
    edgeL = G.edges
    for (k, h) in edgeL:
        edge = edgeL[(k, h)]
        # match the Section nodes created by neo4j_node through their id property
        driver.execute_query(
            'MATCH (a:Section {id: $src}), (b:Section {id: $dst}) '
            'CREATE (a)-[r:CONTAINS]->(b) SET r = $props',
            src=str(k), dst=str(h), props=dict(edge))
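# Usage sketch, assuming G is a section graph like the one produced by
# build_document_graph below:
#   neo4j_node(driver, G)
#   neo4j_edge(driver, G)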
def neo4j_graph(driver, collN):
    """Create (or replace) a database for the collection; requires Neo4j Enterprise."""
    driver.execute_query("CREATE OR REPLACE DATABASE " + collN)
gds.run_cypher("""
CREATE
(m: City {name: "Malmö"}),
(l: City {name: "London"}),
(s: City {name: "San Mateo"}),
(m)-[:FLY_TO]->(l),
(l)-[:FLY_TO]->(m),
(l)-[:FLY_TO]->(s),
(s)-[:FLY_TO]->(l)
""")
res = gds.graph.project.estimate(["City"], "FLY_TO", readConcurrency=4)
G, result = gds.graph.project("offices", ["City"], "FLY_TO", readConcurrency=4)
G = gds.graph.get("offices")
G.drop()
query = """MATCH (n)-->(m)
RETURN gds.graph.project($graph_name, n, m, {sourceNodeLabels: $label, targetNodeLabels: $label, relationshipType: $rel_type})"""
G, result = gds.graph.cypher.project(query, database="neo4j", graph_name="offices", label="City", rel_type="FLY_TO")
n = G.node_count()
props = G.node_properties("City")
result = gds.degree.mutate(G, mutateProperty="degree")
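# Follow-up sketch: once "offices" is projected, any GDS algorithm can run on it,
# e.g. streaming PageRank scores back as a pandas DataFrame.
pagerank_df = gds.pageRank.stream(G)
print(pagerank_df.sort_values("score", ascending=False).head())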
nodeL = pd.read_csv(os.environ['HOME'] + '/lav/soft/raw/got-s1-nodes.csv')
edgeL = pd.read_csv(os.environ['HOME'] + '/lav/soft/raw/got-s1-edges.csv')
def pd2ndeo(nodeL=None, linkL=None):
    """Build an in-memory GDS graph from pandas node and relationship frames."""
    if nodeL is None:
        nodeL = pd.DataFrame({"nodeId": [0, 1, 2, 3], "labels": ["A", "B", "C", "A"], "prop1": [42, 1337, 8, 0], "otherProperty": [0.1, 0.2, 0.3, 0.4]})
    if linkL is None:
        linkL = pd.DataFrame({"sourceNodeId": [0, 1, 2, 3], "targetNodeId": [1, 2, 3, 0], "relationshipType": ["REL", "REL", "REL", "REL"], "weight": [0.0, 0.0, 0.1, 42.0]})
    G = gds.graph.construct("grid", nodeL, linkL)
    return G
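# Usage sketch: the default frames above describe a 4-node cycle.
# G = pd2ndeo()
# assert G.node_count() == 4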
def netx2neo(nx_G=None):
    """Load a networkx graph into GDS as a named in-memory graph."""
    if nx_G is None:
        nx_G = nx.DiGraph()
        nx_G.add_node(1, labels=["Person"], age=52)
        nx_G.add_node(42, labels=["Product", "Item"], cost=17.2)
        nx_G.add_edge(1, 42, relationshipType="BUYS", quantity=4)
    G = gds.graph.networkx.load(nx_G, "purchases")
    return G
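# Usage sketch: round-trip the toy purchase graph and check its size.
# G = netx2neo()
# print(G.node_count(), G.relationship_count())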
importlib.reload(c_t)
def build_document_graph(summL, collN, baseDir):
    """Arrange document summaries into a Chapter -> Section -> Subsection graph."""
    from collections import defaultdict
    def tree(): return defaultdict(tree)
    sL = ['Chapter', 'Section', 'Subsection', 'id']
    treeD = tree()
    for i in summL:
        d = dict(i.metadata)
        for s in sL:
            if s not in d:
                d[s] = ""  # missing metadata levels default to an empty label
        m = {"page_content": i.page_content, "metadata": i.metadata}
        treeD[d[sL[0]]][d[sL[1]]][d[sL[2]]] = m
    G = nx.DiGraph(name="document_graph")
    # G = nx.DiGraph(driver)
    G.add_node(0, type="document")
    for k1 in treeD.keys():
        G.add_node(k1, type=sL[0])
        G.add_edge(0, k1)  # attach each chapter to the document root
        for k2 in treeD[k1].keys():
            G.add_node(k2, type=sL[1])
            G.add_edge(k1, k2)
for k3 in treeD[k1][k2].keys():
G.add_node(k3,type=sL[2],text=treeD[k1][k2][k3]['page_content'])
G.add_edge(k2,k3)
if False:
#nx.draw_kamada_kawai(G,with_labels = True)
nx.draw_spring(G,with_labels = True)
plt.show()
    nx.write_graphml(G, baseDir + collN + ".graphml")
    # a few structural metrics of the section tree
    rankD = nx.pagerank(G)
    betweenD = nx.betweenness_centrality(G)
    closeD = nx.closeness_centrality(G)
    return G
some_dict = {'a': 1, 'b': 2}
with driver.session() as session:
    # parameter maps use the $param syntax; the older {param} form was removed in Neo4j 4
    session.run(query="CREATE (x) SET x = $dict_param", parameters={'dict_param': some_dict})
def build_knowledge_graph(summL, collN, baseDir):
    """Cluster the summary embeddings with k-means and build a cluster -> chunk graph."""
    embdL = c_t.embed_text(summL)
    kmeans = KMeans(init="random", n_clusters=15, n_init=10, max_iter=300, random_state=42)
    kmeans.fit(embdL)
    clustL = kmeans.labels_
treeD = defaultdict(list)
for i,j in enumerate(clustL):
treeD[j].append(summL[i])
print([len(treeD[x]) for x in treeD.keys()])
treeL = []
G = nx.DiGraph(name="knowledge_graph")
# G = nx.DiGraph(driver)
G.add_node("0",name="document",id="0",Chapter=collN)
    for k in treeD.keys():
        treeL.append("\n".join([x.page_content for x in treeD[k]]))
        # label the cluster node with the metadata of its first member
        G.add_node(k, **treeD[k][0].metadata)
        G.add_edge('0', k)
for x in treeD[k]:
i = x.metadata['id']
G.add_node(i,**x.metadata)
G.add_edge(k,i)
if False:
nx.draw_kamada_kawai(G,with_labels = True)
plt.show()
    nx.write_graphml(G, baseDir + collN + ".graphml")
    rankD = nx.pagerank(G)
    betweenD = nx.betweenness_centrality(G)
    closeD = nx.closeness_centrality(G)
    return G
if False:  # categorical metrics; reference snippet assuming ground-truth labels and data X
    scores = defaultdict(list)
    scores["Homogeneity"].append(metrics.homogeneity_score(labels, kmeans.labels_))
    scores["Completeness"].append(metrics.completeness_score(labels, kmeans.labels_))
    scores["V-measure"].append(metrics.v_measure_score(labels, kmeans.labels_))
    scores["Adjusted Rand-Index"].append(metrics.adjusted_rand_score(labels, kmeans.labels_))
    scores["Silhouette Coefficient"].append(metrics.silhouette_score(X, kmeans.labels_, sample_size=2000))
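# A minimal sketch, assuming embdL as computed in build_knowledge_graph: scan a
# few candidate cluster counts and keep the best silhouette score instead of
# hard-coding n_clusters=15.
def best_k(embdL, kL=range(5, 30, 5)):
    scoreD = {}
    for k in kL:
        labels = KMeans(n_clusters=k, n_init=10, random_state=42).fit_predict(embdL)
        scoreD[k] = silhouette_score(embdL, labels)
    return max(scoreD, key=scoreD.get)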
# Import movie information
movies_query = """
LOAD CSV WITH HEADERS FROM 'https://raw.githubusercontent.com/tomasonjo/blog-datasets/main/movies/movies_small.csv' AS row
MERGE (m:Movie {id:row.movieId})
SET m.released = date(row.released),
    m.title = row.title,
    m.imdbRating = toFloat(row.imdbRating)
FOREACH (director in split(row.director, '|') |
MERGE (p:Person {name:trim(director)})
MERGE (p)-[:DIRECTED]->(m))
FOREACH (actor in split(row.actors, '|') |
MERGE (p:Person {name:trim(actor)})
MERGE (p)-[:ACTED_IN]->(m))
FOREACH (genre in split(row.genres, '|') |
MERGE (g:Genre {name:trim(genre)})
MERGE (m)-[:IN_GENRE]->(g))
"""
graph.query(movies_query)
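# A quick sanity check, assuming the CSV import above succeeded: count the
# movies through the LangChain Neo4jGraph wrapper.
print(graph.query("MATCH (m:Movie) RETURN count(m) AS movies"))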
with open(baseDir + fName + '.html') as fByte:
fString = fByte.read()
response = requests.get(fUrl)
soup = BeautifulSoup(response.text, "html.parser")
paragraphs = soup.find_all("p")
text = " ".join([p.get_text() for p in paragraphs])
user_input = "spark"
openai.api_key = os.environ['OPENAI_API_KEY']
prompt = f"Help me understand following by describing as a detailed knowledge graph: {user_input}"
# response_model is enabled by the instructor monkey-patch (pre-1.0 instructor API);
# KnowledgeGraph is assumed to be the pydantic schema from kotoba.knowledge_structure
instructor.patch()
KnowledgeGraph = k_s.KnowledgeGraph
completion: KnowledgeGraph = openai.ChatCompletion.create(
    model="gpt-3.5-turbo-16k",
    messages=[{"role": "user", "content": prompt}],
    response_model=KnowledgeGraph)
response_data = completion.model_dump()
edges = response_data["edges"]
def _restore(e):
    # pydantic aliases the reserved word "from" as "from_"; restore it for the payload
    e["from"] = e["from_"]
    return e
response_data["edges"] = [_restore(e) for e in edges]
# get_response_data is not part of the neo4j driver API; assumed to be a helper on a
# custom wrapper that writes the extracted nodes/edges to the database
results = driver.get_response_data(response_data)
dot = Digraph(comment="Knowledge Graph")
response_dict = response_data
for node in response_dict.get("nodes", []):
    dot.node(node["id"], f"{node['label']} ({node['type']})")
for edge in response_dict.get("edges", []):
    dot.edge(edge["from"], edge["to"], label=edge["relationship"])
dot.render("knowledge_graph.gv", view=False)
dot.format = "png"
dot.render("static/knowledge_graph", view=False)
# request.url_root assumes a Flask request context
png_url = f"{request.url_root}static/knowledge_graph.png"
# get_graph_data is again assumed to be a method of a custom wrapper, not the neo4j driver
(nodes, edges) = driver.get_graph_data()
response_dict = response_data
nodes = [
{
"data": {
"id": node["id"],
"label": node["label"],
"color": node.get("color", "defaultColor"),
}
}
for node in response_dict["nodes"]
]
edges = [
{
"data": {
"source": edge["from"],
"target": edge["to"],
"label": edge["relationship"],
"color": edge.get("color", "defaultColor"),
}
}
for edge in response_dict["edges"]
]
graphD = jsonify({"elements": {"nodes": nodes, "edges": edges}})
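# The payload above follows the Cytoscape.js "elements" shape; a minimal Flask
# endpoint serving it might look like this (hypothetical route name):
# @app.route("/graph")
# def get_graph():
#     return graphD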