Ciccia: Difference between revisions
|  (Blanked the page) Tags: Blanking Manual revert | No edit summary | ||
| Line 1: | Line 1: | ||
| from __future__ import annotations | |||
| import re | |||
| from typing import Any, Dict, List, Tuple, TypedDict, Union | |||
| from langchain_core.documents import Document | |||
| from langchain_text_splitters.base import Language | |||
| from langchain_text_splitters.character import RecursiveCharacterTextSplitter | |||
| from pymupdf4llm.helpers.get_text_lines import get_raw_lines, is_white | |||
| import matplotlib.pyplot as plt | |||
| import pymupdf | |||
| class LineType(TypedDict): | |||
|     """Line type as typed dict.""" | |||
|     metadata: Dict[str, str] | |||
|     content: str | |||
| class HeaderType(TypedDict): | |||
|     """Header type as typed dict.""" | |||
|     level: int | |||
|     name: str | |||
|     data: str | |||
| class IdentifyHeaders: | |||
|     """Compute data for identifying header text.""" | |||
|     def __init__(self,pdf_doc: str,page = None,body_limit: float = 10): | |||
|         """Read all text and make a dictionary of fontsizes. | |||
|         Args: | |||
|             body_limit: consider text with larger font size as some header | |||
|         """ | |||
|         mydoc = pymupdf.open(pdf_doc) | |||
|         fontsizes = {} | |||
|         pages = range(mydoc.page_count) | |||
|         for pno in pages: | |||
|             page = mydoc.load_page(pno) | |||
|             blocks = page.get_text("dict", flags=pymupdf.TEXTFLAGS_TEXT)["blocks"] | |||
|             for span in [  # look at all non-empty horizontal spans | |||
|                 s | |||
|                 for b in blocks | |||
|                 for l in b["lines"] | |||
|                 for s in l["spans"] | |||
|                 if not is_white(s["text"]) | |||
|             ]: | |||
|                 fontsz = round(span["size"]) | |||
|                 count = fontsizes.get(fontsz, 0) + len(span["text"].strip()) | |||
|                 fontsizes[fontsz] = count | |||
|         mydoc.close() | |||
|         self.header_id = {} | |||
|         temp = sorted([(k, v) for k, v in fontsizes.items()],key=lambda i: i[1],reverse=True,) | |||
|         b_limit = temp[0][0] | |||
|         sizes = sorted([f for f in fontsizes.keys() if f > b_limit],reverse=True,)[:8] | |||
|         for i, size in enumerate(sizes): | |||
|             self.header_id[size] = "#" * (i + 1) + " " | |||
|     def get_header_id(self, span: dict, page=None) -> str: | |||
|         """Return appropriate markdown header prefix. | |||
|         Given a text span from a "dict"/"rawdict" extraction, determine the | |||
|         markdown header prefix string of 0 to n concatenated '#' characters. | |||
|         """ | |||
|         fontsize = round(span["size"])  # compute fontsize | |||
|         hdr_id = self.header_id.get(fontsize, "") | |||
|         return hdr_id | |||
| def aggregate_lines_to_chunks(lines: List[LineType]) -> List[Document]: | |||
|     """Combine lines with common metadata into chunks | |||
|         Args: | |||
|             lines: Line of text / associated header metadata | |||
|     """ | |||
| def split_text(text: str,headers_split: List[Tuple[str, str]]) -> List[Document]: | |||
|     """Split markdown file | |||
|         Args: | |||
|             text: Markdown file""" | |||
|     lines = text.split("\n") | |||
|     lines_with_metadata: List[LineType] = [] | |||
|     current_content: List[str] = [] | |||
|     current_metadata: Dict[str, str] = {} | |||
|     current_metadata['type'] = 'text' | |||
|     header_stack: List[HeaderType] = [] | |||
|     initial_metadata: Dict[str, str] = {} | |||
|     in_code_block = False | |||
|     opening_fence = "" | |||
|     for line in lines: | |||
|         stripped_line = line.strip() | |||
|         stripped_line = "".join(filter(str.isprintable, stripped_line)) | |||
|         if stripped_line == '': | |||
|             continue | |||
|         current_header_level = 0 | |||
|         if stripped_line.startswith("-"): | |||
|             initial_metadata['type'] = 'break' | |||
|         elif stripped_line.startswith("```") or stripped_line.startswith("[[Special:Contributions/84.173.129.224|84.173.129.224]]"): | |||
|             initial_metadata['type'] = 'code' | |||
|             in_code_block = True | |||
|             opening_fence = "```" | |||
|         elif stripped_line.startswith("|"): | |||
|             initial_metadata['type'] = 'table' | |||
|         elif not in_code_block: | |||
|             initial_metadata['type'] = 'text' | |||
|         if in_code_block: | |||
|             if stripped_line.startswith(opening_fence): | |||
|                 in_code_block = False | |||
|                 opening_fence = "" | |||
|         for sep, name in headers_split: #if header create index | |||
|             if stripped_line.startswith(sep) and (len(stripped_line) == len(sep) or stripped_line[len(sep)] == " "): | |||
|                 current_header_level = sep.count("#") | |||
|                 while (header_stack and header_stack[-1]["level"] >= current_header_level): | |||
|                     popped_header = header_stack.pop() | |||
|                     if popped_header["name"] in initial_metadata: | |||
|                         initial_metadata.pop(popped_header["name"]) | |||
|                 header: HeaderType = {"level": current_header_level,"name": name,"data": stripped_line[len(sep):].strip()} | |||
|                 header_stack.append(header) | |||
|                 initial_metadata[name] = header["data"] | |||
|         if current_metadata['type'] != initial_metadata['type']: | |||
|             lines_with_metadata.append({"content":"\n".join(current_content),"metadata":current_metadata.copy()}) | |||
|             current_content.clear()             | |||
|         current_metadata = initial_metadata.copy() | |||
|         if current_header_level == 0: | |||
|             current_content.append(stripped_line) | |||
|         else: | |||
|             lines_with_metadata.append({"content":"\n".join(current_content),"metadata":current_metadata.copy()}) | |||
|             current_content.clear() | |||
|     lines_with_metadata.append({"content":"\n".join(current_content),"metadata":current_metadata.copy()}) | |||
|     #lines_with_metadata.append({"content":"\n".join(current_content),"metadata":current_metadata}) | |||
|     docL = [x for x in lines_with_metadata if x['content'] != ''] | |||
|     docL = [x for x in docL if x['metadata']['type'] != 'break'] | |||
|     # aggregated_chunks: List[LineType] = [] | |||
|     # for line in lines_with_metadata: | |||
|     #     if (aggregated_chunks and aggregated_chunks[-1]["metadata"] == line["metadata"]): | |||
|     #         aggregated_chunks[-1]["content"] += "  \n" + line["content"] | |||
|     #     elif (aggregated_chunks | |||
|     #           and aggregated_chunks[-1]["metadata"] != line["metadata"] | |||
|     #           and len(aggregated_chunks[-1]["metadata"]) < len(line["metadata"]) | |||
|     #           and aggregated_chunks[-1]["content"].split("\n")[-1][0] == "#" | |||
|     #           and False | |||
|     #         ): | |||
|     #         aggregated_chunks[-1]["content"] += "  \n" + line["content"] | |||
|     #         aggregated_chunks[-1]["metadata"] = line["metadata"] | |||
|     #     else: | |||
|     #         aggregated_chunks.append(line) | |||
|     return [ | |||
|         Document(page_content=chunk["content"], metadata=chunk["metadata"]) | |||
|         for chunk in docL | |||
|     ] | |||
| def chunk_distibution(docL): | |||
|     statL = [] | |||
|     for d in docL: | |||
|         s = d.page_content | |||
|         statL.append({"characters":len(s),"phrases":s.count("."),"lines":s.count("\n"),"words":s.count(" ")}) | |||
|     statD = pd.DataFrame(statL) | |||
|     statD.sort_values("lines",inplace=True) | |||
|     statD.hist() | |||
|     plt.show() | |||
Revision as of 14:19, 20 February 2025
from __future__ import annotations import re from typing import Any, Dict, List, Tuple, TypedDict, Union from langchain_core.documents import Document from langchain_text_splitters.base import Language from langchain_text_splitters.character import RecursiveCharacterTextSplitter from pymupdf4llm.helpers.get_text_lines import get_raw_lines, is_white import matplotlib.pyplot as plt import pymupdf
class LineType(TypedDict):
"""Line type as typed dict.""" metadata: Dict[str, str] content: str
class HeaderType(TypedDict):
"""Header type as typed dict.""" level: int name: str data: str
class IdentifyHeaders:
   """Compute data for identifying header text."""
   def __init__(self,pdf_doc: str,page = None,body_limit: float = 10):
       """Read all text and make a dictionary of fontsizes.
       Args:
           body_limit: consider text with larger font size as some header
       """
       mydoc = pymupdf.open(pdf_doc)
       fontsizes = {}
       pages = range(mydoc.page_count)
       for pno in pages:
           page = mydoc.load_page(pno)
           blocks = page.get_text("dict", flags=pymupdf.TEXTFLAGS_TEXT)["blocks"]
           for span in [  # look at all non-empty horizontal spans
               s
               for b in blocks
               for l in b["lines"]
               for s in l["spans"]
               if not is_white(s["text"])
           ]:
               fontsz = round(span["size"])
               count = fontsizes.get(fontsz, 0) + len(span["text"].strip())
               fontsizes[fontsz] = count
       mydoc.close()
       self.header_id = {}
       temp = sorted([(k, v) for k, v in fontsizes.items()],key=lambda i: i[1],reverse=True,)
       b_limit = temp[0][0]
       sizes = sorted([f for f in fontsizes.keys() if f > b_limit],reverse=True,)[:8]
       for i, size in enumerate(sizes):
           self.header_id[size] = "#" * (i + 1) + " "
   def get_header_id(self, span: dict, page=None) -> str:
       """Return appropriate markdown header prefix.
       Given a text span from a "dict"/"rawdict" extraction, determine the
       markdown header prefix string of 0 to n concatenated '#' characters.
       """
       fontsize = round(span["size"])  # compute fontsize
       hdr_id = self.header_id.get(fontsize, "")
       return hdr_id
def aggregate_lines_to_chunks(lines: List[LineType]) -> List[Document]:
   """Combine lines with common metadata into chunks
       Args:
           lines: Line of text / associated header metadata
   """
   
def split_text(text: str,headers_split: List[Tuple[str, str]]) -> List[Document]:
   """Split markdown file
       Args:
           text: Markdown file"""
   lines = text.split("\n")
   lines_with_metadata: List[LineType] = []
   current_content: List[str] = []
   current_metadata: Dict[str, str] = {}
   current_metadata['type'] = 'text'
   header_stack: List[HeaderType] = []
   initial_metadata: Dict[str, str] = {}
   in_code_block = False
   opening_fence = ""
   for line in lines:
       stripped_line = line.strip()
       stripped_line = "".join(filter(str.isprintable, stripped_line))
       if stripped_line == :
           continue
       current_header_level = 0
       if stripped_line.startswith("-"):
           initial_metadata['type'] = 'break'
       elif stripped_line.startswith("```") or stripped_line.startswith("84.173.129.224"):
           initial_metadata['type'] = 'code'
           in_code_block = True
           opening_fence = "```"
       elif stripped_line.startswith("|"):
           initial_metadata['type'] = 'table'
       elif not in_code_block:
           initial_metadata['type'] = 'text'
       if in_code_block:
           if stripped_line.startswith(opening_fence):
               in_code_block = False
               opening_fence = ""
       for sep, name in headers_split: #if header create index
           if stripped_line.startswith(sep) and (len(stripped_line) == len(sep) or stripped_line[len(sep)] == " "):
               current_header_level = sep.count("#")
               while (header_stack and header_stack[-1]["level"] >= current_header_level):
                   popped_header = header_stack.pop()
                   if popped_header["name"] in initial_metadata:
                       initial_metadata.pop(popped_header["name"])
               header: HeaderType = {"level": current_header_level,"name": name,"data": stripped_line[len(sep):].strip()}
               header_stack.append(header)
               initial_metadata[name] = header["data"]
       if current_metadata['type'] != initial_metadata['type']:
           lines_with_metadata.append({"content":"\n".join(current_content),"metadata":current_metadata.copy()})
           current_content.clear()            
       current_metadata = initial_metadata.copy()
       if current_header_level == 0:
           current_content.append(stripped_line)
       else:
           lines_with_metadata.append({"content":"\n".join(current_content),"metadata":current_metadata.copy()})
           current_content.clear()
   lines_with_metadata.append({"content":"\n".join(current_content),"metadata":current_metadata.copy()})
   #lines_with_metadata.append({"content":"\n".join(current_content),"metadata":current_metadata})
   docL = [x for x in lines_with_metadata if x['content'] != ]
   docL = [x for x in docL if x['metadata']['type'] != 'break']
   # aggregated_chunks: List[LineType] = []
   # for line in lines_with_metadata:
   #     if (aggregated_chunks and aggregated_chunks[-1]["metadata"] == line["metadata"]):
   #         aggregated_chunks[-1]["content"] += "  \n" + line["content"]
   #     elif (aggregated_chunks
   #           and aggregated_chunks[-1]["metadata"] != line["metadata"]
   #           and len(aggregated_chunks[-1]["metadata"]) < len(line["metadata"])
   #           and aggregated_chunks[-1]["content"].split("\n")[-1][0] == "#"
   #           and False
   #         ):
   #         aggregated_chunks[-1]["content"] += "  \n" + line["content"]
   #         aggregated_chunks[-1]["metadata"] = line["metadata"]
   #     else:
   #         aggregated_chunks.append(line)
   return [
       Document(page_content=chunk["content"], metadata=chunk["metadata"])
       for chunk in docL
   ]
           
def chunk_distibution(docL):
   statL = []
   for d in docL:
       s = d.page_content
       statL.append({"characters":len(s),"phrases":s.count("."),"lines":s.count("\n"),"words":s.count(" ")})
   statD = pd.DataFrame(statL)
   statD.sort_values("lines",inplace=True)
   statD.hist()
   plt.show()