#https://github.com/camelot-dev/camelot/wiki/Comparison-with-other-PDF-Table-Extraction-libraries-and-tools
import fitz  # PyMuPDF
#https://datascience.blog.wzb.eu/category/pdfs/
import pdfplumber
import os, sys, json, re, pathlib
import base64, io
import pytesseract
import subprocess
import cv2
import numpy as np
import pandas as pd
from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
import requests
import torch
 
subprocess.run("echo $VIRTUAL_ENV", shell=True)  # shell=True takes a single command string
baseDir = os.environ['HOME'] + '/lav/dauvi/portfolio/audit/'
fName = "foo"
fName = "am35"
fName = "iplex_nx"
fName = "AM5386"
#fName = "Policies"
fPath = baseDir + fName + '.pdf'
fUrl = "https://www.olympus-ims.com/en/rvi-products/iplex-nx/#!cms[focus]=cmsContent13653"
 
#-------------------------------------------------unstructured-----------------------------------
from langchain_community.document_loaders import UnstructuredPDFLoader
loader = UnstructuredPDFLoader(fPath, mode="elements")
data = loader.load()
 
from typing import Any
from pydantic import BaseModel
from unstructured.partition.pdf import partition_pdf
from langchain.chat_models import ChatOpenAI
from langchain.schema.messages import HumanMessage
from PIL import Image
import logging
import traceback
import warnings
from pathlib import Path
from abc import ABC, abstractmethod
import argparse


elements = partition_pdf(filename=fPath,extract_images_in_pdf=True,infer_table_structure=True,chunking_strategy="by_title",max_characters=4000,new_after_n_chars=3800,combine_text_under_n_chars=2000,image_output_dir_path=baseDir+"pdfImages/")
warnings.filterwarnings("ignore")
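# A quick look at what partition_pdf returned: chunked elements plus any
# detected tables (a sketch assuming the usual unstructured element API,
# where tables carry an HTML rendering in metadata.text_as_html).
for el in elements:
    print(type(el).__name__, str(el)[:80])
    html = getattr(el.metadata, "text_as_html", None)
    if html:
        print("table:", html[:200])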


llm = ChatOpenAI(model="gpt-4-vision-preview")


def image_to_base64(image_path):
    with Image.open(image_path) as image:
        buffered = io.BytesIO()
        image.save(buffered, format=image.format)
        img_str = base64.b64encode(buffered.getvalue())
        return img_str.decode('utf-8')


image_str = image_to_base64("static/pdfImages/figure-15-6.jpg")
chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=1024)
msg = chat.invoke([HumanMessage(content=[
    {"type": "text", "text": "Please give a summary of the image provided. Be descriptive"},
    {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_str}"}},
])])
msg.content


#-------------------------------------pypdfium2-------------------------------------------------
from langchain_community.document_loaders import PyPDFium2Loader
loader = PyPDFium2Loader(fPath)
data = loader.load()
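# Sanity check: every LangChain loader in this file yields Document objects
# with .page_content and .metadata.
for d in data[:2]:
    print(d.metadata, d.page_content[:120])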


#----------------------------------------pdfminer------------------------------------------------
from langchain_community.document_loaders import PDFMinerLoader
from langchain_community.document_loaders import PDFMinerPDFasHTMLLoader


loader = PDFMinerPDFasHTMLLoader(fPath)
data = loader.load()


#-----------------------------------------textract----------------------------------------------
from langchain_community.document_loaders import AmazonTextractPDFLoader
from textractor import Textractor
from textractor.data.constants import TextractFeatures



loader = AmazonTextractPDFLoader(baseDir + "szx7.png")
documents = loader.load()


extractor = Textractor(profile_name="default")
document = extractor.analyze_document(
    file_source=baseDir + "szx7.png",
    features=[TextractFeatures.TABLES]
)
document.tables[0].to_excel(baseDir + "output.xlsx")


document = extractor.analyze_document(
    file_source="tests/fixtures/form.png",
    features=[TextractFeatures.TABLES]
)
document.tables[0].to_excel("output.xlsx")


#-----------------------------------------azure------------------------------------------------


%pip install --upgrade --quiet  langchain langchain-community azure-ai-documentintelligence
from langchain_community.document_loaders import AzureAIDocumentIntelligenceLoader
loader = AzureAIDocumentIntelligenceLoader(api_endpoint="", api_key="", file_path=fPath, api_model="prebuilt-layout")
documents = loader.load()
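# With api_model="prebuilt-layout" the loader defaults to markdown mode, so
# each Document's page_content should be a markdown rendering of the layout
# (a sketch, assuming the langchain_community defaults):
print(documents[0].page_content[:500])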


#-------------------------------------------upstage---------------------------------------------


from langchain_upstage import UpstageLayoutAnalysisLoader
os.environ["UPSTAGE_DOCUMENT_AI_API_KEY"] = "YOUR_API_KEY"
loader = UpstageLayoutAnalysisLoader(fPath)
data = loader.load()


#----------------------------------------------agent-chunking-------------------------------------


from langchain.output_parsers.openai_tools import JsonOutputToolsParser
from langchain_community.chat_models import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain.chains import create_extraction_chain
from typing import Optional, List
from langchain.chains import create_extraction_chain_pydantic
from langchain_core.pydantic_v1 import BaseModel
from langchain import hub


obj = hub.pull("wfh/proposal-indexing")
llm = ChatOpenAI(model='gpt-4-1106-preview', openai_api_key=os.getenv("OPENAI_API_KEY", 'YouKey'))
runnable = obj | llm


class Sentences(BaseModel):
    sentences: List[str]


extraction_chain = create_extraction_chain_pydantic(pydantic_schema=Sentences, llm=llm)


def get_propositions(text):
    runnable_output = runnable.invoke({"input": text}).content
    propositions = extraction_chain.run(runnable_output)[0].sentences
    return propositions


with open(baseDir + "AM5386" + '.txt') as f:
    essay = f.read()


paragraphs = essay.split("\n\n")
len(paragraphs)
essay_propositions = []
for i, para in enumerate(paragraphs[:5]):
    propositions = get_propositions(para)
    essay_propositions.extend(propositions)
    print(f"Done with {i}")


print(f"You have {len(essay_propositions)} propositions")
essay_propositions[:10]


#------------------------------------mathpix----------------------------------------------------

from langchain_community.document_loaders import MathpixPDFLoader
loader = MathpixPDFLoader(fPath)
data = loader.load()  # requires Mathpix API credentials in the environment


#------------------------------------diffbot--------------------------------------------------------


from langchain_experimental.graph_transformers.diffbot import DiffbotGraphTransformer
from langchain_core.documents import Document
diffbot_nlp = DiffbotGraphTransformer(diffbot_api_key=os.getenv("DIFFBOT_API_KEY", 'YourKey'))
text = """
Greg is friends with Bobby. San Francisco is a great city, but New York is amazing.
Greg lives in New York.
"""
docs = [Document(page_content=text)]
graph_documents = diffbot_nlp.convert_to_graph_documents(docs)
graph_documents
#-------------------------------------------------tika-------------------------------------------


import tika
tika.initVM()
from tika import parser, detector
parsed = parser.from_file(fPath, xmlContent=True)
print(parsed["content"])
print(detector.from_file(fPath))


#---------------------------------------------------pymupdf---------------------------------------


import pymupdf
import pymupdf4llm
from markdown import markdown
with pymupdf.open(fPath) as doc:
    text = chr(12).join([page.get_text() for page in doc])


pathlib.Path(baseDir + fName + ".txt").write_bytes(text.encode())
md_text = pymupdf4llm.to_markdown(fPath)
pathlib.Path(baseDir + fName + ".md").write_bytes(md_text.encode())
html_text = markdown(md_text, extensions=['markdown.extensions.tables'])
pathlib.Path(baseDir + fName + ".html").write_bytes(html_text.encode())


#---------------------------------------beautifulsoup---------------------------------------------


from bs4 import BeautifulSoup
with open(baseDir + fName + '.html') as fByte:
    fString = fByte.read()
response = requests.get(fUrl)
with open(baseDir + 'iplex.html', 'w') as fByte:
    fByte.write(response.text)


soup = BeautifulSoup(response.text, 'html.parser')
tableL = soup.find_all('table')
tableS = "".join([str(t) for t in tableL])
tabDf = pd.read_html(tableS)
for tab in tableL:
    t = str(tab)
    if re.search("flexibility gradually", t):
        tabD = pd.read_html(t, header=[0, 1])[0]
        break
tabD.to_csv(baseDir + "implex.csv", index=False)


#------------------------------------------pdftabextract------------------------------------------
from pdftabextract import imgproc
from pdftabextract.common import read_xml, parse_pages
from math import radians, degrees
from pdftabextract.common import ROTATION, SKEW_X, SKEW_Y
from pdftabextract.geom import pt
from pdftabextract.textboxes import rotate_textboxes, deskew_textboxes
from pdftabextract.clustering import find_clusters_1d_break_dist
from pdftabextract.clustering import calc_cluster_centers_1d
from pdftabextract.clustering import zip_clusters_and_values
from pdftabextract.textboxes import border_positions_from_texts, split_texts_by_positions, join_texts
from pdftabextract.common import all_a_in_b, DIRECTION_VERTICAL
from pdftabextract.extract import make_grid_from_positions
from pdftabextract.common import save_page_grids
from pdftabextract.extract import fit_texts_into_grid, datatable_to_dataframe
xPath = baseDir + "output.xml"
xmltree, xmlroot = read_xml(xPath)
p_num = 3
pages = parse_pages(xmlroot)
p = pages[p_num]
imgfilebasename = p['image'][:p['image'].rindex('.')]
imgfile = os.path.join(baseDir, p['image'])
print("page %d: detecting lines in image file '%s'..." % (p_num, imgfile))
iproc_obj = imgproc.ImageProc(imgfile)
page_scaling_x = iproc_obj.img_w / p['width']   # scaling in X-direction
page_scaling_y = iproc_obj.img_h / p['height']  # scaling in Y-direction
lines_hough = iproc_obj.detect_lines(canny_kernel_size=3, canny_low_thresh=50, canny_high_thresh=150,
                                    hough_rho_res=1,
                                    hough_theta_res=np.pi/500,
                                    hough_votes_thresh=round(0.2 * iproc_obj.img_w))
print("> found %d lines" % len(lines_hough))
def save_image_w_lines(iproc_obj, imgfilebasename):
    img_lines = iproc_obj.draw_lines(orig_img_as_background=True)
    img_lines_file = os.path.join(baseDir, '%s-lines-orig.png' % imgfilebasename)
   
    print("> saving image with detected lines to '%s'" % img_lines_file)
    cv2.imwrite(img_lines_file, img_lines)
 
save_image_w_lines(iproc_obj, imgfilebasename)
rot_or_skew_type, rot_or_skew_radians = iproc_obj.find_rotation_or_skew(radians(0.5),
                                                                        radians(1),
                                                                        omit_on_rot_thresh=radians(0.5))
 
needs_fix = True
if rot_or_skew_type == ROTATION:
    print("> rotating back by %f°" % -degrees(rot_or_skew_radians))
    rotate_textboxes(p, -rot_or_skew_radians, pt(0, 0))
elif rot_or_skew_type in (SKEW_X, SKEW_Y):
    print("> deskewing in direction '%s' by %f°" % (rot_or_skew_type, -degrees(rot_or_skew_radians)))
    deskew_textboxes(p, -rot_or_skew_radians, rot_or_skew_type, pt(0, 0))
else:
    needs_fix = False
    print("> no page rotation / skew found")
if needs_fix:
    lines_hough = iproc_obj.apply_found_rotation_or_skew(rot_or_skew_type, -rot_or_skew_radians)
    save_image_w_lines(iproc_obj, imgfilebasename + '-repaired')
 
output_files_basename = xPath[:xPath.rindex('.')]
repaired_xmlfile = output_files_basename + '.repaired.xml'
print("saving repaired XML file to '%s'..." % repaired_xmlfile)
xmltree.write(repaired_xmlfile)
 
MIN_COL_WIDTH = 60
vertical_clusters = iproc_obj.find_clusters(imgproc.DIRECTION_VERTICAL, find_clusters_1d_break_dist,
                                            remove_empty_cluster_sections_use_texts=p['texts'],
                                            remove_empty_cluster_sections_n_texts_ratio=0.1,
                                            remove_empty_cluster_sections_scaling=page_scaling_x,
                                            dist_thresh=MIN_COL_WIDTH/2)
print("> found %d clusters" % len(vertical_clusters))
img_w_clusters = iproc_obj.draw_line_clusters(imgproc.DIRECTION_VERTICAL, vertical_clusters)
save_img_file = os.path.join(baseDir, '%s-vertical-clusters.png' % imgfilebasename)
print("> saving image with detected vertical clusters to '%s'" % save_img_file)
cv2.imwrite(save_img_file, img_w_clusters)
page_colpos = np.array(calc_cluster_centers_1d(vertical_clusters)) / page_scaling_x
print('found %d column borders:' % len(page_colpos))
print(page_colpos)
col2_rightborder = page_colpos[2]
median_text_height = np.median([t['height'] for t in p['texts']])
text_height_deviation_thresh = median_text_height / 2
texts_cols_1_2 = [t for t in p['texts']
                  if t['right'] <= col2_rightborder
                    and abs(t['height'] - median_text_height) <= text_height_deviation_thresh]
borders_y = border_positions_from_texts(texts_cols_1_2, DIRECTION_VERTICAL)
clusters_y = find_clusters_1d_break_dist(borders_y, dist_thresh=median_text_height/2)
clusters_w_vals = zip_clusters_and_values(clusters_y, borders_y)
pos_y = calc_cluster_centers_1d(clusters_w_vals)
pos_y.append(p['height'])
print('number of line positions:', len(pos_y))
pttrn_table_row_beginning = re.compile(r'^[\d Oo][\d Oo]{2,} +[A-ZÄÖÜ]')
texts_cols_1_2_per_line = split_texts_by_positions(texts_cols_1_2, pos_y, DIRECTION_VERTICAL,
                                                  alignment='middle',
                                                  enrich_with_positions=True)
for line_texts, (line_top, line_bottom) in texts_cols_1_2_per_line:
    line_str = join_texts(line_texts)
    if pttrn_table_row_beginning.match(line_str):
        top_y = line_top
        break
else:
    top_y = 0
 
words_in_footer = ('anzeige', 'annahme', 'ala')
min_footer_text_height = median_text_height * 1.5
min_footer_y_pos = p['height'] * 0.7
bottom_texts = [t for t in p['texts']
                if t['top'] >= min_footer_y_pos and t['height'] >= min_footer_text_height]
bottom_texts_per_line = split_texts_by_positions(bottom_texts,
                                                pos_y + [p['height']],
                                                DIRECTION_VERTICAL,
                                                alignment='middle',
                                                enrich_with_positions=True)
page_span = page_colpos[-1] - page_colpos[0]
min_footer_text_width = page_span * 0.8
for line_texts, (line_top, line_bottom) in bottom_texts_per_line:
    line_str = join_texts(line_texts)
    has_wide_footer_text = any(t['width'] >= min_footer_text_width for t in line_texts)
    if has_wide_footer_text or all_a_in_b(words_in_footer, line_str):
        bottom_y = line_top
        break
else:
    bottom_y = p['height']
 
page_rowpos = [y for y in pos_y if top_y <= y <= bottom_y]
print("> page %d: %d lines between [%f, %f]" % (p_num, len(page_rowpos), top_y, bottom_y))
grid = make_grid_from_positions(page_colpos, page_rowpos)
n_rows = len(grid)
n_cols = len(grid[0])
print("> page %d: grid with %d rows, %d columns" % (p_num, n_rows, n_cols))
page_grids_file = os.path.join(baseDir, output_files_basename + '.pagegrids_p3_only.json')
print("saving page grids JSON file to '%s'" % page_grids_file)
save_page_grids({p_num: grid}, page_grids_file)
datatable = fit_texts_into_grid(p['texts'], grid)
df = datatable_to_dataframe(datatable)
df.head(n=10)
csv_output_file = os.path.join(baseDir, output_files_basename + '-p3_only.csv')
print("saving extracted data to '%s'" % csv_output_file)
df.to_csv(csv_output_file, index=False)
excel_output_file = os.path.join(baseDir, output_files_basename + '-p3_only.xlsx')
print("saving extracted data to '%s'" % excel_output_file)
df.to_excel(excel_output_file, index=False)
 
 
#------------------------------------------table-extract-------------------------------------------
import pdftableextract as pdf
root, ext = os.path.splitext(os.path.basename(fPath))
pages = ['1']
cells = [pdf.process_page(fPath, p) for p in pages]
cells = [cell for row in cells for cell in row]
 
tables = pdf.table_to_list(cells, pages)
for i, table in enumerate(tables[1:]):
    df = pd.DataFrame(table)
    out = '{}-page-1-table-{}.csv'.format(root, i + 1)
    df.to_csv(out, index=False, quoting=1, encoding='utf-8')
 
#-------------------------------pdftables------------------------------------------------
# pdftables expects the PDF as a multipart file upload
with open(fPath, 'rb') as f:
    resq = requests.post("https://pdftables.com/api?key=" + os.environ['PDFTABLES_KEY'] + "&format=xlsx-single",
                         files={'f': f})
with open(baseDir + fName + ".xlsx", "wb") as out:
    out.write(resq.content)




#-------------------------------tika--------------------------------------------


import tika
tika.initVM()
from tika import parser
parsed = parser.from_file(fPath)
print(parsed["metadata"])
print(parsed["content"])
#----------------------------pypdf------------------------------------------------
from pypdf import PdfReader
reader = PdfReader(fPath)
number_of_pages = len(reader.pages)
page = reader.pages[0]
text = page.extract_text()
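# Looping the whole file; newer pypdf releases also accept
# extract_text(extraction_mode="layout") to keep rough columnar spacing:
all_text = "\n".join(p.extract_text() for p in reader.pages)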


#----------------------------llmsherpa-------------------------------------------
            for line in block["lines"]:
                line_text = ""
                curr_font_size = [span["size"] for span in line["spans"]]


from llmsherpa.readers import LayoutPDFReader
                for span in line["spans"]:
pdf_reader = LayoutPDFReader("https://readers.llmsherpa.com/api/document/developer/parseDocument?renderFormat=all")
                    text = span["text"]
doc = pdf_reader.read_pdf(fPath)
                    font_size = span["size"]
docL = []
                    flags = span["flags"]
for s in doc.sections():
                    span_rect = span["bbox"]
    sectS = ''
    for p in s.children:
        sectS += p.to_text()
        if sectS == '':
            sectS = '-'
        docL.append(Document(text=sectS,metadata={"sect":s.to_context_text(),"lev":s.level}))
for t in doc.tables():
    docL.append(Document(text=t.to_text(),metadata={"table":s.block_idx,"lev":t.level}))


#---------------------------------------------pymupdf---------------------------
import pymupdf4llm
from langchain_text_splitters import MarkdownHeaderTextSplitter
md_text = pymupdf4llm.to_markdown(fPath)
# parser = LlamaParse(api_key="...",result_type="markdown")
# documents = parser.load_data("./my_file.pdf")
#single_sentences_list = re.split(r'(?<=[.?!])\s+', essay)
headers_split = [("#", "Chapter"), ("##", "Section"), ('###', 'Subsection')]
splitter = MarkdownHeaderTextSplitter(headers_split)  #,strip_headers=True,return_each_line=False,)
docL = splitter.split_text(md_text)
#splitter = RecursiveCharacterTextSplitter(chunk_size = 1000, chunk_overlap=200)
#splitter = SentenceSplitter(chunk_size=200,chunk_overlap=15)
#elements = partition_pdf(filename=fPath,strategy="hi_res",infer_table_structure=True,model_name="yolox")
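# Each split carries the headers it fell under in metadata
# (e.g. {"Chapter": ..., "Section": ...} given the map above):
for d in docL[:3]:
    print(d.metadata, d.page_content[:80])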


os.environ["LLAMA_CLOUD_API_KEY"] = "llx-"
                        else:
llm = get_llm()
                            is_list_item = self.is_bullet_point(text) or self.is_numbered_list_item(text)
parsing_instructions = '''The document describes IT security policies for audit. It contains many tables. Answer questions using the information in this article and be precise.'''
documents = LlamaParse(result_type="markdown", parsing_instructions=parsing_instructions).load_data(pdf_doc)
print(documents[0].text[:1000])
node_parser = MarkdownElementNodeParser(llm=llm, num_workers=8).from_defaults()
nodes = node_parser.get_nodes_from_documents(documents)
base_nodes, objects = node_parser.get_nodes_and_objects(nodes)


#-------------------------------------------pypdf2------------------------------


from PyPDF2 import PdfReader
text = ""
docL = []
pdf_docs = [fPath]  # assumed list of input PDFs
for pdf in pdf_docs:
    pdf_reader = PdfReader(pdf)
    for i, page in enumerate(pdf_reader.pages):
        text = page.extract_text()
        docL.append(Document(text=text, metadata={"page": i}))


#-----------------------------------camelot-----------------------------


import camelot
tables = camelot.read_pdf(fPath)
tDf = tables[0].df
tDf.to_csv(baseDir + fName + ".csv")
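# Camelot defaults to the "lattice" flavor (ruled tables); borderless tables
# usually need "stream". The parsing report gives accuracy diagnostics:
tables_stream = camelot.read_pdf(fPath, flavor="stream", pages="1-end")
print(tables_stream[0].parsing_report)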


#----------------------------------pdf-plumber-------------------------------
import pdfplumber
from collections import Counter
from reportlab.lib.pagesizes import letter
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import SimpleDocTemplate, Preformatted


font_size_counter = Counter()
with pdfplumber.open(fPath) as pdf:
    for i in range(len(pdf.pages)):
        words = pdf.pages[i].extract_words(extra_attrs=['fontname', 'size'])
        lines = {}
        for word in words:
            line_num = word['top']
            if line_num not in lines:
                lines[line_num] = []
            lines[line_num].append(word)
        for line_words in lines.values():
            font_size_counter[line_words[0]['size']] += 1


repeated_sizes = [size for size, count in font_size_counter.items() if count > 1]
extracted_font_size = max(repeated_sizes)


target_font_size = extracted_font_size  # assumed: hunt for the font size found above
lines_with_target_font_size = []
with pdfplumber.open(fPath) as pdf:
    for i in range(len(pdf.pages)):
        words = pdf.pages[i].extract_words(extra_attrs=['fontname', 'size'])
        lines = {}
        for word in words:
            line_num = word['top']
            if line_num not in lines:
                lines[line_num] = []
            lines[line_num].append(word)
        for line_num, line_words in lines.items():
            line_font_sizes = [word['size'] for word in line_words]
            if target_font_size in line_font_sizes:
                line_text = ' '.join([word['text'] for word in line_words])
                lines_with_target_font_size.append(line_text)


# Split the PDF into text chunks at marker strings, then re-render each chunk
# as its own PDF. `markers` is assumed to be a user-supplied list of heading strings.
markers = []
chunks = []
current_chunk = []
current_marker_index = 0
pdf_document = fitz.open(fPath)
for page_num in range(pdf_document.page_count):
    page = pdf_document[page_num]
    text = page.get_text("text")
    lines = text.split('\n')
    for line in lines:
        if current_marker_index < len(markers) and markers[current_marker_index] in line:
            if current_chunk:
                chunks.append('\n'.join(current_chunk))
            current_chunk = []
            current_marker_index += 1
        current_chunk.append(line)
if current_chunk:
    chunks.append('\n'.join(current_chunk))
pdf_document.close()


def write_chunks_to_pdf(chunks, output_fPath):
    doc = SimpleDocTemplate(output_fPath, pagesize=letter)
    styles = getSampleStyleSheet()
    story = []
    for chunk in chunks:
        preformatted = Preformatted(chunk, styles["Normal"])
        story.append(preformatted)
    doc.build(story)


output_folder = "output"
if not os.path.exists(output_folder):
    os.makedirs(output_folder)
for i, chunk in enumerate(chunks, start=1):
    output_fPath = os.path.join(output_folder, f"output_pdf_part{i}.pdf")
    write_chunks_to_pdf([chunk], output_fPath)


#--------------------------------------------------------adobe---------------------------------------------
from adobe.pdfservices.operation.auth.service_principal_credentials import ServicePrincipalCredentials
from adobe.pdfservices.operation.exception.exceptions import ServiceApiException, ServiceUsageException, SdkException
from adobe.pdfservices.operation.io.cloud_asset import CloudAsset
from adobe.pdfservices.operation.io.stream_asset import StreamAsset
from adobe.pdfservices.operation.pdf_services import PDFServices
from adobe.pdfservices.operation.pdf_services_media_type import PDFServicesMediaType
from adobe.pdfservices.operation.pdfjobs.jobs.export_pdf_job import ExportPDFJob
from adobe.pdfservices.operation.pdfjobs.params.export_pdf.export_pdf_params import ExportPDFParams
from adobe.pdfservices.operation.pdfjobs.params.export_pdf.export_pdf_target_format import ExportPDFTargetFormat
from adobe.pdfservices.operation.pdfjobs.result.export_pdf_result import ExportPDFResult


credentials = ServicePrincipalCredentials(
    client_id=os.getenv('PDF_SERVICES_CLIENT_ID'),
    client_secret=os.getenv('PDF_SERVICES_CLIENT_SECRET'))
pdf_services = PDFServices(credentials=credentials)
file = open('src/resources/Bodea Brochure.pdf', 'rb')
input_stream = file.read()
file.close()
input_asset = pdf_services.upload(input_stream=input_stream, mime_type=PDFServicesMediaType.PDF)
export_pdf_params = ExportPDFParams(target_format=ExportPDFTargetFormat.DOCX)
export_pdf_job = ExportPDFJob(input_asset=input_asset, export_pdf_params=export_pdf_params)
location = pdf_services.submit(export_pdf_job)
pdf_services_response = pdf_services.get_job_result(location, ExportPDFResult)
result_asset: CloudAsset = pdf_services_response.get_result().get_asset()
stream_asset: StreamAsset = pdf_services.get_content(result_asset)
output_file_path = "./Bodea Brochure.docx"
with open(output_file_path, "wb") as file:
    file.write(stream_asset.get_input_stream())


#-----------------------------------nougat-ocr----------------------------------
#-----------------------------------marker-pdf----------------------------------
#https://www.jnjmedtech.com/system/files/pdf/090912-220322%20DSUS_EMEA%20Large%20Bone%20Saw%20Blades%20Product%20Brochure.pdf


import fitz # PyMuPDF import pdfplumber import re import pytesseract import cv2 import numpy as np from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer import torch from PIL import Image import os import logging import traceback import warnings from pathlib import Path from abc import ABC, abstractmethod import argparse

warnings.filterwarnings("ignore")

class PDFExtractor(ABC):

   """Abstract base class for PDF extraction."""
   def __init__(self, pdf_path):
       self.pdf_path = pdf_path
       self.setup_logging()
   def setup_logging(self):
       """Set up logging configuration."""
       log_file = f"{Path(__file__).stem}.log"
       logging.basicConfig(
           level=logging.INFO,
           format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
           handlers=[
               logging.FileHandler(log_file),
               logging.StreamHandler()
           ]
       )
       self.logger = logging.getLogger(__name__)
   @abstractmethod
   def extract(self):
       """Abstract method for extracting content from PDF."""
       pass

class MarkdownPDFExtractor(PDFExtractor):

   """Class for extracting markdown-formatted content from PDF."""
   BULLET_POINTS = '•◦▪▫●○'
   def __init__(self, pdf_path):
       super().__init__(pdf_path)
       self.pdf_filename = Path(pdf_path).stem
       self.setup_image_captioning()
   def setup_image_captioning(self):
       """Set up the image captioning model."""
       try:
           self.model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
           self.feature_extractor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
           self.tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
           self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
           self.model.to(self.device)
           self.logger.info("Image captioning model set up successfully.")
       except Exception as e:
           self.logger.error(f"Error setting up image captioning model: {e}")
           self.logger.exception(traceback.format_exc())
   def extract(self):
       try:
           markdown_content, markdown_pages = self.extract_markdown()
           self.save_markdown(markdown_content)
           self.logger.info(f"Markdown content has been saved to outputs/{self.pdf_filename}.md")
           return markdown_pages
       
       except Exception as e:
           self.logger.error(f"Error processing PDF: {e}")
           self.logger.exception(traceback.format_exc())
           return "", []
   def extract_markdown(self):
       """Main method to extract markdown from PDF."""
       try:
           doc = fitz.open(self.pdf_path)
           markdown_content = ""
           markdown_pages = []
           tables = self.extract_tables()
           table_index = 0
           list_counter = 0
           in_code_block = False
           code_block_content = ""
           code_block_lang = None
           prev_line = ""
           for page_num, page in enumerate(doc):
               self.logger.info(f"Processing page {page_num + 1}")
               page_content = ""
               blocks = page.get_text("dict")["blocks"]
               page_height = page.rect.height
               links = self.extract_links(page)
               for block in blocks:
                   if block["type"] == 0:  # Text
                       page_content += self.process_text_block(block, page_height, links, list_counter, in_code_block, code_block_content, code_block_lang, prev_line)
                   elif block["type"] == 1:  # Image
                       page_content += self.process_image_block(page, block)
               # Insert tables at their approximate positions
               while table_index < len(tables) and tables[table_index]["page"] == page.number:
                   page_content += "\n\n" + self.table_to_markdown(tables[table_index]["content"]) + "\n\n"
                   table_index += 1
               markdown_pages.append(self.post_process_markdown(page_content))
               markdown_content += page_content
           markdown_content = self.post_process_markdown(markdown_content)
           return markdown_content, markdown_pages
       except Exception as e:
           self.logger.error(f"Error extracting markdown: {e}")
           self.logger.exception(traceback.format_exc())
           return "", []
   def extract_tables(self):
       """Extract tables from PDF using pdfplumber."""
       tables = []
       try:
           with pdfplumber.open(self.pdf_path) as pdf:
               for page_number, page in enumerate(pdf.pages):
                   page_tables = page.extract_tables()
                   for table in page_tables:
                       tables.append({"page": page_number, "content": table})
                       self.logger.info(f"Extracted {len(tables)} tables from the PDF.")
       except Exception as e:
           self.logger.error(f"Error extracting tables: {e}")
           self.logger.exception(traceback.format_exc())
       return tables
   def table_to_markdown(self, table):
       """Convert a table to markdown format."""
       if not table:
           return ""
       try:
           table = [[ if cell is None else str(cell).strip() for cell in row] for row in table]
           col_widths = [max(len(cell) for cell in col) for col in zip(*table)]
           markdown = ""
           for i, row in enumerate(table):
               formatted_row = [cell.ljust(col_widths[j]) for j, cell in enumerate(row)]
               markdown += "| " + " | ".join(formatted_row) + " |\n"
               if i == 0:
                   markdown += "|" + "|".join(["-" * (width + 2) for width in col_widths]) + "|\n"
           return markdown
       except Exception as e:
           self.logger.error(f"Error converting table to markdown: {e}")
           self.logger.exception(traceback.format_exc())
           return ""
   def perform_ocr(self, image):
       """Perform OCR on the given image."""
       try:
           opencv_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
           ocr_result = pytesseract.image_to_data(opencv_image, output_type=pytesseract.Output.DICT)
           
           result = ""
           for word in ocr_result['text']:
               if word.strip() != "":
                   result += word + " "
               if len(result) > 30:
                   break
               
           return result.strip()
       except Exception as e:
           self.logger.error(f"Error performing OCR: {e}")
           self.logger.exception(traceback.format_exc())
           return ""
   def caption_image(self, image):
       """Generate a caption for the given image."""
       try:
           ocr_text = self.perform_ocr(image)
           if ocr_text:
               return ocr_text
           
           inputs = self.feature_extractor(images=image, return_tensors="pt").to(self.device)
           pixel_values = inputs.pixel_values
           generated_ids = self.model.generate(pixel_values, max_length=30)
           generated_caption = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
           return generated_caption.strip()
       except Exception as e:
           self.logger.error(f"Error captioning image: {e}")
           self.logger.exception(traceback.format_exc())
           return "Image"
   def clean_text(self, text):
       """Clean the given text by removing extra spaces."""
       text = text.strip()
       text = re.sub(r'\s+', ' ', text)
       return text
   def apply_formatting(self, text, flags):
       """Apply markdown formatting to the given text based on flags."""
       text = text.strip()
       if not text:
           return text
       
       is_bold = flags & 2**4
       is_italic = flags & 2**1
       is_monospace = flags & 2**3
       is_superscript = flags & 2**0
       is_subscript = flags & 2**5 
       if is_monospace:
           text = f"`{text}`"
       elif is_superscript and not bool(re.search(r'\s+', text)):
           text = f"^{text}^"
       elif is_subscript and not bool(re.search(r'\s+', text)):
           text = f"~{text}~"
       if is_bold and is_italic:
           text = f"***{text}***"
       elif is_bold:
           text = f"**{text}**"
       elif is_italic:
           text = f"*{text}*"
       return f" {text} "
   def is_bullet_point(self, text):
       """Check if the given text is a bullet point."""
       return text.strip().startswith(tuple(self.BULLET_POINTS))
   def convert_bullet_to_markdown(self, text):
       """Convert a bullet point to markdown format."""
       text = re.sub(r'^\s*', , text)
       return re.sub(f'^[{re.escape(self.BULLET_POINTS)}]\s*', '- ', text)
   def is_numbered_list_item(self, text):
       """Check if the given text is a numbered list item."""
       return bool(re.match(r'^\d+\s{0,3}[.)]', text.strip()))
   def convert_numbered_list_to_markdown(self, text, list_counter):
       """Convert a numbered list item to markdown format."""
       text = re.sub(r'^\s*', , text)
       return re.sub(r'^\d+\s{0,3}[.)]', f"{list_counter}. ", text)
   def is_horizontal_line(self, text):
       """Check if the given text represents a horizontal line."""
       return bool(re.match(r'^[_-]+$', text.strip()))
   def extract_links(self, page):
       """Extract links from the given page."""
       links = []
       try:
           for link in page.get_links():
               if link["kind"] == 2:  # URI link
                   links.append({
                       "rect": link["from"],
                       "uri": link["uri"]
                   })
                   self.logger.info(f"Extracted {len(links)} links from the page.")
       except Exception as e:
           self.logger.error(f"Error extracting links: {e}")
           self.logger.exception(traceback.format_exc())
       return links
   def detect_code_block(self, prev_line, current_line):
       """Detect if the current line starts a code block."""
       patterns = {
           'python': [
               (r'^(?:from|import)\s+\w+', r'^(?:from|import|def|class|if|for|while|try|except|with)\s'),
               (r'^(?:def|class)\s+\w+', r'^\s{4}'),
               (r'^\s{4}', r'^\s{4,}')
           ],
           'javascript': [
               (r'^(?:function|const|let|var)\s+\w+', r'^(?:function|const|let|var|if|for|while|try|catch|class)\s'),
               (r'^(?:if|for|while)\s*\(', r'^\s{2,}'),
               (r'^\s{2,}', r'^\s{2,}')
           ],
           'html': [
               (r'^<(!DOCTYPE|html|head|body|div|p|a|script|style)', r'^<(!DOCTYPE|html|head|body|div|p|a|script|style)'),
               (r'^<\w+.*>$', r'^\s{2,}<'),
               (r'^\s{2,}<', r'^\s{2,}<')
           ],
           'shell': [
               (r'^(?:\$|\#)\s', r'^(?:\$|\#)\s'),
               (r'^[a-z_]+\s*=', r'^[a-z_]+\s*=')
           ],
           'bash': [
               (r'^(?:#!/bin/bash|alias|export|source)\s', r'^(?:#!/bin/bash|alias|export|source|echo|read|if|for|while|case|function)\s'),
               (r'^(?:if|for|while|case|function)\s', r'^\s{2,}'),
               (r'^\s{2,}', r'^\s{2,}')
           ],
           'cpp': [
               (r'^#include\s*<', r'^(?:#include|using|namespace|class|struct|enum|template|typedef)\s'),
               (r'^(?:class|struct|enum)\s+\w+', r'^\s{2,}'),
               (r'^\s{2,}', r'^\s{2,}')
           ],
           'java': [
               (r'^(?:import|package)\s+\w+', r'^(?:import|package|public|private|protected|class|interface|enum)\s'),
               (r'^(?:public|private|protected)\s+class\s+\w+', r'^\s{4,}'),
               (r'^\s{4,}', r'^\s{4,}')
           ],
           'json': [
               (r'^\s*{', r'^\s*["{[]'),
               (r'^\s*"', r'^\s*["}],?$'),
               (r'^\s*\[', r'^\s*[}\]],?$')
           ]
       }
       
       for lang, pattern_pairs in patterns.items():
           for prev_pattern, curr_pattern in pattern_pairs:
               if (re.match(prev_pattern, prev_line.strip()) and 
                   re.match(curr_pattern, current_line.strip())):
                   return lang
               
       return None
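    # Sketch of the heuristic above: a code block starts when two consecutive
    # lines match a (prev, curr) pattern pair for some language, e.g.
    #   self.detect_code_block("import os", "import sys")      -> "python"
    #   self.detect_code_block("Plain prose.", "More prose.")  -> None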
   def process_text_block(self, block, page_height, links, list_counter, in_code_block, code_block_content, code_block_lang, prev_line):
       """Process a text block and convert it to markdown."""
       try:
           block_rect = block["bbox"]
           if block_rect[1] < 50 or block_rect[3] > page_height - 50:
               return ""  # Skip headers and footers
           block_text = ""
           last_y1 = None
           last_font_size = None
           for line in block["lines"]:
               line_text = ""
               curr_font_size = [span["size"] for span in line["spans"]]
               for span in line["spans"]:
                   text = span["text"]
                   font_size = span["size"]
                   flags = span["flags"]
                   span_rect = span["bbox"]
                   if self.is_horizontal_line(text):
                       line_text += "\n---\n"
                       continue
                   
                   text = self.clean_text(text)
                   if text.strip():
                       header_level = self.get_header_level(font_size)                   
                       if header_level > 0:
                           text = f"\n{'#' * header_level} {text}\n\n"
                       else:
                           is_list_item = self.is_bullet_point(text) or self.is_numbered_list_item(text)
                           if is_list_item:
                                # Separate the list marker (bullet char or "1." / "1)") from its content
                                match = re.match(r'^([•◦▪▫●○]|\d+\s{0,3}[.)])\s*(.*)', text, re.DOTALL)
                                if match:
                                    marker, content = match.groups()
                                    formatted_content = self.apply_formatting(content, flags)
                                    text = f"{marker} {formatted_content}"
                           else:
                               text = self.apply_formatting(text, flags)
                               
                   for link in links:
                       if fitz.Rect(span_rect).intersects(link["rect"]):
                           text = f"[{text.strip()}]({link['uri']})"
                           break
                   line_text += text
               if last_y1 is not None:
                   avg_last_font_size = sum(last_font_size) / len(last_font_size) if last_font_size else 0
                   avg_current_font_size = sum(curr_font_size) / len(curr_font_size)
                   font_size_changed = abs(avg_current_font_size - avg_last_font_size) > 1
                   if abs(line["bbox"][3] - last_y1) > 2 or font_size_changed:
                       block_text += "\n"
                       
               block_text += self.clean_text(line_text) + " "
               last_font_size = curr_font_size
               last_y1 = line["bbox"][3]
           markdown_content = ""
           lines = block_text.split('\n')
           for i, line in enumerate(lines):
               clean_line = self.clean_text(line)
               if not in_code_block:
                   code_lang = self.detect_code_block(prev_line, clean_line)
                   if code_lang:
                       in_code_block = True
                       code_block_lang = code_lang
                       code_block_content = prev_line + "\n" + clean_line + "\n"
                       prev_line = clean_line
                       continue
               if in_code_block:
                   code_block_content += clean_line + "\n"
                   if i == len(lines) - 1 or self.detect_code_block(clean_line, lines[i+1]) != code_block_lang:
                       markdown_content += f"```{code_block_lang}\n{code_block_content}```\n\n"
                       in_code_block = False
                       code_block_content = ""
                       code_block_lang = None
               else:
                   if self.is_bullet_point(clean_line):
                       markdown_content += "\n" + self.convert_bullet_to_markdown(clean_line)
                       list_counter = 0
                   elif self.is_numbered_list_item(clean_line):
                       list_counter += 1
                       markdown_content += "\n" + self.convert_numbered_list_to_markdown(clean_line, list_counter)   
                   else:
                       markdown_content += f"{clean_line}\n"
                       list_counter = 0
               prev_line = clean_line
           return markdown_content + "\n"
       except Exception as e:
           self.logger.error(f"Error processing text block: {e}")
           self.logger.exception(traceback.format_exc())
           return ""
   def process_image_block(self, page, block):
       """Process an image block and convert it to markdown."""
       try:
           image_rect = block["bbox"]
           pix = page.get_pixmap(clip=image_rect)
           image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
           
            image_filename = f"{self.pdf_filename}_image_{page.number + 1}_{block['number']}.png"
            os.makedirs("outputs", exist_ok=True)  # Ensure the output directory exists
            image.save(f"outputs/{image_filename}")
           caption = self.caption_image(image)
           return f"![{caption}]({image_filename})\n\n"
       except Exception as e:
           self.logger.error(f"Error processing image block: {e}")
           self.logger.exception(traceback.format_exc())
           return ""
   def get_header_level(self, font_size):
       """Determine header level based on font size."""
       if font_size > 24:
           return 1
       elif font_size > 20:
           return 2
       elif font_size > 18:
           return 3
       elif font_size > 16:
           return 4
       elif font_size > 14:
           return 5
       elif font_size > 12:
           return 6
       else:
           return 0
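    # With the thresholds above: a 25pt span maps to level 1 ("#"), 15pt to
    # level 5 ("#####"), and anything at 12pt or below to 0 (body text).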
   def post_process_markdown(self, markdown_content):
       """Post-process the markdown content."""
       try:
            markdown_content = re.sub(r'\n{3,}', '\n\n', markdown_content)  # Collapse runs of blank lines
            markdown_content = re.sub(r'^\s*\d+\s*$\n?', '', markdown_content, flags=re.MULTILINE)  # Remove lines holding only a page number
            markdown_content = re.sub(r' +', ' ', markdown_content)  # Collapse multiple spaces
            markdown_content = re.sub(r'\s*(---\n)+', '\n\n---\n', markdown_content)  # Deduplicate horizontal lines

            def remove_middle_headers(match):
                line = match.group(0)
                # Keep the leading header marker and strip any '#' later in the line
                return re.sub(r'(^#{1,6}\s).*?(?=\n)',
                              lambda m: m.group(1) + re.sub(r'#', '', m.group(0)[len(m.group(1)):]),
                              line)

            markdown_content = re.sub(r'^#{1,6}\s.*\n', remove_middle_headers, markdown_content, flags=re.MULTILINE)  # Strip '#' in the middle of header lines
            return markdown_content
       except Exception as e:
           self.logger.error(f"Error post-processing markdown: {e}")
           self.logger.exception(traceback.format_exc())
           return markdown_content
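    # Example: a header line "## Title ### stray\n" keeps its leading marker but
    # loses the inline hashes, coming back as "## Title  stray\n".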
   def save_markdown(self, markdown_content):
       """Save the markdown content to a file."""
       try:
           os.makedirs("outputs", exist_ok=True)
           with open(f"outputs/{self.pdf_filename}.md", "w", encoding="utf-8") as f:
               f.write(markdown_content)
               self.logger.info("Markdown content saved successfully.")
       except Exception as e:
           self.logger.error(f"Error saving markdown content: {e}")
           self.logger.exception(traceback.format_exc())

def main():
    parser = argparse.ArgumentParser(description="Extract markdown-formatted content from a PDF file.")
    parser.add_argument("pdf_path", help="Path to the input PDF file")
    args = parser.parse_args()
    extractor = MarkdownPDFExtractor(args.pdf_path)
    markdown_pages = extractor.extract()
    return markdown_pages

if __name__ == "__main__":
    main()
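
# Usage sketch (hypothetical module filename):
#   python markdown_pdf_extractor.py document.pdf
# The markdown file and any extracted images are written to ./outputs/.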