import fitz  # PyMuPDF
import pdfplumber
import re
import pytesseract
import cv2
import numpy as np
from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
import torch
from PIL import Image
import os
import logging
import traceback
import warnings
from pathlib import Path
from abc import ABC, abstractmethod
import argparse


warnings.filterwarnings("ignore")


class PDFExtractor(ABC):
    """Abstract base class for PDF extraction."""


    def __init__(self, pdf_path):
        self.pdf_path = pdf_path
        self.setup_logging()
 
    def setup_logging(self):
        """Set up logging configuration."""
        log_file = f"{Path(__file__).stem}.log"
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler(log_file),
                logging.StreamHandler()
            ]
        )
        self.logger = logging.getLogger(__name__)
 
    @abstractmethod
    def extract(self):
        """Abstract method for extracting content from PDF."""
        pass
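
    # Sketch of the contract: concrete extractors subclass PDFExtractor and
    # implement extract(). PlainTextPDFExtractor below is a hypothetical
    # illustration, not part of this module:
    #
    #   class PlainTextPDFExtractor(PDFExtractor):
    #       def extract(self):
    #           with fitz.open(self.pdf_path) as doc:
    #               return "\n".join(page.get_text() for page in doc)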
 
class MarkdownPDFExtractor(PDFExtractor):
    """Class for extracting markdown-formatted content from PDF."""
 
    BULLET_POINTS = '•◦▪▫●○'
 
    def __init__(self, pdf_path):
        super().__init__(pdf_path)
        self.pdf_filename = Path(pdf_path).stem
        self.setup_image_captioning()
 
    def setup_image_captioning(self):
        """Set up the image captioning model."""
        try:
            self.model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
            self.feature_extractor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
            self.tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            self.model.to(self.device)
            self.logger.info("Image captioning model set up successfully.")
        except Exception as e:
            self.logger.error(f"Error setting up image captioning model: {e}")
            self.logger.exception(traceback.format_exc())
 
    def extract(self):
        try:
            markdown_content, markdown_pages = self.extract_markdown()
            self.save_markdown(markdown_content)
            self.logger.info(f"Markdown content has been saved to outputs/{self.pdf_filename}.md")
            return markdown_pages
          
          
        except Exception as e:
            self.logger.error(f"Error processing PDF: {e}")
            self.logger.exception(traceback.format_exc())
            return "", []


    def extract_markdown(self):
        """Main method to extract markdown from PDF."""
        try:
            doc = fitz.open(self.pdf_path)
            markdown_content = ""
            markdown_pages = []
            tables = self.extract_tables()
            table_index = 0
            list_counter = 0
            in_code_block = False
            code_block_content = ""
            code_block_lang = None
            prev_line = ""


            for page_num, page in enumerate(doc):
                self.logger.info(f"Processing page {page_num + 1}")
                page_content = ""
                blocks = page.get_text("dict")["blocks"]
                page_height = page.rect.height
                links = self.extract_links(page)

                for block in blocks:
                    if block["type"] == 0:  # Text
                        page_content += self.process_text_block(block, page_height, links, list_counter, in_code_block, code_block_content, code_block_lang, prev_line)
                    elif block["type"] == 1:  # Image
                        page_content += self.process_image_block(page, block)

                # Insert tables at their approximate positions
                while table_index < len(tables) and tables[table_index]["page"] == page.number:
                    page_content += "\n\n" + self.table_to_markdown(tables[table_index]["content"]) + "\n\n"
                    table_index += 1


                markdown_pages.append(self.post_process_markdown(page_content))
                markdown_content += page_content

            markdown_content = self.post_process_markdown(markdown_content)
            return markdown_content, markdown_pages
        except Exception as e:
            self.logger.error(f"Error extracting markdown: {e}")
            self.logger.exception(traceback.format_exc())
            return "", []


    def extract_tables(self):
        """Extract tables from PDF using pdfplumber."""
        tables = []
        try:
            with pdfplumber.open(self.pdf_path) as pdf:
                for page_number, page in enumerate(pdf.pages):
                    page_tables = page.extract_tables()
                    for table in page_tables:
                        tables.append({"page": page_number, "content": table})
            self.logger.info(f"Extracted {len(tables)} tables from the PDF.")
        except Exception as e:
            self.logger.error(f"Error extracting tables: {e}")
            self.logger.exception(traceback.format_exc())
        return tables


    def table_to_markdown(self, table):
        """Convert a table to markdown format."""
        if not table:
            return ""

        try:
            table = [['' if cell is None else str(cell).strip() for cell in row] for row in table]
            col_widths = [max(len(cell) for cell in col) for col in zip(*table)]

            markdown = ""
            for i, row in enumerate(table):
                formatted_row = [cell.ljust(col_widths[j]) for j, cell in enumerate(row)]
                markdown += "| " + " | ".join(formatted_row) + " |\n"

                if i == 0:
                    markdown += "|" + "|".join(["-" * (width + 2) for width in col_widths]) + "|\n"

            return markdown
        except Exception as e:
            self.logger.error(f"Error converting table to markdown: {e}")
            self.logger.exception(traceback.format_exc())
            return ""
    def perform_ocr(self, image):
        """Perform OCR on the given image."""
        try:
            opencv_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
            ocr_result = pytesseract.image_to_data(opencv_image, output_type=pytesseract.Output.DICT)
           
            result = ""
            for word in ocr_result['text']:
                if word.strip() != "":
                    result += word + " "
 
                # Cap the OCR snippet; only a short string is needed as a caption
                if len(result) > 30:
                    break
               
            return result.strip()
        except Exception as e:
            self.logger.error(f"Error performing OCR: {e}")
            self.logger.exception(traceback.format_exc())
            return ""
 
    def caption_image(self, image):
        """Generate a caption for the given image."""
        try:
            ocr_text = self.perform_ocr(image)
            if ocr_text:
                return ocr_text
           
            inputs = self.feature_extractor(images=image, return_tensors="pt").to(self.device)
            pixel_values = inputs.pixel_values
 
            generated_ids = self.model.generate(pixel_values, max_length=30)
            generated_caption = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
            return generated_caption.strip()
        except Exception as e:
            self.logger.error(f"Error captioning image: {e}")
            self.logger.exception(traceback.format_exc())
            return "Image"
 
    def clean_text(self, text):
        """Clean the given text by removing extra spaces."""
        text = text.strip()
        text = re.sub(r'\s+', ' ', text)
        return text
 
    def apply_formatting(self, text, flags):
        """Apply markdown formatting to the given text based on flags."""
        text = text.strip()
        if not text:
            return text
       
        is_bold = flags & 2**4
        is_italic = flags & 2**1
        is_monospace = flags & 2**3
        is_superscript = flags & 2**0
        is_subscript = flags & 2**5
 
        if is_monospace:
            text = f"`{text}`"
        elif is_superscript and not bool(re.search(r'\s+', text)):
            text = f"^{text}^"
        elif is_subscript and not bool(re.search(r'\s+', text)):
            text = f"~{text}~"
 
        if is_bold and is_italic:
            text = f"***{text}***"
        elif is_bold:
            text = f"**{text}**"
        elif is_italic:
            text = f"*{text}*"
 
        return f" {text} "
 
    def is_bullet_point(self, text):
        """Check if the given text is a bullet point."""
        return text.strip().startswith(tuple(self.BULLET_POINTS))
 
    def convert_bullet_to_markdown(self, text):
        """Convert a bullet point to markdown format."""
        text = re.sub(r'^\s*', '', text)
        return re.sub(rf'^[{re.escape(self.BULLET_POINTS)}]\s*', '- ', text)
 
    def is_numbered_list_item(self, text):
        """Check if the given text is a numbered list item."""
        return bool(re.match(r'^\d+\s{0,3}[.)]', text.strip()))
 
    def convert_numbered_list_to_markdown(self, text, list_counter):
        """Convert a numbered list item to markdown format."""
        text = re.sub(r'^\s*', '', text)
        return re.sub(r'^\d+\s{0,3}[.)]', f"{list_counter}. ", text)
 
    def is_horizontal_line(self, text):
        """Check if the given text represents a horizontal line."""
        return bool(re.match(r'^[_-]+$', text.strip()))
 
    def extract_links(self, page):
        """Extract links from the given page."""
        links = []
        try:
            for link in page.get_links():
                if link["kind"] == 2:  # URI link
                    links.append({
                        "rect": link["from"],
                        "uri": link["uri"]
                    })
            self.logger.info(f"Extracted {len(links)} links from the page.")
        except Exception as e:
            self.logger.error(f"Error extracting links: {e}")
            self.logger.exception(traceback.format_exc())
        return links
 
    def detect_code_block(self, prev_line, current_line):
        """Detect if the current line starts a code block."""
        patterns = {
            'python': [
                (r'^(?:from|import)\s+\w+', r'^(?:from|import|def|class|if|for|while|try|except|with)\s'),
                (r'^(?:def|class)\s+\w+', r'^\s{4}'),
                (r'^\s{4}', r'^\s{4,}')
            ],
            'javascript': [
                (r'^(?:function|const|let|var)\s+\w+', r'^(?:function|const|let|var|if|for|while|try|catch|class)\s'),
                (r'^(?:if|for|while)\s*\(', r'^\s{2,}'),
                (r'^\s{2,}', r'^\s{2,}')
            ],
            'html': [
                (r'^<(!DOCTYPE|html|head|body|div|p|a|script|style)', r'^<(!DOCTYPE|html|head|body|div|p|a|script|style)'),
                (r'^<\w+.*>$', r'^\s{2,}<'),
                (r'^\s{2,}<', r'^\s{2,}<')
            ],
            'shell': [
                (r'^(?:\$|\#)\s', r'^(?:\$|\#)\s'),
                (r'^[a-z_]+\s*=', r'^[a-z_]+\s*=')
            ],
            'bash': [
                (r'^(?:#!/bin/bash|alias|export|source)\s', r'^(?:#!/bin/bash|alias|export|source|echo|read|if|for|while|case|function)\s'),
                (r'^(?:if|for|while|case|function)\s', r'^\s{2,}'),
                (r'^\s{2,}', r'^\s{2,}')
            ],
            'cpp': [
                (r'^#include\s*<', r'^(?:#include|using|namespace|class|struct|enum|template|typedef)\s'),
                (r'^(?:class|struct|enum)\s+\w+', r'^\s{2,}'),
                (r'^\s{2,}', r'^\s{2,}')
            ],
            'java': [
                (r'^(?:import|package)\s+\w+', r'^(?:import|package|public|private|protected|class|interface|enum)\s'),
                (r'^(?:public|private|protected)\s+class\s+\w+', r'^\s{4,}'),
                (r'^\s{4,}', r'^\s{4,}')
            ],
            'json': [
                (r'^\s*{', r'^\s*["{[]'),
                (r'^\s*"', r'^\s*["}],?$'),
                (r'^\s*\[', r'^\s*[}\]],?$')
            ]
        }

        for lang, pattern_pairs in patterns.items():
            for prev_pattern, curr_pattern in pattern_pairs:
                if (re.match(prev_pattern, prev_line.strip()) and
                        re.match(curr_pattern, current_line.strip())):
                    return lang

        return None
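
    # Example (sketch): consecutive cleaned lines are tested against the
    # (previous-line, current-line) pattern pairs of each language:
    #   self.detect_code_block("import os", "import re")       ->  "python"
    #   self.detect_code_block("Plain prose.", "More prose.")  ->  None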


    def process_text_block(self, block, page_height, links, list_counter, in_code_block, code_block_content, code_block_lang, prev_line):
        """Process a text block and convert it to markdown."""
        try:
            block_rect = block["bbox"]
            if block_rect[1] < 50 or block_rect[3] > page_height - 50:
                return ""  # Skip headers and footers

            block_text = ""
            last_y1 = None
            last_font_size = None

            for line in block["lines"]:
                line_text = ""
                curr_font_size = [span["size"] for span in line["spans"]]

                for span in line["spans"]:
                    text = span["text"]
                    font_size = span["size"]
                    flags = span["flags"]
                    span_rect = span["bbox"]
 
                    if self.is_horizontal_line(text):
                        line_text += "\n---\n"
                        continue
                   
                    text = self.clean_text(text)
 
                    if text.strip():
                        header_level = self.get_header_level(font_size)                  
                        if header_level > 0:
                            text = f"\n{'#' * header_level} {text}\n\n"
 
                        else:
                            is_list_item = self.is_bullet_point(text) or self.is_numbered_list_item(text)
 
                            if is_list_item:
                                marker, content = re.split(r'(?<=^[•◦▪▫●○\d.)])\s*', text, 1)
                                formatted_content = self.apply_formatting(content, flags)
                                text = f"{marker} {formatted_content}"
                            else:
                                text = self.apply_formatting(text, flags)
                               
                    for link in links:
                        if fitz.Rect(span_rect).intersects(link["rect"]):
                            text = f"[{text.strip()}]({link['uri']})"
                            break
 
                    line_text += text
 
                if last_y1 is not None:
                    avg_last_font_size = sum(last_font_size) / len(last_font_size) if last_font_size else 0
                    avg_current_font_size = sum(curr_font_size) / len(curr_font_size)
                    font_size_changed = abs(avg_current_font_size - avg_last_font_size) > 1
 
                    if abs(line["bbox"][3] - last_y1) > 2 or font_size_changed:
                        block_text += "\n"
                       
                block_text += self.clean_text(line_text) + " "
                last_font_size = curr_font_size
                last_y1 = line["bbox"][3]
 
            markdown_content = ""
            lines = block_text.split('\n')
            for i, line in enumerate(lines):
                clean_line = self.clean_text(line)
 
                if not in_code_block:
                    code_lang = self.detect_code_block(prev_line, clean_line)
                    if code_lang:
                        in_code_block = True
                        code_block_lang = code_lang
                        code_block_content = prev_line + "\n" + clean_line + "\n"
                        prev_line = clean_line
                        continue
 
                if in_code_block:
                    code_block_content += clean_line + "\n"
                    if i == len(lines) - 1 or self.detect_code_block(clean_line, lines[i+1]) != code_block_lang:
                        markdown_content += f"```{code_block_lang}\n{code_block_content}```\n\n"
                        in_code_block = False
                        code_block_content = ""
                        code_block_lang = None
                else:
                    if self.is_bullet_point(clean_line):
                        markdown_content += "\n" + self.convert_bullet_to_markdown(clean_line)
                        list_counter = 0
                    elif self.is_numbered_list_item(clean_line):
                        list_counter += 1
                        markdown_content += "\n" + self.convert_numbered_list_to_markdown(clean_line, list_counter) 
                    else:
                        markdown_content += f"{clean_line}\n"
                        list_counter = 0
 
                prev_line = clean_line
 
            return markdown_content + "\n"
        except Exception as e:
            self.logger.error(f"Error processing text block: {e}")
            self.logger.exception(traceback.format_exc())
            return ""
 
    def process_image_block(self, page, block):
        """Process an image block and convert it to markdown."""
        try:
            image_rect = block["bbox"]
            pix = page.get_pixmap(clip=image_rect)
            image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
           
            image_filename = f"{self.pdf_filename}_image_{int(page.number)+1}_{block['number']}.png"
            image.save(f"outputs/{image_filename}")
 
            caption = self.caption_image(image)
 
            return f"![{caption}]({image_filename})\n\n"
        except Exception as e:
            self.logger.error(f"Error processing image block: {e}")
            self.logger.exception(traceback.format_exc())
            return ""
 
    def get_header_level(self, font_size):
        """Determine header level based on font size."""
        if font_size > 24:
            return 1
        elif font_size > 20:
            return 2
        elif font_size > 18:
            return 3
        elif font_size > 16:
            return 4
        elif font_size > 14:
            return 5
        elif font_size > 12:
            return 6
        else:
            return 0
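
    # Example (sketch): font size thresholds map to header depth, e.g.
    #   get_header_level(25) -> 1, get_header_level(15) -> 5,
    #   get_header_level(11) -> 0 (body text)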
 
         
    def post_process_markdown(self, markdown_content):
        """Post-process the markdown content."""
        try:
            markdown_content = re.sub(r'\n{3,}', '\n\n', markdown_content) # Remove excessive newlines
            markdown_content = re.sub(r'^\s*\d+\s*$\n?', '', markdown_content, flags=re.MULTILINE)  # Remove standalone page numbers
            markdown_content = re.sub(r' +', ' ', markdown_content)  # Remove multiple spaces
            markdown_content = re.sub(r'\s*(---\n)+', '\n\n---\n', markdown_content)  # Remove duplicate horizontal lines
 
            def remove_middle_headers(match):
                line = match.group(0)
                # Keep the initial header and remove all subsequent '#' characters
                return re.sub(r'(^#{1,6}\s).*?(?=\n)', lambda m: m.group(1) + re.sub(r'#', '', m.group(0)[len(m.group(1)):]), line)
           
            markdown_content = re.sub(r'^#{1,6}\s.*\n', remove_middle_headers, markdown_content, flags=re.MULTILINE) # Remove headers in the middle of lines       
            return markdown_content
        except Exception as e:
            self.logger.error(f"Error post-processing markdown: {e}")
            self.logger.exception(traceback.format_exc())
            return markdown_content
 
    def save_markdown(self, markdown_content):
        """Save the markdown content to a file."""
        try:
            os.makedirs("outputs", exist_ok=True)
            with open(f"outputs/{self.pdf_filename}.md", "w", encoding="utf-8") as f:
                f.write(markdown_content)
                self.logger.info("Markdown content saved successfully.")
        except Exception as e:
            self.logger.error(f"Error saving markdown content: {e}")
            self.logger.exception(traceback.format_exc())
 
def main():
    parser = argparse.ArgumentParser(description="Extract markdown-formatted content from a PDF file.")
    parser.add_argument("pdf_path", help="Path to the input PDF file")
    args = parser.parse_args()
 
    extractor = MarkdownPDFExtractor(args.pdf_path)
    markdown_pages = extractor.extract()
    return markdown_pages
 
if __name__ == "__main__":
    main()
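
# Usage (sketch): assuming this module is saved as pdf_extractor.py (the
# filename is an assumption) and document.pdf is a hypothetical input:
#
#   python pdf_extractor.py document.pdf
#
# The markdown and extracted images are written to outputs/.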

Revision as of 12:10, 6 November 2024

import os
from streamlit_interface import st
import chatbot_utils as c_t
from langchain_core.chat_history import BaseChatMessageHistory

modL = ["gpt-4o@openai","gpt-4-turbo@openai","gpt-3.5-turbo@openai","mixtral-8x7b-instruct-v0.1@aws-bedrock","llama-2-70b-chat@aws-bedrock","codellama-34b-instruct@together-ai","gemma-7b-it@fireworks-ai","claude-3-haiku@anthropic","claude-3-opus@anthropic","claude-3-sonnet@anthropic","mistral-7b-instruct-v0.1@fireworks-ai","mistral-7b-instruct-v0.2@fireworks-ai"] dynamic_provider = ["lowest-input-cost", "lowest-output-cost", "lowest-itl", "lowest-ttft", "highest-tks-per-sec"] model_reset_dict = {"slider_model_temperature": "model_temperature"} splitter_reset_dict = {"slider_chunk_size": "chunk_size","slider_chunk_overlap": "chunk_overlap"} retriever_reset_dict = {"slider_k": "k","slider_fetch_k": "fetch_k","slider_lambda_mult": "lambda_mult","slider_score_threshold": "score_threshold"} model_max_context_limit = {"mixtral-8x7b-instruct-v0.1": 32000,"llama-2-70b-chat": 4096,"llama-2-13b-chat": 4096,"mistral-7b-instruct-v0.2": 8192,"llama-2-7b-chat": 4096,"codellama-34b-instruct": 4096,"gemma-7b-it": 8192,"mistral-7b-instruct-v0.1": 512,"mixtral-8x22b-instruct-v0.1": 65536,"codellama-13b-instruct": 4096,"codellama-7b-instruct": 4096,"yi-34b-chat": 4096,"llama-3-8b-chat": 8192,"llama-3-70b-chat": 8192,"pplx-7b-chat": 4096,"mistral-medium": 32000,"gpt-4o": 32000,"gpt-4": 32000,"pplx-70b-chat": 4096,"gpt-3.5-turbo": 16000,"deepseek-coder-33b-instruct": 16000,"gemma-2b-it": 8192,"gpt-4-turbo": 128000,"mistral-small": 32000,"mistral-large": 32000,"claude-3-haiku": 200000,"claude-3-opus": 200000,"claude-3-sonnet": 200000} baseDir = os.environ['HOME'] + '/lav/dauvi/portfolio/audit/'


#---------------------------------------------------UI--------------------------------------------------

def clear_history():
    """Clears the history stored in the session state."""
    if "store" in st.session_state:
        st.session_state.store = {}
    if "messages" in st.session_state:
        st.session_state.messages = []

def cite_response():
    """Appends a citations message listing the documents retrieved for the last query."""
    messL = st.session_state.messages
    query = messL[-1][0]
    retriever = get_retriever()
    docL = retriever.get_relevant_documents(query)
    docT = [x.page_content for x in docL]
    docS = "Following list of original documents\n\n"
    for i, s in enumerate(docT):
        docS += "-------- Citation " + str(i+1) + " )\n\n" + s
    st.session_state.messages.append(("Citations for: " + query, docS))
   

def output_chunks(chain, query):
    """Generates answers for the given query and a chain.

    Args:
        chain: The chain given by the user selection.
        query: The query to generate answers for.

    Yields:
        str: The generated answer.
    """
    for chunk in chain.stream(
            {"input": query},
            config={"configurable": {"session_id": "abc123"}}
    ):
        if "answer" in chunk:
            yield chunk["answer"]
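
# Example (sketch): streaming a generated answer into the chat window,
# assuming `rag_engine` has been built as in chat_bot() below:
#
#   st.chat_message("assistant").write_stream(output_chunks(rag_engine, query))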

def get_history(session_id: str):
    """Retrieves the chat history for a given session.

    Parameters:
        session_id (str): The ID of the session.

    Returns:
        BaseChatMessageHistory: The chat history for the provided session ID.
    """
    if session_id not in st.session_state.store:
        st.session_state.store[session_id] = c_t.get_chat_message()
    return st.session_state.store[session_id]

def field_callback(field):
    """Displays a toast message when a field is updated."""
    st.toast(f"{field} Updated Successfully!", icon="🎉")

def process_inputs():
    """Processes the user inputs and performs vector storage."""
    if not st.session_state.unify_api_key or not st.session_state.endpoint or not st.session_state.pdf_docs:
        st.warning("Please enter the missing fields and upload your pdf document(s)")
    else:
        with st.status("Processing Document(s)"):
            st.write("Extracting Text")
            docL = c_t.pdf_page(st.session_state.pdf_docs, chunk_size=st.session_state.chunk_size, chunk_overlap=st.session_state.chunk_overlap)
            st.write("Splitting Text")
            st.write("Performing Vector Storage")
            if st.session_state.vector_selection == "FAISS":
                st.session_state.vector_store = c_t.faiss_vector_storage(docL, collN="web", baseDir=baseDir)
            elif st.session_state.vector_selection == "chromadb":
                st.session_state.vector_store = c_t.create_collection(docL, collN="web", baseDir=baseDir)
            elif st.session_state.vector_selection == "Pinecone":
                st.session_state.vector_store = c_t.pinecone_vector_storage(docL)
            st.session_state.processed_input = True
            st.success('File(s) Submitted successfully!')

def reset_slider_value(reset_dict):
    """Resets the value of sliders in the session state."""
    for key, value in reset_dict.items():
        del st.session_state[value]
        init_keys()  # presumably re-creates the default session values (defined elsewhere in the app)
        st.session_state[key] = st.session_state[value]

def get_retriever():
    """Creates a retriever using the vector store in the session state and the selected search parameters."""
    if st.session_state.search_type == "similarity":
        st.session_state.search_kwargs = {"k": st.session_state.k}
    elif st.session_state.search_type == "similarity_score_threshold":
        st.session_state.search_kwargs = {
            "k": st.session_state.k,
            "score_threshold": st.session_state.score_threshold
        }
    elif st.session_state.search_type == "mmr":
        st.session_state.search_kwargs = {
            "k": st.session_state.k,
            "fetch_k": st.session_state.fetch_k,
            "lambda_mult": st.session_state.lambda_mult
        }
    retriever = st.session_state.vector_store.as_retriever(
        search_type=st.session_state.search_type,
        search_kwargs=st.session_state.search_kwargs
    )
    return retriever
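
# Example (sketch): with search_type "mmr" and sliders k=4, fetch_k=20,
# lambda_mult=0.5 (illustrative values), the call above is equivalent to
#
#   st.session_state.vector_store.as_retriever(
#       search_type="mmr",
#       search_kwargs={"k": 4, "fetch_k": 20, "lambda_mult": 0.5},
#   )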

def agent_definition():
    """Returns the agent persona prompt matching the selected agent type."""
    agentDef = "You are an assistant for question-answering tasks."
    if st.session_state.agent_selection == "👶 simple":
        agentDef = "You are an assistant who is able to interact with a child."
    elif st.session_state.agent_selection == "🧑‍🎓 academic":
        agentDef = "You are an assistant providing academic level of answers."
    elif st.session_state.agent_selection == "🧑‍🔧 technical":
        agentDef = "You are a technical expert explaining the solution in detail."
    elif st.session_state.agent_selection == "🧑‍🏫 didactic":
        agentDef = "You are a teacher explaining in a didactic way to a large audience."
    elif st.session_state.agent_selection == "🤖 concise":
        agentDef = "You are a really concise assistant providing answers in few words."
    return agentDef + "\n"

def chat_bot():
    """Takes user queries and generates responses. It writes the user query and the response to the chat window."""
    if query := st.chat_input("Ask your document anything...", key="query"):
        if "processed_input" not in st.session_state:
            st.warning("Please input your details in the sidebar first")
            return
        st.chat_message("human").write(query)
        if "vector_store" not in st.session_state:
            process_inputs()
        retriever = get_retriever()
        model = c_t.get_llm()
        agentDef = agent_definition()
        if not st.session_state.history_unaware:
            rag_engine = c_t.create_conversational_rag_chain(model, retriever, get_history, agentDef)
        else:
            rag_engine = c_t.create_qa_chain(model, retriever, agentDef)

        response = st.chat_message("assistant").write_stream(output_chunks(rag_engine, query))
        if not st.session_state.history_unaware:
            st.session_state.messages.append((query, response))