import re
import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer, WordNetLemmatizer

# Download the required NLTK corpora before anything uses them
nltk.download('stopwords')
nltk.download('wordnet')

# Initialize stemmer, lemmatizer, and the stopword set
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
STOPWORDS = set(stopwords.words('english'))

def clean_text(text):
    # Original text
    # Example: "I won't go there! This is a Testing @username https://example.com <p>Paragraphs!</p> #happy :)"

    text = text.lower()  # Convert all characters in text to lowercase
    # Example after this step: "i won't go there! this is a testing @username https://example.com <p>paragraphs!</p> #happy :)"

    text = re.sub(r'https?://\S+|www\.\S+', '', text)  # Remove URLs
    # Example after this step: "i won't go there! this is a testing @username <p>paragraphs!</p> #happy :)"

    text = re.sub(r'<.*?>', '', text)  # Remove HTML tags
    # Example after this step: "i won't go there! this is a testing @username paragraphs! #happy :)"

    text = re.sub(r'@\w+', '', text)  # Remove mentions
    # Example after this step: "i won't go there! this is a testing paragraphs! #happy :)"

    text = re.sub(r'#\w+', '', text)  # Remove hashtags
    # Example after this step: "i won't go there! this is a testing paragraphs! :)"

    # Translate emoticons to their word equivalents
    emoticons = {':)': 'smile', ':-)': 'smile', ':(': 'sad', ':-(': 'sad'}
    words = text.split()
    words = [emoticons.get(word, word) for word in words]
    text = " ".join(words)
    # Example after this step: "i won't go there! this is a testing paragraphs! smile"

    text = re.sub(r'[^\w\s]', '', text)  # Remove punctuation (this also strips apostrophes, so "won't" becomes "wont")
    # Example after this step: "i wont go there this is a testing paragraphs smile"

    text = re.sub(r'\s+[a-zA-Z]\s+', ' ', text)  # Remove single characters surrounded by whitespace (a single character at the very start or end is not matched)
    # Example after this step: "i wont go there this is testing paragraphs smile"

    text = re.sub(r'\s+', ' ', text)  # Collapse consecutive whitespace into a single space
    # Example after this step: "i wont go there this is testing paragraphs smile"

    # Remove stopwords ("i", "this", "is", and "there" are all in NLTK's English stopword list)
    text = ' '.join(word for word in text.split() if word not in STOPWORDS)
    # Example after this step: "wont go testing paragraphs smile"

    # Stemming (reuse the module-level stemmer rather than creating a new one per call)
    text = ' '.join(stemmer.stem(word) for word in text.split())
    # Example after this step: "wont go test paragraph smile"

    # Lemmatization (e.g. flies --> fly). Note: lemmatize() treats every word
    # as a noun by default, so verbs like "went" only become "go" when it is
    # called with pos='v'.
    text = ' '.join(lemmatizer.lemmatize(word) for word in text.split())

    return text
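

# Quick sanity check of the full pipeline; the sample string is the same one
# traced in the step-by-step comments above:
sample = "I won't go there! This is a Testing @username https://example.com <p>Paragraphs!</p> #happy :)"
print(clean_text(sample))  # expected: "wont go test paragraph smile"
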
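# The loop below assumes LangChain-style document objects. As a hypothetical,
# self-contained stand-in (a real pipeline would get docs from a document
# loader), here is a minimal Document class plus one sample document:
from dataclasses import dataclass, field

@dataclass
class Document:
    page_content: str
    metadata: dict = field(default_factory=dict)

docs = [Document(page_content="I won't go there! This is a Testing @username https://example.com <p>Paragraphs!</p> #happy :)")]
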
# Assuming docs is a list of objects where each object has a page_content and a metadata attribute.
for doc in docs:
    original_content = doc.page_content  # Save the original page_content.
    doc.page_content = clean_text(original_content)  # Update page_content with the cleaned text.

    # Assuming metadata is a dictionary; store the original page_content under the key 'prompt'.
    if doc.metadata is None:  # Check if metadata is None and initialize if necessary.
        doc.metadata = {}
    doc.metadata['prompt'] = original_content

print(docs[0])