import os
import string
from collections import Counter

import requests
from dotenv import load_dotenv
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer

from src.application.text.identity import extract_entities

load_dotenv()

GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
SEARCH_ENGINE_ID = os.getenv("SEARCH_ENGINE_ID")


def search_by_google(
    query,
    num_results=10,
    is_exact_terms=False,
) -> dict:
    """
    Searches the Google Custom Search Engine for the given query.

    Args:
        query: The search query.
        num_results: The number of results to return (default: 10).
        is_exact_terms: Whether to use exact-terms search (True) or regular search (False).

    Returns:
        A dictionary containing the search results, or None if there was an error.
    """
    url = "https://www.googleapis.com/customsearch/v1"
    params = {
        "key": GOOGLE_API_KEY,
        "cx": SEARCH_ENGINE_ID,
        "num": num_results,
    }
    if is_exact_terms:
        params["exactTerms"] = query
    else:
        params["q"] = query.replace('"', "")
    # A timeout keeps the request from hanging indefinitely on a stalled connection.
    response = requests.get(url, params=params, timeout=30)
    if response.status_code == 200:
        return response.json()
    else:
        print(f"Error: {response.status_code}, {response.text}")
        return None
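
# A minimal usage sketch (assumes GOOGLE_API_KEY and SEARCH_ENGINE_ID are set
# in the environment or a .env file; the query string is hypothetical):
#
#     results = search_by_google('"quoted phrase" extra terms', num_results=5)
#     if results:
#         for item in results.get("items", []):
#             print(item.get("title"), item.get("link"))
#
# "items" is the result list in a Google Custom Search JSON API response.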


def get_most_frequent_words(input_text, number_word=32):
    """
    Builds a search phrase from the top words in the input text,
    excluding stop words and punctuation.

    Args:
        input_text: The input text as a string.
        number_word: The number of top words to include.

    Returns:
        A string of the most frequent words joined by spaces.
        Returns an empty string if input is not a string or is empty.
    """
    if not isinstance(input_text, str) or not input_text:
        return ""
    words = word_tokenize(input_text.lower())  # Tokenize and lowercase
    stop_words = set(stopwords.words("english"))
    punctuation = set(string.punctuation)  # All punctuation characters
    filtered_words = [
        word for word in words
        if word.isalnum() and word not in stop_words and word not in punctuation
    ]
    word_frequencies = Counter(filtered_words)
    # Join the most frequent words into a single search phrase.
    top_words = [word for word, _ in word_frequencies.most_common(number_word)]
    return " ".join(top_words)
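
# Illustrative call (assumes the NLTK "punkt" and "stopwords" data are
# downloaded): stop words like "the", "on", and "with" are filtered out, so
# the phrase is built from the remaining words ranked by frequency:
#
#     get_most_frequent_words("the cat sat on the mat with the cat", 3)
#     # -> "cat sat mat"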


def get_chunk(input_text, chunk_length=32, num_chunk=3):
    """
    Splits the input text into chunks of a specified length.

    Args:
        input_text: The input text as a string.
        chunk_length: The desired length of each chunk (in words).
        num_chunk: The maximum number of chunks to create.

    Returns:
        A list of string chunks.
        Returns an empty list if input is invalid.
    """
    if not isinstance(input_text, str):
        return []
    chunks = []
    input_words = input_text.split()  # Split by any whitespace
    for i in range(num_chunk):
        start_index = i * chunk_length
        end_index = (i + 1) * chunk_length
        chunk = " ".join(input_words[start_index:end_index])
        if chunk:  # Only append non-empty chunks
            chunks.append(chunk)
    return chunks
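
# Illustrative call: with chunk_length=3 and num_chunk=2, the first six words
# form two 3-word chunks and any words beyond them are ignored:
#
#     get_chunk("one two three four five six seven", chunk_length=3, num_chunk=2)
#     # -> ["one two three", "four five six"]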


def get_keywords(text, num_keywords=5):
    """Return the top `num_keywords` keywords from a document using TF-IDF."""
    # Create a TF-IDF vectorizer that drops English stop words
    vectorizer = TfidfVectorizer(stop_words="english")
    # Fit and transform the text
    tfidf_matrix = vectorizer.fit_transform([text])
    # Get feature names (words)
    feature_names = vectorizer.get_feature_names_out()
    # Get TF-IDF scores
    tfidf_scores = tfidf_matrix.toarray()[0]
    # Sort words by TF-IDF score, highest first
    word_scores = list(zip(feature_names, tfidf_scores))
    word_scores.sort(key=lambda x: x[1], reverse=True)
    # Return top keywords
    return [word for word, score in word_scores[:num_keywords]]
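
# Note: when fit on a single document, the IDF factor is the same for every
# term, so the ranking effectively reduces to term frequency. Illustrative
# call (scikit-learn drops "is" and "and" as English stop words):
#
#     get_keywords("data science is fun and data is everywhere", num_keywords=1)
#     # -> ["data"]  ("data" appears twice, the other terms once)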


def generate_search_phrases(input_text):
    """
    Generates different types of phrases for search purposes.

    Args:
        input_text: The input text.

    Returns:
        A list of search phrases:
        - A phrase of the most frequent words.
        - The original input text.
        - Text chunks of the input.
        - A phrase of named entities plus TF-IDF keywords.
    """
    if not isinstance(input_text, str):
        return []
    search_phrases = []
    # Method 1: Get most frequent words
    search_phrases.append(get_most_frequent_words(input_text))
    # Method 2: Get the whole text
    search_phrases.append(input_text)
    # Method 3: Split text into chunks
    search_phrases.extend(get_chunk(input_text))
    # Method 4: Combine named entities and TF-IDF keywords
    entities = extract_entities(input_text)
    keywords = get_keywords(input_text, 16)
    search_phrase = " ".join(entities) + " " + " ".join(keywords)
    search_phrases.append(search_phrase)
    return search_phrases
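

# A minimal sketch of driving the module end to end; the sample text is
# hypothetical, and running it requires the src.application.text.identity
# module plus the NLTK data to be available:
if __name__ == "__main__":
    sample_text = (
        "NASA and SpaceX launched a new mission from Florida. "
        "Engineers monitored the rocket as it reached orbit."
    )
    for phrase in generate_search_phrases(sample_text):
        print(phrase)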