melk2025 committed
Commit ef5f214 · verified · 1 Parent(s): 032ce2e

Update app.py

Files changed (1): app.py (+21, -7)
app.py CHANGED
@@ -20,6 +20,7 @@ import os
 import requests
 import time
 import tempfile
+from langdetect import detect
 
 API_KEY = os.environ.get("OPENROUTER_API_KEY")
 
@@ -82,18 +83,31 @@ qa_questions = list(qa_data.keys())
 qa_answers = list(qa_data.values())
 qa_embeddings = semantic_model.encode(qa_questions, convert_to_tensor=True)
 #-------------------------bm25---------------------------------
-from rank_bm25 import BM25Okapi
-from nltk.tokenize import word_tokenize
-import nltk
-nltk.download('punkt')
 
+def detect_language(text):
+    try:
+        lang = detect(text)
+        return 'french' if lang.startswith('fr') else 'english'
+    except:
+        return 'english'  # default fallback
+
+def clean_and_tokenize(text, lang):
+    tokens = word_tokenize(text.lower(), language=lang)
+    try:
+        stop_words = set(stopwords.words(lang))
+        return [t for t in tokens if t not in stop_words]
+    except:
+        return tokens  # fallback if stopwords not found
 
 def rerank_with_bm25(docs, query):
-    tokenized_docs = [word_tokenize(doc['content'].lower()) for doc in docs]
+    lang = detect_language(query)
+
+    tokenized_docs = [clean_and_tokenize(doc['content'], lang) for doc in docs]
     bm25 = BM25Okapi(tokenized_docs)
-    tokenized_query = word_tokenize(query.lower())
-
+
+    tokenized_query = clean_and_tokenize(query, lang)
     scores = bm25.get_scores(tokenized_query)
+
     top_indices = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)[:3]
     return [docs[i] for i in top_indices]
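
Note that the new helpers still call BM25Okapi, word_tokenize, and stopwords.words even though this hunk deletes the local rank_bm25/nltk imports, so those imports presumably live elsewhere in app.py, outside the lines shown. Below is a minimal runnable sketch of the reranker as it stands after this commit; only the three function bodies come from the diff, while the import block, the nltk.download calls, and the sample docs are illustrative assumptions.

from langdetect import detect
from rank_bm25 import BM25Okapi
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import nltk

# Assumed setup: the commit removes nltk.download('punkt'), so the
# tokenizer models and stopword lists must already be available at
# runtime; fetch them here to make the sketch self-contained.
nltk.download('punkt')
nltk.download('stopwords')

def detect_language(text):
    # Map langdetect's ISO code ('fr', 'en', ...) to the language names
    # that NLTK's tokenizer and stopword corpus expect.
    try:
        return 'french' if detect(text).startswith('fr') else 'english'
    except Exception:
        return 'english'  # default fallback

def clean_and_tokenize(text, lang):
    # Lowercase, tokenize with the language-specific Punkt model, and
    # drop stopwords; fall back to raw tokens if no stopword list exists.
    tokens = word_tokenize(text.lower(), language=lang)
    try:
        stop_words = set(stopwords.words(lang))
        return [t for t in tokens if t not in stop_words]
    except Exception:
        return tokens

def rerank_with_bm25(docs, query):
    # Score every doc against the query with BM25 and keep the top 3.
    lang = detect_language(query)
    tokenized_docs = [clean_and_tokenize(doc['content'], lang) for doc in docs]
    bm25 = BM25Okapi(tokenized_docs)
    tokenized_query = clean_and_tokenize(query, lang)
    scores = bm25.get_scores(tokenized_query)
    top_indices = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)[:3]
    return [docs[i] for i in top_indices]

# Hypothetical usage with made-up docs: a French query routes both the
# query and the documents through the French tokenizer and stopword list.
docs = [
    {'content': "Les horaires d'ouverture sont de 9h a 17h."},
    {'content': "Opening hours are 9am to 5pm."},
    {'content': "Contactez le support par email."},
]
print(rerank_with_bm25(docs, "Quels sont les horaires d'ouverture ?"))

With this setup, a French query is tokenized with the French Punkt model and filtered against French stopwords before BM25 scoring, which is the point of the commit.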