Update app.py
app.py
CHANGED
@@ -17,6 +17,7 @@ from langchain.vectorstores import VectorstoreIndexCreator
from sentence_transformers import SentenceTransformer
import faiss

+
# ----------------- Page settings -----------------
st.set_page_config(page_title="چت بات توانا", page_icon="🪖", layout="wide")

@@ -100,23 +101,51 @@ st.markdown("""

# ----------------- Load the PDF and build the index -----------------

-@st.cache_resource
@st.cache_resource
def get_pdf_index():
    with st.spinner('📄 در حال پردازش فایل PDF...'):
+        # Load the PDF file
        loader = [PyPDFLoader('test1.pdf')]

+        # Load the embedding model from Hugging Face
+        model_name = "togethercomputer/m2-bert-80M-8k-retrieval"  # the model to use from Hugging Face
        model = SentenceTransformer(model_name)

+        # Split the texts into small chunks
+        splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=0)
+        texts = []
+        for doc in loader:
+            texts.extend(splitter.split_text(doc.page_content))
+
+        # Initialize the progress bar
+        progress_bar = st.progress(0)
+        total_docs = len(texts)
+
+        embeddings = []
+        batch_size = 128
+        for i in range(0, total_docs, batch_size):
+            batch_texts = texts[i:i+batch_size]
+            batch_embeddings = model.encode(batch_texts, convert_to_numpy=True)
+            embeddings.extend(batch_embeddings)
+
+            # Update the progress bar
+            progress_bar.progress(min((i + batch_size) / total_docs, 1.0))

+        # Add a short delay so the progress bar can finish
+        time.sleep(1)
+        progress_bar.empty()
+
+        # Build the index with FAISS
+        embeddings = np.array(embeddings)
+        index = faiss.IndexFlatL2(embeddings.shape[1])  # FAISS index
+        index.add(embeddings)
+
-        return index_creator.from_loaders(loader)
+        # Return the index
+        return VectorstoreIndexCreator(
+            embedding=model.encode,
+            text_splitter=splitter
+        ).from_loaders(loader)

# ----------------- Define the LLM from Groq -----------------
llm = ChatOpenAI(
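The new get_pdf_index builds a FAISS index inside the function but still returns VectorstoreIndexCreator(...).from_loaders(loader), while the last hunk unpacks three values from it (documents, embeddings, index = get_pdf_index()). Below is a minimal sketch, not taken from this commit, of a variant whose return value matches that unpacking; the helper name get_pdf_index_tuple and everything not shown in the diff (the .load() call, the Document wrapping) are assumptions.

import numpy as np
import faiss
from langchain.document_loaders import PyPDFLoader
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from sentence_transformers import SentenceTransformer

def get_pdf_index_tuple():
    # Hypothetical variant: returns (documents, embeddings, index) so the later
    # unpacking and SimpleRetriever(documents=..., embeddings=..., index=...) line up.
    pages = PyPDFLoader('test1.pdf').load()  # PyPDFLoader needs .load() before .page_content is available

    # Split the pages into small chunks, as in the commit
    splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=0)
    documents = []
    for page in pages:
        for chunk in splitter.split_text(page.page_content):
            documents.append(Document(page_content=chunk))

    # Embed the chunks in batches with the same model used in the diff
    model = SentenceTransformer("togethercomputer/m2-bert-80M-8k-retrieval", trust_remote_code=True)
    embeddings = model.encode([d.page_content for d in documents],
                              convert_to_numpy=True, batch_size=128)

    # Flat L2 FAISS index over the chunk embeddings
    index = faiss.IndexFlatL2(embeddings.shape[1])
    index.add(embeddings.astype(np.float32))

    return documents, embeddings, index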
@@ -125,6 +154,7 @@ llm = ChatOpenAI(
    model="meta-llama/Llama-3.3-70B-Instruct-Turbo-Free"
)

+
# ----------------- Define SimpleRetriever -----------------
class SimpleRetriever(BaseRetriever):
    documents: List[Document] = Field(...)
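The diff shows only the model argument of the ChatOpenAI call, so the endpoint configuration is not visible here. As a hedged illustration (the base URL, environment variables, and import path below are placeholders and assumptions, not values from this commit), a ChatOpenAI client is typically pointed at an OpenAI-compatible provider like this:

import os
from langchain.chat_models import ChatOpenAI  # import path assumed; app.py may import it differently

# Placeholder configuration for whichever OpenAI-compatible endpoint serves this model.
llm = ChatOpenAI(
    openai_api_base=os.environ.get("OPENAI_API_BASE", "https://api.example.com/v1"),
    openai_api_key=os.environ.get("OPENAI_API_KEY", ""),
    model="meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
    temperature=0,
)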
@@ -135,12 +165,14 @@ class SimpleRetriever(BaseRetriever):
        sentence_model = SentenceTransformer("togethercomputer/m2-bert-80M-8k-retrieval", trust_remote_code=True)
        query_embedding = sentence_model.encode(query, convert_to_numpy=True)

+        # Search for similar documents
        _, indices = self.index.search(np.expand_dims(query_embedding, axis=0), 5)  # find the 5 most similar documents

        return [self.documents[i] for i in indices[0]]

+
# ----------------- Build the index -----------------
-documents, embeddings, index =
+documents, embeddings, index = get_pdf_index()
retriever = SimpleRetriever(documents=documents, embeddings=embeddings, index=index)

# ----------------- Build the chain -----------------
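The final context line opens the chain-building section, which lies outside this diff. A minimal sketch, assuming a standard LangChain RetrievalQA wiring around the llm and retriever defined above (the chain_type and the sample query are assumptions):

from langchain.chains import RetrievalQA

# Hypothetical wiring; the chain actually built in app.py is not part of this diff.
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",      # stuff the retrieved chunks directly into the prompt
    retriever=retriever,
)

answer = qa_chain.run("What is this document about?")  # returns the model's answer as a string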