Update app.py
Browse files
app.py
CHANGED
@@ -1,29 +1,14 @@
|
|
1 |
import os
|
2 |
import time
|
3 |
from langchain_together import TogetherEmbeddings
|
4 |
-
|
5 |
-
|
6 |
import streamlit as st
|
7 |
from langchain.chat_models import ChatOpenAI
|
8 |
from langchain.document_loaders import PyPDFLoader
|
9 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
10 |
from langchain.schema import Document
|
11 |
from langchain.chains import RetrievalQA
|
12 |
-
from langchain_core.retrievers import BaseRetriever
|
13 |
-
from langchain_core.prompts import PromptTemplate
|
14 |
-
from typing import List
|
15 |
-
from pydantic import Field
|
16 |
-
import numpy as np
|
17 |
-
from sentence_transformers import SentenceTransformer
|
18 |
-
import faiss
|
19 |
-
from langchain.indexes import VectorstoreIndexCreator
|
20 |
from langchain.vectorstores import FAISS
|
21 |
-
from langchain.
|
22 |
-
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
|
23 |
-
|
24 |
-
from transformers import AutoTokenizer
|
25 |
-
|
26 |
-
|
27 |
|
28 |
# ----------------- تنظیمات صفحه -----------------
|
29 |
st.set_page_config(page_title="چت بات توانا", page_icon="🪖", layout="wide")
|
@@ -107,20 +92,22 @@ st.markdown("""
|
|
107 |
""", unsafe_allow_html=True)
|
108 |
|
109 |
# ----------------- لود PDF و ساخت ایندکس -----------------
|
110 |
-
# tokenizer = AutoTokenizer.from_pretrained("HooshvareLab/gpt2-fa")
|
111 |
-
# tokenizer.pad_token = tokenizer.eos_token # یا میتوانید این خط را برای توکن جدید فعال کنید: tokenizer.add_special_tokens({'pad_token': '[PAD]'})
|
112 |
-
|
113 |
@st.cache_resource
|
114 |
def get_pdf_index():
|
115 |
with st.spinner('📄 در حال پردازش فایل PDF...'):
|
|
|
116 |
pdf_loader = PyPDFLoader('test1.pdf')
|
117 |
-
# embeddings = SentenceTransformer("Thomslionel/embedings")
|
118 |
-
# embeddings = HuggingFaceInstructEmbeddings(model_name="aidal/Persian-Mistral-7B")
|
119 |
|
|
|
120 |
embeddings = TogetherEmbeddings(
|
121 |
api_key="0291f33aee03412a47fa5d8e562e515182dcc5d9aac5a7fb5eefdd1759005979"
|
122 |
)
|
123 |
-
|
|
|
|
|
|
|
|
|
|
|
124 |
return index
|
125 |
|
126 |
# ----------------- بارگذاری دیتا -----------------
|
@@ -145,10 +132,12 @@ if 'messages' not in st.session_state:
|
|
145 |
if 'pending_prompt' not in st.session_state:
|
146 |
st.session_state.pending_prompt = None
|
147 |
|
|
|
148 |
for msg in st.session_state.messages:
|
149 |
with st.chat_message(msg['role']):
|
150 |
st.markdown(f"🗨️ {msg['content']}", unsafe_allow_html=True)
|
151 |
|
|
|
152 |
prompt = st.chat_input("چطور میتونم کمک کنم؟")
|
153 |
|
154 |
if prompt:
|
@@ -161,6 +150,7 @@ if st.session_state.pending_prompt:
|
|
161 |
thinking = st.empty()
|
162 |
thinking.markdown("🤖 در حال فکر کردن...")
|
163 |
|
|
|
164 |
response = chain.run(f'پاسخ را فقط به زبان فارسی جواب بده. سوال: {st.session_state.pending_prompt}')
|
165 |
answer = response.split("Helpful Answer:")[-1].strip()
|
166 |
if not answer:
|
@@ -169,6 +159,8 @@ if st.session_state.pending_prompt:
|
|
169 |
thinking.empty()
|
170 |
full_response = ""
|
171 |
placeholder = st.empty()
|
|
|
|
|
172 |
for word in answer.split():
|
173 |
full_response += word + " "
|
174 |
placeholder.markdown(full_response + "▌")
|
@@ -177,7 +169,3 @@ if st.session_state.pending_prompt:
|
|
177 |
placeholder.markdown(full_response)
|
178 |
st.session_state.messages.append({'role': 'ai', 'content': full_response})
|
179 |
st.session_state.pending_prompt = None
|
180 |
-
|
181 |
-
|
182 |
-
|
183 |
-
|
|
|
1 |
import os
|
2 |
import time
|
3 |
from langchain_together import TogetherEmbeddings
|
|
|
|
|
4 |
import streamlit as st
|
5 |
from langchain.chat_models import ChatOpenAI
|
6 |
from langchain.document_loaders import PyPDFLoader
|
7 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
8 |
from langchain.schema import Document
|
9 |
from langchain.chains import RetrievalQA
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
from langchain.vectorstores import FAISS
|
11 |
+
from langchain.indexes import VectorstoreIndexCreator
|
|
|
|
|
|
|
|
|
|
|
12 |
|
13 |
# ----------------- تنظیمات صفحه -----------------
|
14 |
st.set_page_config(page_title="چت بات توانا", page_icon="🪖", layout="wide")
|
|
|
92 |
""", unsafe_allow_html=True)
|
93 |
|
94 |
# ----------------- لود PDF و ساخت ایندکس -----------------
|
|
|
|
|
|
|
95 |
@st.cache_resource
def get_pdf_index():
    """Load test1.pdf, embed its chunks, and return a cached vector index.

    Built once per Streamlit session via ``st.cache_resource`` so the PDF is
    not re-parsed and re-embedded on every rerun.

    Returns:
        VectorstoreIndexWrapper: FAISS-backed index over the PDF chunks,
        used by the retrieval chain elsewhere in the app.

    Raises:
        KeyError: if the TOGETHER_API_KEY environment variable is not set.
    """
    with st.spinner('📄 در حال پردازش فایل PDF...'):
        # Load the PDF to be indexed.
        pdf_loader = PyPDFLoader('test1.pdf')

        # Embeddings used for retrieval over the Persian text.
        # SECURITY FIX: the API key was hard-coded in source (and is now in
        # VCS history — it must be revoked). Read it from the environment.
        embeddings = TogetherEmbeddings(
            api_key=os.environ["TOGETHER_API_KEY"]
        )

        # Build the index: 300-character chunks, no overlap.
        index = VectorstoreIndexCreator(
            embedding=embeddings,
            text_splitter=RecursiveCharacterTextSplitter(
                chunk_size=300, chunk_overlap=0
            ),
        ).from_loaders([pdf_loader])

        # Show how many chunks were processed, as a sanity check.
        # BUG FIX: LangChain's VectorStore defines no __len__, so the original
        # len(index.vectorstore) raises TypeError at runtime; the FAISS
        # wrapper exposes the vector count as .index.ntotal instead.
        st.write(f"تعداد بخش‌های پردازش‌شده: {index.vectorstore.index.ntotal}")
        return index
|
112 |
|
113 |
# ----------------- بارگذاری دیتا -----------------
|
|
|
132 |
if 'pending_prompt' not in st.session_state:
|
133 |
st.session_state.pending_prompt = None
|
134 |
|
135 |
+
# نمایش پیامها در چت
|
136 |
for msg in st.session_state.messages:
|
137 |
with st.chat_message(msg['role']):
|
138 |
st.markdown(f"🗨️ {msg['content']}", unsafe_allow_html=True)
|
139 |
|
140 |
+
# دریافت ورودی از کاربر
|
141 |
prompt = st.chat_input("چطور میتونم کمک کنم؟")
|
142 |
|
143 |
if prompt:
|
|
|
150 |
thinking = st.empty()
|
151 |
thinking.markdown("🤖 در حال فکر کردن...")
|
152 |
|
153 |
+
# اجرای جستجو در ایندکس برای دریافت پاسخ
|
154 |
response = chain.run(f'پاسخ را فقط به زبان فارسی جواب بده. سوال: {st.session_state.pending_prompt}')
|
155 |
answer = response.split("Helpful Answer:")[-1].strip()
|
156 |
if not answer:
|
|
|
159 |
thinking.empty()
|
160 |
full_response = ""
|
161 |
placeholder = st.empty()
|
162 |
+
|
163 |
+
# نمایش پاسخ به صورت تدریجی
|
164 |
for word in answer.split():
|
165 |
full_response += word + " "
|
166 |
placeholder.markdown(full_response + "▌")
|
|
|
169 |
placeholder.markdown(full_response)
|
170 |
st.session_state.messages.append({'role': 'ai', 'content': full_response})
|
171 |
st.session_state.pending_prompt = None
|
|
|
|
|
|
|
|