import os
import time
import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import Document as LangchainDocument
from langchain.chains import RetrievalQA
import torch
from langchain_core.retrievers import BaseRetriever
from langchain_core.documents import Document
from typing import List
from pydantic import Field
from sentence_transformers import SentenceTransformer
import numpy as np
from langchain.indexes.vectorstore import VectorstoreIndexCreator
from sentence_transformers import SentenceTransformer
import faiss
# ----------------- Page configuration -----------------
st.set_page_config(page_title="چت بات توانا", page_icon="🪖", layout="wide")
st.markdown("""
""", unsafe_allow_html=True)

# Center the logo by placing it in the middle of three weighted columns.
_left, _center, _right = st.columns([1, 2, 1])
with _center:
    st.image("army.png", width=240)
    st.markdown("""
""", unsafe_allow_html=True)
# ----------------- لود PDF و ساخت ایندکس -----------------
@st.cache_resource
def get_pdf_index():
    """Load 'test1.pdf', chunk it, embed the chunks and build a FAISS index.

    Returns:
        tuple: ``(docs, embeddings, index)`` where
            docs       -- list[Document], one per ~300-char text chunk,
            embeddings -- float32 numpy array, shape (n_chunks, dim),
            index      -- faiss.IndexFlatL2 built over those embeddings.

    Cached by Streamlit (`st.cache_resource`) so the PDF is processed once
    per server process, not on every rerun.
    """
    with st.spinner('📄 در حال پردازش فایل PDF...'):
        # Extract the PDF text page by page.
        loader = PyPDFLoader('test1.pdf')
        pages = loader.load_and_split()

        # Split every page into ~300-character chunks (no overlap).
        splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=0)
        chunks = []
        for page in pages:
            chunks.extend(splitter.split_text(page.page_content))
        # Wrap chunks as Document objects so RetrievalQA can read .page_content.
        docs = [Document(page_content=chunk) for chunk in chunks]

        # Embed locally with the SAME model the retriever uses at query time,
        # so document and query vectors live in the same embedding space.
        # (The previous code referenced an undefined `TogetherEmbeddings` name
        # and shipped a hard-coded API key — both removed.)
        model = SentenceTransformer(
            "togethercomputer/m2-bert-80M-8k-retrieval", trust_remote_code=True
        )

        progress_bar = st.progress(0)
        total_docs = len(chunks)
        batch_size = 512
        batches = []
        for i in range(0, total_docs, batch_size):
            batches.append(
                model.encode(chunks[i:i + batch_size], convert_to_numpy=True)
            )
            progress_bar.progress(min((i + batch_size) / total_docs, 1.0))
        progress_bar.empty()

        if batches:
            embeddings = np.vstack(batches).astype(np.float32)
        else:
            # Empty/unreadable PDF: keep a consistent 2-D shape so FAISS
            # construction below still works.
            embeddings = np.zeros((0, 1), dtype=np.float32)

        # Exact L2 index; returns (distances, indices) on .search().
        index = faiss.IndexFlatL2(embeddings.shape[1])
        if len(embeddings):
            index.add(embeddings)
        return docs, embeddings, index
# ----------------- تعریف LLM از Groq -----------------
# LLM served through Together AI's OpenAI-compatible endpoint.
# SECURITY: an API key was previously hard-coded here; it must be treated as
# leaked and rotated. The key is now read from the environment instead.
llm = ChatOpenAI(
    base_url="https://api.together.xyz/v1",
    api_key=os.environ.get("TOGETHER_API_KEY", ""),
    model="meta-llama/Llama-3.3-70B-Instruct-Turbo-Free"
)
# ----------------- تعریف SimpleRetriever -----------------
class SimpleRetriever(BaseRetriever):
    """FAISS-backed retriever: embeds the query and returns the top-5 chunks.

    Fields:
        documents  -- the chunk Documents, in the same order as the vectors
                      that were added to `index`.
        embeddings -- the document vectors (kept for reference; search goes
                      through `index`).
        index      -- a built FAISS index over `embeddings`.
    """
    documents: List[Document] = Field(...)
    embeddings: List[np.ndarray] = Field(...)
    index: faiss.Index

    class Config:
        # faiss.Index is not a pydantic-known type; without this the model
        # definition itself raises at import time.
        arbitrary_types_allowed = True

    @classmethod
    def _encoder(cls):
        """Lazily load the sentence encoder once per process.

        The previous implementation re-instantiated SentenceTransformer on
        every query, reloading model weights from disk each time.
        """
        cached = getattr(SimpleRetriever, "_encoder_cache", None)
        if cached is None:
            cached = SentenceTransformer(
                "togethercomputer/m2-bert-80M-8k-retrieval", trust_remote_code=True
            )
            SimpleRetriever._encoder_cache = cached
        return cached

    def _get_relevant_documents(self, query: str) -> List[Document]:
        query_embedding = self._encoder().encode(query, convert_to_numpy=True)
        # FAISS expects a (n_queries, dim) float32 matrix.
        query_matrix = np.expand_dims(query_embedding.astype(np.float32), axis=0)
        _, indices = self.index.search(query_matrix, 5)
        # FAISS pads with -1 when the index holds fewer than k vectors;
        # filter those out instead of indexing documents[-1].
        return [
            self.documents[i]
            for i in indices[0]
            if 0 <= i < len(self.documents)
        ]
# ----------------- ساخت Index -----------------
# NOTE(review): this unpacking requires get_pdf_index() to return a 3-tuple
# (documents, embeddings, faiss_index); as originally written that function
# returns a VectorstoreIndexCreator result instead, so this line would raise —
# confirm the loader's return value matches this contract.
documents, embeddings, index = get_pdf_index()
retriever = SimpleRetriever(documents=documents, embeddings=embeddings, index=index)
# "stuff" chain: all retrieved chunks are concatenated into a single prompt.
# input_key="question" means chain.run(...) / chain({"question": ...}) is the
# expected call shape.
chain = RetrievalQA.from_chain_type(
llm=llm,
retriever=retriever,
chain_type="stuff",
input_key="question"
)
# ----------------- استیت برای چت -----------------
# Initialise chat history and the one-shot pending prompt on first run.
for _key, _default in (('messages', []), ('pending_prompt', None)):
    if _key not in st.session_state:
        st.session_state[_key] = _default

# Replay the conversation so far.
for message in st.session_state.messages:
    with st.chat_message(message['role']):
        st.markdown(f"🗨️ {message['content']}", unsafe_allow_html=True)

# Read new user input; stash it and rerun so the user bubble renders
# before the (slow) model call happens on the next pass.
user_text = st.chat_input("سوالی در مورد فایل بپرس...")
if user_text:
    st.session_state.messages.append({'role': 'user', 'content': user_text})
    st.session_state.pending_prompt = user_text
    st.rerun()
# ----------------- پاسخ مدل -----------------
# ----------------- Model answer -----------------
if st.session_state.pending_prompt:
    with st.chat_message('ai'):
        # Placeholder shown while the chain is running.
        thinking = st.empty()
        thinking.markdown("🤖 در حال فکر کردن ...")

        try:
            raw_answer = chain.run(f"سوال: {st.session_state.pending_prompt}")
            answer = raw_answer.strip()
        except Exception as err:
            answer = f"خطا در پاسخدهی: {str(err)}"

        thinking.empty()

        # Fake "streaming": reveal the answer word by word.
        placeholder = st.empty()
        shown = ""
        for token in answer.split():
            shown += token + " "
            placeholder.markdown(shown + "▌")
            time.sleep(0.03)
        placeholder.markdown(shown)

        st.session_state.messages.append({'role': 'ai', 'content': shown})
        st.session_state.pending_prompt = None