# army / app.py — Streamlit RAG chatbot that answers questions from a PDF.
# (HuggingFace Space page residue preserved as a comment: uploaded by M17idd,
#  "Update app.py", commit f21cdf6 verified, raw / history / blame, 5.72 kB)
import os
import time
import streamlit as st
from transformers import AutoTokenizer, AutoModel
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import Document as LangchainDocument
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
from groq import Groq
import torch
from langchain_core.retrievers import BaseRetriever
# ----------------- Page settings -----------------
st.set_page_config(page_title="چت‌بات ارتش - فقط از PDF", page_icon="🪖", layout="wide")
# ----------------- Load the FarsiBERT model -----------------
# Persian BERT checkpoint used for all embeddings (chunks and queries).
model_name = "HooshvareLab/bert-fa-zwnj-base"  # Persian (Farsi) BERT model
tokenizer = AutoTokenizer.from_pretrained(model_name)
# NOTE: loaded eagerly at import time; downloads weights on the first run.
model = AutoModel.from_pretrained(model_name)
# ----------------- لود PDF و ساخت ایندکس -----------------
@st.cache_resource
def build_pdf_index(pdf_path: str = "test1.pdf",
                    chunk_size: int = 500,
                    chunk_overlap: int = 50):
    """Load a PDF, split it into overlapping chunks, and embed each chunk.

    Generalized from the original: the PDF path and splitter settings are now
    parameters whose defaults reproduce the original hard-coded behavior.

    Parameters
    ----------
    pdf_path : str
        Path of the PDF file to index.
    chunk_size, chunk_overlap : int
        Settings for RecursiveCharacterTextSplitter.

    Returns
    -------
    tuple
        (documents, embeddings): the chunk Documents and, per chunk, one
        mean-pooled FarsiBERT embedding as a numpy array of shape (1, hidden).
    """
    with st.spinner('📄 در حال پردازش فایل PDF...'):
        pages = PyPDFLoader(pdf_path).load()

        # Split every page into overlapping character chunks.
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
        )
        texts = []
        for page in pages:
            texts.extend(splitter.split_text(page.page_content))

        # Wrap each chunk as a LangChain Document.
        documents = [LangchainDocument(page_content=t) for t in texts]

        # Mean-pool the last hidden state of FarsiBERT as a fixed-size
        # embedding for each chunk.
        embeddings = []
        for doc in documents:
            inputs = tokenizer(doc.page_content, return_tensors="pt",
                               padding=True, truncation=True)
            with torch.no_grad():
                outputs = model(**inputs)
            embeddings.append(outputs.last_hidden_state.mean(dim=1).numpy())

        # No FAISS/vector store: the raw embedding list is returned and
        # searched brute-force by SimpleRetriever.
        return documents, embeddings
# ----------------- ساختن Index از PDF -----------------
# ----------------- Define the LLM from Groq -----------------
# SECURITY FIX: the Groq API key was hard-coded (and therefore leaked) in this
# file. Read it from the environment instead; set GROQ_API_KEY before running.
groq_api_key = os.environ.get("GROQ_API_KEY", "")
# NOTE(review): this raw Groq client is never used below — all calls go
# through GroqLLM / the RetrievalQA chain. Kept for backward compatibility.
client = Groq(api_key=groq_api_key)

class GroqLLM(OpenAI):
    """LangChain OpenAI LLM pointed at Groq's OpenAI-compatible endpoint."""

    def __init__(self, api_key, model_name):
        # Groq exposes an OpenAI-compatible REST API at this base URL.
        super().__init__(openai_api_key=api_key, model_name=model_name,
                         base_url="https://api.groq.com/openai/v1")

# Instantiate the Groq-backed LLM used by the RetrievalQA chain below.
llm = GroqLLM(api_key=groq_api_key, model_name="deepseek-r1-distill-llama-70b")
# ----------------- Build the SimpleRetriever -----------------
from langchain_core.retrievers import BaseRetriever
from langchain_core.documents import Document
from typing import List
from dataclasses import dataclass, field

@dataclass
class SimpleRetriever(BaseRetriever):
    """Brute-force retriever over pre-computed FarsiBERT chunk embeddings.

    Scores each stored chunk by the (unnormalised) dot product between its
    embedding and the query's mean-pooled FarsiBERT embedding, and returns
    the top 5 chunks.

    BUGFIX: the original class defined a no-argument ``__init__`` that
    shadowed the dataclass-generated constructor, so the module-level call
    ``SimpleRetriever(documents=..., embeddings=...)`` raised TypeError.
    The custom ``__init__`` is removed so the dataclass constructor accepts
    those keyword arguments.
    """
    documents: List[Document] = field(default_factory=list)
    embeddings: List = field(default_factory=list)

    def _get_relevant_documents(self, query: str) -> List[Document]:
        """Return the 5 highest-scoring chunk Documents for *query*."""
        inputs = tokenizer(query, return_tensors="pt", padding=True, truncation=True)
        with torch.no_grad():
            outputs = model(**inputs)
        # Mean-pool the last hidden state, matching build_pdf_index().
        query_embedding = outputs.last_hidden_state.mean(dim=1).numpy()
        # Dot-product score against every stored chunk embedding.
        similarities = [float((query_embedding * doc_embedding).sum())
                        for doc_embedding in self.embeddings]
        # BUGFIX: sort on the score only. The original sorted
        # (score, Document) tuples with reverse=True, which raises TypeError
        # on tied scores because Document objects are not orderable.
        ranked = sorted(zip(similarities, self.documents),
                        key=lambda pair: pair[0], reverse=True)
        return [doc for _, doc in ranked[:5]]
# Build (or fetch the cached) PDF index and wire it into a retriever.
documents, embeddings = build_pdf_index()
retriever = SimpleRetriever(documents=documents, embeddings=embeddings)
# Then build the QA chain ("stuff" = concatenate retrieved chunks into the prompt).
chain = RetrievalQA.from_chain_type(
    llm=llm,
    retriever=retriever,
    chain_type="stuff",
    input_key="question"
)
# ----------------- Chat session state -----------------
# Initialise persistent chat state on the first run of the script.
st.session_state.setdefault('messages', [])
st.session_state.setdefault('pending_prompt', None)

# ----------------- Replay the conversation so far -----------------
for entry in st.session_state.messages:
    with st.chat_message(entry['role']):
        st.markdown(f"🗨️ {entry['content']}", unsafe_allow_html=True)

# ----------------- Chat input -----------------
user_text = st.chat_input("سوالی در مورد فایل بپرس...")
if user_text:
    # Record the question, flag it as pending, and rerun so the answer
    # section below picks it up.
    st.session_state.messages.append({'role': 'user', 'content': user_text})
    st.session_state.pending_prompt = user_text
    st.rerun()
# ----------------- Model answer strictly from the PDF -----------------
if st.session_state.pending_prompt:
    with st.chat_message('ai'):
        status = st.empty()
        status.markdown("🤖 در حال فکر کردن از روی PDF...")
        try:
            # Ask the QA chain; it answers only from the indexed PDF.
            result = chain.run(f"سوال: {st.session_state.pending_prompt}")
            answer = result.strip()
        except Exception as err:
            answer = f"خطا در پاسخ‌دهی: {str(err)}"
        status.empty()

        # Typewriter-style streaming of the answer, word by word.
        shown = ""
        slot = st.empty()
        for token in answer.split():
            shown += token + " "
            slot.markdown(shown + "▌")
            time.sleep(0.03)
        slot.markdown(shown)

        # Persist the answer and clear the pending flag.
        st.session_state.messages.append({'role': 'ai', 'content': shown})
        st.session_state.pending_prompt = None