import os
import time

import streamlit as st
from groq import Groq
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import Document as LangchainDocument
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chains import RetrievalQA
# ChatOpenAI replaces the completion-style OpenAI LLM: Groq serves llama3 models
# only through its OpenAI-compatible *chat* endpoint.
from langchain.chat_models import ChatOpenAI

# Page setup (title: "Army chatbot - only from the PDF").
st.set_page_config(page_title="چتبات ارتش - فقط از PDF", page_icon="🪖", layout="wide")

# Read the Groq API key from the environment instead of hard-coding a secret in the source.
groq_api_key = os.environ.get("GROQ_API_KEY")

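# The key is supplied from the shell before launching the app, e.g.
#   export GROQ_API_KEY="gsk_..."
# (GROQ_API_KEY is simply the variable name this script reads above).
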
@st.cache_resource
def build_pdf_index():
    """Load the PDF, split it into chunks, and build a FAISS index (cached across reruns)."""
    with st.spinner('📄 در حال پردازش فایل PDF...'):  # "Processing the PDF file..."
        loader = PyPDFLoader("test1.pdf")
        pages = loader.load()

        # Split each page into overlapping chunks so retrieval stays focused.
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=50
        )

        texts = []
        for page in pages:
            texts.extend(splitter.split_text(page.page_content))

        # Wrap the raw chunks back into Document objects for the vector store.
        documents = [LangchainDocument(page_content=t) for t in texts]

        # Embed the chunks locally with a small sentence-transformers model.
        embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

        vectordb = FAISS.from_documents(documents, embedding=embeddings)

        return vectordb


# Build the index once per session; later reruns reuse the cached resource.
index = build_pdf_index()

# Raw Groq SDK client; not used by the RetrievalQA chain below, which reaches Groq
# through the OpenAI-compatible endpoint, but kept available for direct API calls.
client = Groq(api_key=groq_api_key)


class GroqLLM(ChatOpenAI):
    """ChatOpenAI pointed at Groq's OpenAI-compatible endpoint (llama3 models are chat-only)."""
    def __init__(self, api_key, model_name):
        super().__init__(
            openai_api_key=api_key,
            model_name=model_name,
            openai_api_base="https://api.groq.com/openai/v1",
        )


llm = GroqLLM(api_key=groq_api_key, model_name="llama3-70b-8192")

# "stuff" chain: the retrieved chunks are stuffed directly into the prompt.
chain = RetrievalQA.from_chain_type(
    llm=llm,
    retriever=index.as_retriever(),
    chain_type="stuff",
    input_key="question"
)

# Chat history and the question waiting to be answered live in session state.
if 'messages' not in st.session_state:
    st.session_state.messages = []

if 'pending_prompt' not in st.session_state:
    st.session_state.pending_prompt = None

# Replay the conversation so far.
for msg in st.session_state.messages:
    with st.chat_message(msg['role']):
        st.markdown(f"🗨️ {msg['content']}", unsafe_allow_html=True)

prompt = st.chat_input("سوالی در مورد فایل بپرس...")  # "Ask a question about the file..."

if prompt:
    # Store the question and rerun so it is rendered as part of the history
    # before the answer is generated.
    st.session_state.messages.append({'role': 'user', 'content': prompt})
    st.session_state.pending_prompt = prompt
    st.rerun()

if st.session_state.pending_prompt:
    with st.chat_message('ai'):
        thinking = st.empty()
        thinking.markdown("🤖 در حال فکر کردن از روی PDF...")  # "Thinking over the PDF..."

        try:
            # Answer the pending question from the indexed PDF.
            response = chain.run(f"سوال: {st.session_state.pending_prompt}")
            answer = response.strip()
        except Exception as e:
            answer = f"خطا در پاسخ‌دهی: {str(e)}"  # "Error while answering: ..."

        thinking.empty()

        # Simulated typing: reveal the answer word by word.
        full_response = ""
        placeholder = st.empty()
        for word in answer.split():
            full_response += word + " "
            placeholder.markdown(full_response + "▌")
            time.sleep(0.03)

        placeholder.markdown(full_response)
        st.session_state.messages.append({'role': 'ai', 'content': full_response})
        st.session_state.pending_prompt = None
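
# To run this app locally (assuming the script is saved as app.py and test1.pdf
# sits next to it):
#   streamlit run app.py
# GROQ_API_KEY must be set in the environment before launching (see above).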