import os
import time
from typing import List

import streamlit as st
import torch
from PIL import Image
from transformers import AutoTokenizer, AutoModel

from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.base import Embeddings
from langchain.vectorstores import FAISS
from langchain.indexes import VectorstoreIndexCreator
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
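# A Persian RAG chatbot: chunks a local PDF, embeds the chunks, retrieves them with FAISS,
# and answers through a Together-hosted Llama model via LangChain's RetrievalQA.
# Launch with `streamlit run <this_script>.py` (substitute whatever this file is saved as).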
st.set_page_config(page_title="چت بات توانا", page_icon="🪖", layout="wide")
# Styling (the custom-CSS block is empty in this version; add rules inside the string if needed)
st.markdown("""
""", unsafe_allow_html=True)
# Center the logo using three equal-width columns
col1, col2, col3 = st.columns([1, 1, 1])
with col2:
    try:
        image = Image.open("army.png")
        st.image(image, width=240)
    except FileNotFoundError:
        # "File 'army.png' not found. Make sure it is next to the main Streamlit file."
        st.error("📁 فایل 'army.png' پیدا نشد. مطمئن شو کنار فایل اصلی Streamlit هست.")
# Title (the heading markup is empty in this version)
st.markdown("""
""", unsafe_allow_html=True)
class HuggingFaceEmbeddings(Embeddings):
    """Mean-pooled transformer embeddings, compatible with LangChain's Embeddings interface."""

    def __init__(self, model_name: str):
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModel.from_pretrained(model_name)

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        embeddings = []
        for text in texts:
            inputs = self.tokenizer(text, return_tensors="pt", truncation=True, padding=True)
            with torch.no_grad():
                outputs = self.model(**inputs)
            # Mean-pool the last hidden state over the token axis: one fixed-size vector per text
            embeddings.append(outputs.last_hidden_state.mean(dim=1).squeeze().tolist())
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        return self.embed_documents([text])[0]
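# Quick sanity check for the wrapper (hypothetical snippet, not executed by the app):
#   emb = HuggingFaceEmbeddings("SajjadAyoubi/xlm-roberta-large-fa-qa")
#   vec = emb.embed_query("سلام")  # a single mean-pooled vector (hidden-size floats)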
@st.cache_resource
def get_pdf_index(pdf_path='C:/Users/itel/Desktop/your work data.pdf'):
    loaders = [PyPDFLoader(pdf_path)]
    # Use the mean-pooling wrapper defined above: HuggingFaceInstructEmbeddings expects
    # instructor-* models, which this Persian QA model is not.
    embeddings = HuggingFaceEmbeddings(model_name="SajjadAyoubi/xlm-roberta-large-fa-qa")
    index = VectorstoreIndexCreator(
        embedding=embeddings,
        vectorstore_cls=FAISS,
        text_splitter=RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=0),
    ).from_loaders(loaders)
    return index
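# Build the index once per server process; st.cache_resource returns the cached copy on reruns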
index = get_pdf_index()
llm = ChatOpenAI(
    base_url="https://api.together.xyz/v1",
    # Read the key from an environment variable (name is our convention) instead of
    # hard-coding a secret in source control
    api_key=os.environ.get("TOGETHER_API_KEY", ""),
    model="meta-llama/Llama-3.3-70B-Instruct-Turbo-Free"
)
# 'stuff' simply concatenates all retrieved chunks into a single prompt
chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type='stuff',
    retriever=index.vectorstore.as_retriever(),
    input_key='question'
)
# Session state: full chat history plus the prompt awaiting an answer
if 'messages' not in st.session_state:
    st.session_state.messages = []
if 'pending_prompt' not in st.session_state:
    st.session_state.pending_prompt = None
# Replay the conversation so far
for msg in st.session_state.messages:
    with st.chat_message(msg['role']):
        st.markdown(f"🗨️ {msg['content']}", unsafe_allow_html=True)
prompt = st.chat_input("چطور میتونم کمک کنم؟")  # "How can I help?"
if prompt:
    # Store the prompt and rerun so the user message renders from history before answering
    st.session_state.messages.append({'role': 'user', 'content': prompt})
    st.session_state.pending_prompt = prompt
    st.rerun()
if st.session_state.pending_prompt:
    with st.chat_message('ai'):
        thinking = st.empty()
        thinking.markdown("🤖 در حال فکر کردن...")  # "Thinking..."
        # Instruct the model to answer only in Persian, then run retrieval + generation
        response = chain.run(f'لطفاً فقط به زبان فارسی پاسخ بده: {st.session_state.pending_prompt}')
        answer = response.split("Helpful Answer:")[-1].strip()
        if not answer:
            answer = "متأسفم، اطلاعات دقیقی در این مورد ندارم."  # "Sorry, I have no precise information on this."
        thinking.empty()
        # Stream the answer word by word for a typing effect
        full_response = ""
        placeholder = st.empty()
        for word in answer.split():
            full_response += word + " "
            placeholder.markdown(full_response + "▌")
            time.sleep(0.03)
        placeholder.markdown(full_response)
        st.session_state.messages.append({'role': 'ai', 'content': full_response})
    st.session_state.pending_prompt = None
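# Assumed dependencies (the legacy `langchain.*` import paths imply langchain < 0.1):
# streamlit, langchain, openai, faiss-cpu, pypdf, transformers, torch, pillow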