import time

import numpy as np
import pandas as pd
import faiss
import streamlit as st
from langchain.text_splitter import RecursiveCharacterTextSplitter
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
# ----------------- Page settings -----------------
st.set_page_config(page_title="رزم یار ارتش", page_icon="🪖", layout="wide")
# ----------------- Custom styling -----------------
st.markdown("""
""", unsafe_allow_html=True)  # placeholder for custom CSS
# ----------------- Main body -----------------
with st.sidebar:
    st.image("log.png", width=90)
    st.markdown("""
    """, unsafe_allow_html=True)  # placeholder for sidebar HTML
    st.markdown("""
    """, unsafe_allow_html=True)

# Main content
st.markdown("""
""", unsafe_allow_html=True)  # placeholder for page header HTML
st.markdown('👋 سلام! چطور میتونم کمکتون کنم؟', unsafe_allow_html=True)
# User text input (disabled in favor of st.chat_input below)
#user_input = st.text_input("پیام خود را وارد کنید...")
#if user_input:
#    st.markdown(f'📩 شما: {user_input}', unsafe_allow_html=True)
# ⚙️ Simple, fast embedding model
@st.cache_resource
def get_embedding_model():
    return SentenceTransformer("HooshvareLab/bert-fa-zwnj-base")
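# st.cache_resource keeps a single SentenceTransformer instance alive for the
# whole server process, so the model is loaded once rather than on every rerun.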
@st.cache_resource
def process_csv(csv_file):
    df = pd.read_csv(csv_file)
    # Take the first column as the document texts, dropping empty rows
    texts = df.iloc[:, 0].astype(str).tolist()
    texts = [text for text in texts if text.strip()]

    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=200,
        chunk_overlap=50,
        length_function=len,
        separators=["\n\n", "\n", " ", ""]
    )
    split_texts = []
    for text in texts:
        split_texts.extend(text_splitter.split_text(text))

    # Embed the chunks and build an HNSW index over them
    model = get_embedding_model()
    embeddings = model.encode(split_texts, show_progress_bar=True)
    dim = embeddings.shape[1]
    index = faiss.IndexHNSWFlat(dim, 32)
    index.hnsw.efSearch = 50
    index.add(np.array(embeddings))
    return split_texts, embeddings, index
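# Because of st.cache_resource, the CSV is parsed, chunked, and embedded only
# once per server process; later reruns reuse the cached (texts, embeddings, index).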
# Path to the CSV file
csv_file_path = 'output (1).csv'
texts, vectors, index = process_csv(csv_file_path)
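# Sketch (not part of the original flow): the index could also be persisted to
# disk so it survives process restarts; "faiss_index.bin" is a hypothetical path.
# faiss.write_index(index, "faiss_index.bin")
# index = faiss.read_index("faiss_index.bin")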
# Chat interface state
if 'messages' not in st.session_state:
    st.session_state.messages = []
if 'pending_prompt' not in st.session_state:
    st.session_state.pending_prompt = None

# Replay the conversation history
for msg in st.session_state.messages:
    with st.chat_message(msg['role']):
        st.markdown(msg['content'], unsafe_allow_html=True)

query = st.chat_input("سؤالت را بپرس...")
if query:
    st.session_state.messages.append({'role': 'user', 'content': query})
    st.session_state.pending_prompt = query
    st.rerun()
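# Storing the query in session_state and calling st.rerun() lets the user's
# message render from history first; the answer is then generated on the rerun.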
if st.session_state.pending_prompt:
    with st.chat_message("ai"):
        thinking = st.empty()
        thinking.markdown("🤖 در حال جستجو...")

        # Embed the query and retrieve the 10 nearest chunks from the HNSW index
        model = get_embedding_model()
        query_vector = model.encode([st.session_state.pending_prompt])
        D, I = index.search(np.array(query_vector), k=10)
        top_indices = I[0]
        top_texts = [texts[i] for i in top_indices]
        top_vectors = np.array([vectors[i] for i in top_indices])

        # Re-rank the candidates by cosine similarity and keep the closest text
        similarities = cosine_similarity(query_vector, top_vectors)[0]
        best_match_relative_index = np.argmax(similarities)
        best_match_index = top_indices[best_match_relative_index]
        best_match_text = texts[best_match_index]

        response = "🧠 پاسخ سوال :\n\n" + best_match_text
        thinking.empty()

        # Stream the answer word by word for a typing effect
        full_response = ""
        placeholder = st.empty()
        for word in response.split():
            full_response += word + " "
            placeholder.markdown(full_response + "▌")
            time.sleep(0.02)
        placeholder.markdown(full_response)

    st.session_state.messages.append({'role': 'ai', 'content': full_response})
    st.session_state.pending_prompt = None
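# --- Sketch (assumption, not used above): cosine search without the re-ranking pass ---
# With L2-normalized vectors, inner product equals cosine similarity, so a
# faiss.IndexFlatIP index returns cosine scores directly; queries must be
# normalized the same way before calling search(). Illustrative helper only.
def build_cosine_index(embeddings: np.ndarray) -> faiss.Index:
    vectors = np.asarray(embeddings, dtype="float32").copy()
    faiss.normalize_L2(vectors)  # in-place L2 normalization
    cosine_index = faiss.IndexFlatIP(vectors.shape[1])
    cosine_index.add(vectors)
    return cosine_index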