M17idd committed on
Commit
9fe2e05
·
1 Parent(s): a4d9300
Files changed (1) hide show
  1. PdfChatbot.py +114 -0
PdfChatbot.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# --- App bootstrap: page configuration and injected global styles ---
import time
import streamlit as st

# st.set_page_config must be the first Streamlit call in the script.
st.set_page_config(page_title="چت بات ارتش", page_icon="🪖", layout="wide")

# CSS hoisted to a named constant: light page background plus
# card-like rounded chat bubbles.
_CHAT_CSS = """
<style>
.main {
background-color: #f4f6f7;
}
.stChatMessage {
background-color: #e8f0fe;
border-radius: 12px;
padding: 10px;
margin-bottom: 10px;
}
</style>
"""
st.markdown(_CHAT_CSS, unsafe_allow_html=True)
17
+
18
import os
from typing import List

from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings.base import Embeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from together import Together
27
+
28
class TogetherEmbeddings(Embeddings):
    """LangChain-compatible embedding provider backed by the Together API."""

    def __init__(self, model_name: str, api_key: str):
        # Model identifier sent to Together's embeddings endpoint.
        self.model_name = model_name
        self.client = Together(api_key=api_key)

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a batch of texts in one API call; one vector per input."""
        result = self.client.embeddings.create(
            model=self.model_name,
            input=texts,
        )
        return [entry.embedding for entry in result.data]

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query string by delegating to the batch method."""
        vectors = self.embed_documents([text])
        return vectors[0]
42
+
43
@st.cache_resource
def get_pdf_index(pdf_path: str = 'C:/Users/ici/Desktop/test1.pdf'):
    """Build (and cache for the process lifetime) a vector index over a PDF.

    Args:
        pdf_path: Path of the PDF to index. Defaults to the previously
            hard-coded location so existing callers behave unchanged;
            pass a different path to index another document.

    Returns:
        The index wrapper produced by ``VectorstoreIndexCreator.from_loaders``,
        whose ``.vectorstore`` backs the retrieval chain below.
    """
    with st.spinner('لطفاً لحظه‌ای صبر کنید...'):
        loaders = [PyPDFLoader(pdf_path)]
        # SECURITY: an API key was committed in source here. Prefer the
        # TOGETHER_API_KEY environment variable; the literal fallback is
        # kept only for backward compatibility and the exposed key should
        # be revoked and rotated.
        embeddings = TogetherEmbeddings(
            model_name="togethercomputer/m2-bert-80M-8k-retrieval",
            api_key=os.environ.get(
                "TOGETHER_API_KEY",
                "0291f33aee03412a47fa5d8e562e515182dcc5d9aac5a7fb5eefdd1759005979",
            ),
        )
        # Small chunks (300 chars, no overlap) keep the 'stuff' chain prompt short.
        splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=0)
        return VectorstoreIndexCreator(
            embedding=embeddings,
            text_splitter=splitter,
        ).from_loaders(loaders)
55
+
56
# Build the cached PDF index once; subsequent reruns reuse the resource.
index = get_pdf_index()

# Chat model served through Together's OpenAI-compatible endpoint.
# SECURITY: the key was committed in source. Prefer the TOGETHER_API_KEY
# environment variable; the literal fallback only preserves current
# behavior — the exposed key should be revoked and rotated.
llm = ChatOpenAI(
    base_url="https://api.together.xyz/v1",
    api_key=os.environ.get(
        "TOGETHER_API_KEY",
        '0291f33aee03412a47fa5d8e562e515182dcc5d9aac5a7fb5eefdd1759005979',
    ),
    model="meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
)

# 'stuff' chain type: all retrieved chunks are packed into a single prompt.
# input_key='question' lets callers invoke chain.run(<question string>).
chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type='stuff',
    retriever=index.vectorstore.as_retriever(),
    input_key='question',
)
68
+
69
# --- Page header: army logo beside the app title ---
header_logo, header_title = st.columns([1, 10])
with header_logo:
    st.image("army.png", width=70)
with header_title:
    st.title('🤖 چت‌بات هوشمند ارتش')
76
+
77
# One-time initialization of conversation state (survives Streamlit reruns).
for _key, _default in (('messages', []), ('pending_prompt', None)):
    if _key not in st.session_state:
        st.session_state[_key] = _default

# Replay the stored transcript so history is visible after every rerun.
for entry in st.session_state.messages:
    with st.chat_message(entry['role']):
        st.markdown(f"🗨️ {entry['content']}", unsafe_allow_html=True)
86
+
87
# Read the next user turn. The text is appended to history and stashed in
# pending_prompt, then the script reruns so the user bubble renders
# before the (slow) model call starts.
user_text = st.chat_input('چطور می‌تونم کمک کنم؟')

if user_text:
    st.session_state.messages.append({'role': 'user', 'content': user_text})
    st.session_state.pending_prompt = user_text
    st.rerun()
93
+
94
# Answer the stashed prompt (set by the chat-input handler on the
# previous rerun) inside an assistant chat bubble.
if st.session_state.pending_prompt:
    with st.chat_message('ai'):
        # Interim indicator while retrieval + the LLM call run.
        thinking_placeholder = st.empty()
        thinking_placeholder.markdown("🤖 در حال فکر کردن...")

        # The 'persian ' prefix nudges the model to answer in Persian.
        response = chain.run(f'persian {st.session_state.pending_prompt}')
        # The chain output can echo its prompt template; keep only the text
        # after the final "Helpful Answer:" marker (or the whole response
        # when the marker is absent).
        helpful_answer = response.split("Helpful Answer:")[-1]
        if not helpful_answer.strip():
            helpful_answer = "اطلاعات دقیقی در دسترس نیست، اما می‌توانم به شما کمک کنم تا از منابع دیگر بررسی کنید."

        thinking_placeholder.empty()
        # Simulated typing: reveal the answer word by word with a cursor.
        placeholder = st.empty()
        words = helpful_answer.split()
        shown = []
        for word in words:
            shown.append(word)
            placeholder.markdown(" ".join(shown) + " ▌")
            time.sleep(0.03)

        # BUGFIX: the original accumulated `chunk + " "`, so every stored
        # and displayed answer carried a trailing space; join() yields the
        # clean text.
        full_response = " ".join(words)
        placeholder.markdown(full_response)
        st.session_state.messages.append({'role': 'ai', 'content': full_response})
        # Clear the stash so the next rerun doesn't re-answer this prompt.
        st.session_state.pending_prompt = None