Update app.py
app.py
CHANGED
@@ -1,122 +1,98 @@
 import streamlit as st
+import PyPDF2
 from sentence_transformers import SentenceTransformer
-from transformers import pipeline
 import faiss
 import numpy as np
+from transformers import pipeline

-        font-size: 1.2em;
-        margin-bottom: 1rem;
-    }
-    .question-box {
-        background-color: #fff;
-        padding: 1rem;
-        border-radius: 10px;
-        box-shadow: 0px 2px 10px rgba(0,0,0,0.1);
-        margin-bottom: 1rem;
-    }
-    .example {
-        color: #444;
-        background: #e9f0ff;
-        padding: 0.5rem;
-        border-radius: 8px;
-        margin: 3px 0;
-        cursor: pointer;
-    }
-    </style>
-""", unsafe_allow_html=True)
-
-# ---------- PDF Reading ----------
-def load_pdf_text(pdf_path):
-    reader = PdfReader(pdf_path)
-    text = ''
-    for page in reader.pages:
-        if page.extract_text():
-            text += page.extract_text()
-    return text
-
-# ---------- Chunking ----------
-def chunk_text(text, max_len=500):
-    sentences = text.split('. ')
-    chunks, chunk = [], ''
-    for sentence in sentences:
-        if len(chunk) + len(sentence) <= max_len:
-            chunk += sentence + '. '
-        else:
-            chunks.append(chunk.strip())
-            chunk = sentence + '. '
-    if chunk:
-        chunks.append(chunk.strip())
-    return chunks
-
-# ---------- Embedding ----------
-@st.cache_resource
-def embed_chunks(chunks):
-    model = SentenceTransformer('all-MiniLM-L6-v2')
-    embeddings = model.encode(chunks)
-    return embeddings, model
+st.set_page_config(page_title="📄 PDF QA RAG App", layout="wide")
+
+# Custom styles
+st.markdown("""
+    <style>
+    .main {background-color: #f7faff;}
+    .block-container {padding-top: 2rem;}
+    h1 {color: #4051b5;}
+    .stTextInput>div>div>input {border: 2px solid #d0d7ff;}
+    .stButton button {background-color: #4051b5; color: white; border-radius: 6px;}
+    .stSidebar {background-color: #eaf0ff;}
+    .sample-dropdown label {font-weight: bold;}
+    </style>
+""", unsafe_allow_html=True)
+
+st.title("📄 Ask Me Anything From Your PDF")
+st.caption("Built using RAG (Retrieval-Augmented Generation) ✨")

+st.sidebar.header("📁 Upload PDF")
+uploaded_file = st.sidebar.file_uploader("Upload a PDF file", type=["pdf"])
+
+default_questions = [
+    "What is machine learning?",
+    "Explain generalization in ML.",
+    "What are different types of ML?",
+    "How is ML used in computer vision?",
+    "Describe the importance of training data."
+]
+
+@st.cache_data
+def load_pdf(file):
+    reader = PyPDF2.PdfReader(file)
+    return [page.extract_text() or "" for page in reader.pages]
+
+def chunk_text(pages, max_len=1000):
+    text = " ".join(pages)
+    words = text.split()
+    return [' '.join(words[i:i+max_len]) for i in range(0, len(words), max_len)]
+
+def create_faiss_index(chunks, model):
+    embeddings = model.encode(chunks)
     index = faiss.IndexFlatL2(embeddings.shape[1])
     index.add(np.array(embeddings))
+    return index, embeddings
+
+def retrieve_context(question, chunks, index, model, k=6):
+    q_embedding = model.encode([question])
+    _, I = index.search(np.array(q_embedding), k)
+    return "\n\n".join([chunks[i] for i in I[0]])
+
+if uploaded_file:
+    st.success("✅ PDF uploaded successfully!")
+
+    pages = load_pdf(uploaded_file)
+    chunks = chunk_text(pages)
+    model = SentenceTransformer('all-MiniLM-L6-v2')
+    index, _ = create_faiss_index(chunks, model)
+    qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2")
+
+    st.subheader("💬 Ask a question")
+
+    col1, col2 = st.columns([3, 1])
+    with col1:
+        question = st.text_input("Enter your question here...", placeholder="e.g. What is deep learning?")
+    with col2:
+        if st.button("Ask"):
+            with st.spinner("🧠 Thinking..."):
+                context = retrieve_context(question, chunks, index, model)
+                result = qa_pipeline(question=question, context=context)
+            with st.expander("📝 Answer", expanded=True):
+                st.markdown(result['answer'])
+
+    st.divider()
+    st.subheader("✨ Sample Questions")
+    selected_q = st.selectbox("Pick one to try:", default_questions, key="sample-dropdown")
+    if st.button("Try Selected Question"):
+        with st.spinner("⏳ Searching..."):
+            context = retrieve_context(selected_q, chunks, index, model)
+            result = qa_pipeline(question=selected_q, context=context)
+        with st.expander(f"💡 Answer to: '{selected_q}'", expanded=True):
+            st.markdown(result['answer'])
+
+    st.divider()
+    st.subheader("📑 Preview PDF Pages")
+    for i, page in enumerate(pages[:3]):
+        st.markdown(f"**Page {i+1}**")
+        st.code(page[:800] + "..." if len(page) > 800 else page)
+
+else:
+    st.info("Upload a PDF from the sidebar to begin.")
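One behavioural note on this change: the removed chunk_text packed whole sentences until a 500-character budget was hit, while the new chunk_text slices the concatenated pages into fixed 1000-word windows, so chunk boundaries no longer follow sentence breaks. A quick way to see the difference, using both functions copied from the diff (renamed here only so they can coexist in one script):

def chunk_text_old(text, max_len=500):
    # Removed version: greedy sentence packing by character count.
    sentences = text.split('. ')
    chunks, chunk = [], ''
    for sentence in sentences:
        if len(chunk) + len(sentence) <= max_len:
            chunk += sentence + '. '
        else:
            chunks.append(chunk.strip())
            chunk = sentence + '. '
    if chunk:
        chunks.append(chunk.strip())
    return chunks

def chunk_text_new(pages, max_len=1000):
    # New version: fixed-size word windows over the concatenated pages.
    text = " ".join(pages)
    words = text.split()
    return [' '.join(words[i:i+max_len]) for i in range(0, len(words), max_len)]

sample = "Machine learning builds models from data. " * 200
print(len(chunk_text_old(sample)), "sentence-packed chunks of up to ~500 characters")
print(len(chunk_text_new([sample])), "word-window chunks of up to 1000 words")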
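For reference, a minimal standalone sketch of the retrieval flow this change wires into Streamlit: load a PDF, chunk it into word windows, index the chunk embeddings with FAISS, retrieve the nearest chunks for a question, and run extractive QA over them. The sample.pdf path and the question are placeholders, and the sketch assumes PyPDF2, sentence-transformers, faiss (e.g. the faiss-cpu wheel), and transformers with a PyTorch backend are installed.

import PyPDF2
import faiss
import numpy as np
from sentence_transformers import SentenceTransformer
from transformers import pipeline

# Load and chunk the PDF into fixed-size word windows (mirrors the new chunk_text).
reader = PyPDF2.PdfReader("sample.pdf")  # placeholder path
pages = [page.extract_text() or "" for page in reader.pages]
words = " ".join(pages).split()
chunks = [" ".join(words[i:i + 1000]) for i in range(0, len(words), 1000)]

# Embed the chunks and build a FAISS L2 index, as in create_faiss_index.
model = SentenceTransformer("all-MiniLM-L6-v2")
embeddings = model.encode(chunks)
index = faiss.IndexFlatL2(embeddings.shape[1])
index.add(np.array(embeddings))

# Retrieve the nearest chunks for a question and run extractive QA over them.
question = "What is machine learning?"  # placeholder question
q_embedding = model.encode([question])
_, I = index.search(np.array(q_embedding), min(6, len(chunks)))
context = "\n\n".join(chunks[i] for i in I[0])

qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2")
print(qa_pipeline(question=question, context=context)["answer"])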