Update app.py
app.py
CHANGED
@@ -1,129 +1,129 @@
 import streamlit as st
 from langchain.prompts import PromptTemplate
 from langchain.chains import LLMChain
 from langchain_google_genai import ChatGoogleGenerativeAI
 import fitz
 import json
 
 # Title
 st.title("PDF-based MCQ Generator")
 
 # Sidebar
 st.sidebar.title("Upload & Settings")
 
 # Upload PDF
 pdf_file = st.sidebar.file_uploader("Upload a PDF file", type=["pdf"])
 
 # Number of questions
 number_of_questions = st.sidebar.slider("Number of questions", min_value=1, max_value=20, value=5)
 
 # Session states
 if "mcqs" not in st.session_state:
     st.session_state.mcqs = []
 if "current_q" not in st.session_state:
     st.session_state.current_q = 0
 if "user_answers" not in st.session_state:
     st.session_state.user_answers = {}
 if "quiz_finished" not in st.session_state:
     st.session_state.quiz_finished = False
 
 # Gemini setup
 GOOGLE_API_KEY = "AIzaSyCB5NLx39vOAlfRQBDmnEG3uLBgLraGvH4"
 llm = ChatGoogleGenerativeAI(
     model="gemini-2.0-flash",
     google_api_key=GOOGLE_API_KEY,
     temperature=0.7
 )
 
 template = """
 You are an expert MCQ generator. Generate {number} unique multiple-choice questions from the given text.
 Each question must have exactly 1 correct answer and 3 incorrect options.
 Strictly return output in the following JSON format (no explanations, no markdown):
 
 [
   {{
     "question": "What is ...?",
     "options": ["Option A", "Option B", "Option C", "Option D"],
     "answer": "Option D"
   }},
   ...
 ]
 
 TEXT:
 {text}
 """
 
 prompt = PromptTemplate(
     input_variables=["text", "number"],
     template=template
 )
 
 mcq_chain = LLMChain(llm=llm, prompt=prompt)
 
 # PDF text extractor
 def extract_text_from_pdf(pdf):
     doc = fitz.open(stream=pdf.read(), filetype="pdf")
     full_text = ""
     for page in doc:
         full_text += page.get_text()
     doc.close()
     return full_text
 
 # Generate MCQs
 if st.sidebar.button("Generate MCQs"):
     if pdf_file is None:
         st.error("Please upload a PDF file.")
     else:
         with st.spinner("Extracting text and generating MCQs..."):
             text = extract_text_from_pdf(pdf_file)
             try:
                 response = mcq_chain.run(text=text, number=str(number_of_questions))
                 # st.subheader("Raw Output (Debugging)")
                 # st.code(response)
-                mcqs_json = json.loads(response.strip()
+                mcqs_json = json.loads(response.strip())
                 st.session_state.mcqs = mcqs_json
                 st.session_state.current_q = 0
                 st.session_state.user_answers = {}
                 st.session_state.quiz_finished = False
                 st.success("MCQs generated successfully!")
             except Exception as e:
                 st.error(f"Error generating MCQs: {e}")
 
 # Display question
 if st.session_state.mcqs and not st.session_state.quiz_finished:
     idx = st.session_state.current_q
     q_data = st.session_state.mcqs[idx]
 
     st.subheader(f"Question {idx + 1}: {q_data['question']}")
     selected_option = st.radio(
         "Choose an answer:",
         q_data["options"],
         key=f"radio_{idx}"
     )
 
     if st.button("Next"):
         st.session_state.user_answers[idx] = selected_option
 
         if st.session_state.current_q < len(st.session_state.mcqs) - 1:
             st.session_state.current_q += 1
         else:
             st.session_state.quiz_finished = True
             st.success("Quiz completed!")
 
 # Show result
 if st.session_state.quiz_finished:
     st.header("Quiz Results")
     score = 0
     total = len(st.session_state.mcqs)
 
     for i, q in enumerate(st.session_state.mcqs):
         user_ans = st.session_state.user_answers.get(i)
         correct_ans = q["answer"]
         if user_ans == correct_ans:
             score += 1
         st.markdown(f"**Q{i+1}: {q['question']}**")
         st.markdown(f"- Your answer: {user_ans}")
         st.markdown(f"- Correct answer: {correct_ans}")
         st.markdown("---")
 
     st.success(f"You scored {score} out of {total}")