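# AI Mock Test Generator: a small Gradio app that turns pasted study material
# into practice questions with a T5 question-generation model.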
import gradio as gr
from transformers import pipeline
# Question-generation pipeline; valhalla/t5-base-qg-hl generates a question
# about the span wrapped in <hl> tokens inside the input text.
question_gen = pipeline("text2text-generation", model="valhalla/t5-base-qg-hl")
def generate_questions(text, num_questions, question_type):
    # Highlight the first sentence as the answer span the model should ask about.
    sentences = text.split('.')
    if len(sentences) > 1:
        highlighted = f"<hl> {sentences[0].strip()} <hl>." + '.'.join(sentences[1:])
    else:
        highlighted = f"<hl> {text.strip()} <hl>"
    prompt = f"generate question: {highlighted}"
    # The slider passes a float, and beam search needs at least as many beams
    # as returned sequences, so normalize both before generating.
    # question_type is accepted from the UI but not used yet: this model only
    # produces open-ended questions.
    n = int(num_questions)
    results = question_gen(prompt, max_length=128, num_beams=n, num_return_sequences=n)
    return "\n\n".join(f"{i + 1}. {r['generated_text']}" for i, r in enumerate(results))
with gr.Blocks() as demo:
    gr.Markdown("# AI Mock Test Generator")
    input_text = gr.Textbox(lines=10, label="Paste text or content here")
    num_questions = gr.Slider(minimum=1, maximum=10, value=5, step=1, label="Number of Questions")
    question_type = gr.Radio(choices=["mcq", "subjective", "mixed"], value="mixed", label="Question Type")
    output = gr.Textbox(label="Generated Questions", lines=10)
    btn = gr.Button("Generate")
    btn.click(fn=generate_questions, inputs=[input_text, num_questions, question_type], outputs=output)
demo.launch()
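
# To run locally (assuming gradio, transformers, and torch are installed):
#   pip install gradio transformers torch
#   python app.py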