import gradio as gr
from transformers import pipeline

# Load the question generation model
question_gen = pipeline("text2text-generation", model="valhalla/t5-base-qg-hl")

# Generate up to num_questions questions from the pasted study material
def generate_questions(text, num_questions, question_type):
    # question_type is accepted from the UI but currently unused: only
    # subjective questions are supported for now.
    num_questions = int(num_questions)

    # The qg-hl model expects the answer span wrapped in <hl> tags inside
    # the context. For simplicity, highlight the first sentence as the answer.
    sentences = [s.strip() for s in text.strip().split('.') if s.strip()]
    if len(sentences) > 1:
        answer = sentences[0]
        context = '. '.join(sentences[1:]) + '.'
    else:
        answer = text.strip()
        context = text.strip()

    prompt = f"generate question: <hl> {answer} <hl> {context}"
    # Beam search lets the model return several distinct candidates; greedy
    # decoding raises an error when num_return_sequences > 1.
    results = question_gen(
        prompt,
        max_length=128,
        num_beams=num_questions,
        num_return_sequences=num_questions,
    )
    return "\n\n".join(f"{i + 1}. {r['generated_text']}" for i, r in enumerate(results))
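
# Sketch of a possible extension (an assumption, not part of the original
# app): generate one question per sentence by highlighting each sentence in
# turn, so the questions cover more of the material instead of always
# targeting the first sentence. It reuses the question_gen pipeline loaded
# above; generate_questions_per_sentence is a hypothetical helper name.
def generate_questions_per_sentence(text, num_questions):
    sentences = [s.strip() for s in text.strip().split('.') if s.strip()]
    questions = []
    for i in range(min(int(num_questions), len(sentences))):
        # Rebuild the text with sentence i wrapped in <hl> tags, so each
        # prompt marks a different answer span in the same context.
        highlighted = '. '.join(
            f"<hl> {s} <hl>" if j == i else s for j, s in enumerate(sentences)
        ) + '.'
        out = question_gen(f"generate question: {highlighted}", max_length=128)
        questions.append(out[0]['generated_text'])
    return "\n\n".join(f"{n + 1}. {q}" for n, q in enumerate(questions))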

# Gradio app
with gr.Blocks() as demo:
    gr.Markdown("# AI Mock Test Generator")
    input_text = gr.Textbox(lines=10, label="Paste your study material here")
    num_questions = gr.Slider(minimum=1, maximum=5, value=3, step=1, label="Number of Questions")
    question_type = gr.Radio(["subjective"], value="subjective", label="Question Type (only subjective is supported for now)")
    output = gr.Textbox(label="Generated Questions")
    btn = gr.Button("Generate")
    btn.click(fn=generate_questions, inputs=[input_text, num_questions, question_type], outputs=output)

demo.launch()
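
# To run locally (assumed setup; the file name and commands below are not
# part of the original source):
#   pip install gradio transformers torch
#   python app.py
# Gradio serves the UI at http://127.0.0.1:7860 by default.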