# AdaptiveHCI-2 / app.py — Hugging Face Space (author: WSLINMSAI)
import random
import gradio as gr
from transformers import T5ForConditionalGeneration, T5Tokenizer, pipeline
# -----------------------------
# 1. Load the Model & Slow Tokenizer
# Using the "valhalla/t5-base-qg-hl" model for improved question quality.
# -----------------------------
# use_fast=False selects the slow SentencePiece tokenizer implementation
# for this checkpoint. Both downloads happen at import time, so the app
# blocks here on first launch while weights are fetched.
tokenizer = T5Tokenizer.from_pretrained("valhalla/t5-base-qg-hl", use_fast=False)
model = T5ForConditionalGeneration.from_pretrained("valhalla/t5-base-qg-hl")
# Wrap model + tokenizer in a text2text pipeline; invoked in generate_question().
qg_pipeline = pipeline(
    "text2text-generation",
    model=model,
    tokenizer=tokenizer
)
# -----------------------------
# 2. Define Fixed Passages by Difficulty
# -----------------------------
# Each entry is already phrased as a question, with the expected answer
# wrapped in <hl>...<hl> tokens — presumably the highlight format the
# "-hl" QG checkpoint expects (TODO confirm against the model card).
# generate_question() both feeds the string to the model and parses the
# answer back out from between the markers.
passages = {
    "easy": [
        "What is the capital of France? <hl>Paris<hl>",
        "What type of animal is a cat? <hl>Feline<hl>",
        "What liquid is essential for all known forms of life? <hl>Water<hl>",
        "In which direction does the sun rise? <hl>East<hl>",
        "What quality are dogs best known for? <hl>Loyalty<hl>",
        "Which planet do humans live on? <hl>Earth<hl>",
        "Which fruit is commonly red and grows on trees? <hl>Apple<hl>",
        "What device is used to tell time? <hl>Clock<hl>",
        "Which object in the sky shines at night? <hl>Moon<hl>",
        "What appears in the sky after rain? <hl>Rainbow<hl>"
    ],
    "medium": [
        "In which country was Albert Einstein born? <hl>Germany<hl>",
        "What is the process by which plants convert sunlight into energy? <hl>Photosynthesis<hl>",
        "Which explorer sailed across the Atlantic Ocean in 1492? <hl>Columbus<hl>",
        "Which rainforest is the largest in the world? <hl>Amazon<hl>",
        "Which ocean is the largest and deepest on Earth? <hl>Pacific<hl>",
        "Which gas is essential for human breathing? <hl>Oxygen<hl>",
        "Which galaxy contains our solar system? <hl>Milky<hl>",
        "What structures in Egypt were built as tombs for pharaohs? <hl>Pyramids<hl>",
        "Which revolution transformed manufacturing in the 18th and 19th centuries? <hl>Industrial<hl>",
        "What system of government allows citizens to elect their leaders? <hl>Democracy<hl>"
    ],
    "hard": [
        "Which historical period was a European cultural and intellectual revival? <hl>Renaissance<hl>",
        "Which branch of physics describes nature at the smallest scales? <hl>Quantum<hl>",
        "What document established the framework of the U.S. government? <hl>Constitution<hl>",
        "Which empire was the continuation of the Roman Empire in the East? <hl>Byzantine<hl>",
        "Which fundamental particle gives mass to others? <hl>Higgs<hl>",
        "Which principle states that position and momentum cannot both be precisely known? <hl>Heisenberg<hl>",
        "What was the period of political tension between the U.S. and the Soviet Union called? <hl>Cold<hl>",
        "Which collection of essays supported the ratification of the U.S. Constitution? <hl>Federalist<hl>",
        "Which theoretical framework aims to unify quantum mechanics and relativity? <hl>String<hl>",
        "Which field of computer science focuses on creating intelligent machines? <hl>Artificial<hl>"
    ]
}
# -----------------------------
# 3. Session State Initialization
# -----------------------------
def init_state():
    """Return a fresh per-session state dict.

    Keys:
        difficulty: current level name ("easy" | "medium" | "hard")
        score:      running score within the current level
        question:   last generated question text
        answer:     expected answer for the last question
        passage:    raw highlighted passage the question came from
    """
    fresh = dict.fromkeys(("question", "answer", "passage"), "")
    fresh.update(difficulty="easy", score=0)
    return fresh
# -----------------------------
# 4. Adjust Difficulty Based on Score
# -----------------------------
def adjust_difficulty(state):
    """Promote or demote the session's difficulty based on its score.

    Reaching +2 moves up one level and reaching -2 moves down one level;
    either transition resets the score to zero. Returns a human-readable
    status message for the UI.
    """
    levels = ("easy", "medium", "hard")
    pos = levels.index(state["difficulty"])
    can_go_up = pos + 1 < len(levels)
    can_go_down = pos > 0

    # Guard clauses: level up, level down, otherwise report status.
    if state["score"] >= 2 and can_go_up:
        state["difficulty"] = levels[pos + 1]
        state["score"] = 0  # fresh score at the new level
        return "Level up! Difficulty increased to: " + state["difficulty"]
    if state["score"] <= -2 and can_go_down:
        state["difficulty"] = levels[pos - 1]
        state["score"] = 0  # fresh score at the new level
        return "Don't worry! Difficulty decreased to: " + state["difficulty"]
    return f"Keep going! Current level: {state['difficulty']} (Score: {state['score']})"
# -----------------------------
# 5. Generate a Question from a Fixed Passage
# -----------------------------
def generate_question(state):
    """Pick a random highlighted passage for the current difficulty, run
    the question-generation pipeline on it, and record the passage,
    generated question, and expected answer in *state*.

    Returns the generated question text.
    """
    chosen = random.choice(passages[state["difficulty"]])

    # The expected answer sits between the two <hl> highlight tokens;
    # fall back to "N/A" when the markers are missing or malformed.
    pieces = chosen.split("<hl>")
    expected = pieces[1].strip() if len(pieces) >= 3 else "N/A"

    generated = qg_pipeline(chosen, max_length=64)[0]["generated_text"]

    state.update(passage=chosen, question=generated, answer=expected)
    return generated
# -----------------------------
# 6. Check the User's Answer
# Returns both the feedback message and a flag indicating correctness.
# -----------------------------
def check_answer(state, user_answer):
    """Score *user_answer* against the stored answer (case- and
    whitespace-insensitive exact match).

    Adjusts the score by +1/-1, lets adjust_difficulty() possibly shift
    the level, and returns (feedback message, correctness flag).
    """
    is_correct = user_answer.lower().strip() == state["answer"].lower().strip()
    if is_correct:
        state["score"] += 1
        verdict = "Correct! Nice work!"
    else:
        state["score"] -= 1
        verdict = f"Oops! The correct answer was: {state['answer']}"
    # Append the level-change status so the UI shows both in one box.
    return verdict + "\n" + adjust_difficulty(state), is_correct
# -----------------------------
# 7. Build the Gradio Interface with a Hip, Rainbow Theme
# Display a confetti GIF for correct answers and a red X GIF for wrong answers.
# -----------------------------
# Page-wide CSS injected into the Blocks app: rainbow-gradient backdrop,
# rounded translucent container, and oversized playful typography.
custom_css = """
body {
background: linear-gradient(135deg, #ff0000, #ff7f00, #ffff00, #00ff00, #0000ff, #4b0082, #8f00ff);
font-family: 'Poppins', sans-serif;
}
.gradio-container {
background-color: rgba(255, 255, 255, 0.95);
border-radius: 20px;
padding: 30px;
box-shadow: 0 8px 20px rgba(0, 0, 0, 0.3);
}
.gradio-markdown h1 {
color: #ff1493;
text-shadow: 2px 2px 4px #000000;
}
.gradio-markdown p, .gradio-markdown li {
font-size: 18px;
color: #333333;
}
.gradio-button, .gradio-textbox {
font-size: 16px;
border-radius: 10px;
}
"""
# -----------------------------
# 7. Build the Gradio Interface with a Hip, Rainbow Theme
# Display a confetti GIF for correct answers and a red X GIF for wrong answers.
# -----------------------------
with gr.Blocks(css=custom_css) as demo:
    # Create persistent session state (Gradio keeps one copy per session).
    state = gr.State(init_state())
    gr.Markdown("# Lingo Quest: The Ultimate Word Adventure")
    gr.Markdown(
        "Welcome to **Lingo Quest**! Embark on a vibrant journey where every challenge is a step toward mastering language. "
        "Answer questions correctly to level up and unlock new challenges. Get ready to flex your word skills and become a language legend!"
    )
    # Display current level and score.
    difficulty_label = gr.Markdown("**Current Level**: (will be updated)")
    with gr.Row():
        generate_button = gr.Button("Start Challenge!")
    question_output = gr.Textbox(label="Challenge Question", interactive=False)
    user_answer = gr.Textbox(label="Your Answer")
    submit_button = gr.Button("Submit Answer")
    result_output = gr.Textbox(label="Result", interactive=False)
    # HTML slot that holds the feedback GIF; starts empty.
    feedback_gif_output = gr.HTML(label="Celebration / Error", value="")

    def update_difficulty_label(state):
        # Render the current level/score as markdown for the header label.
        return f"**Current Level**: {state['difficulty']} (Score: {state['score']})"

    # Update the difficulty label when the interface loads.
    demo.load(fn=update_difficulty_label, inputs=state, outputs=difficulty_label)

    def on_generate_question(state):
        # Generate a new question and refresh the level label.
        question = generate_question(state)
        difficulty_text = update_difficulty_label(state)
        # Clear any previous GIF when a new challenge starts.
        return question, difficulty_text, ""

    generate_button.click(
        fn=on_generate_question,
        inputs=state,
        outputs=[question_output, difficulty_label, feedback_gif_output]
    )

    def on_submit_answer(user_answer, state):
        # Score the answer, refresh the label, and choose a feedback GIF.
        feedback, is_correct = check_answer(state, user_answer)
        difficulty_text = update_difficulty_label(state)
        if is_correct:
            # Confetti GIF for correct answers
            gif_html = """
<div style="text-align: center;">
<img src="https://media.giphy.com/media/l4KhQo2MESJkc6QbS/giphy.gif" alt="Confetti!" style="width: 300px;"/>
</div>
"""
        else:
            # Red X or "Nope" GIF for incorrect answers
            gif_html = """
<div style="text-align: center;">
<img src="https://media.giphy.com/media/hPPx8yk3Bmqys/giphy.gif" alt="Incorrect!" style="width: 200px;"/>
</div>
"""
        return feedback, difficulty_text, gif_html

    submit_button.click(
        fn=on_submit_answer,
        inputs=[user_answer, state],
        outputs=[result_output, difficulty_label, feedback_gif_output]
    )

# Start the app (blocking call).
demo.launch()