import random

import gradio as gr
from transformers import T5ForConditionalGeneration, T5Tokenizer, pipeline

# -----------------------------
# 1. Load the Model & Slow Tokenizer
# Using the "valhalla/t5-base-qg-hl" model for improved question quality.
# -----------------------------
tokenizer = T5Tokenizer.from_pretrained("valhalla/t5-base-qg-hl", use_fast=False)
model = T5ForConditionalGeneration.from_pretrained("valhalla/t5-base-qg-hl")

qg_pipeline = pipeline(
    "text2text-generation",
    model=model,
    tokenizer=tokenizer
)

# -----------------------------
# 2. Define Fixed Passages by Difficulty
# Each passage wraps its one-word answer in <hl> tokens, the highlight format
# the qg-hl model expects.
# -----------------------------
passages = {
    "easy": [
        "What is the capital of France? <hl>Paris<hl>",
        "What type of animal is a cat? <hl>Feline<hl>",
        "What liquid is essential for all known forms of life? <hl>Water<hl>",
        "In which direction does the sun rise? <hl>East<hl>",
        "What quality are dogs best known for? <hl>Loyalty<hl>",
        "Which planet do humans live on? <hl>Earth<hl>",
        "Which fruit is commonly red and grows on trees? <hl>Apple<hl>",
        "What device is used to tell time? <hl>Clock<hl>",
        "Which object in the sky shines at night? <hl>Moon<hl>",
        "What appears in the sky after rain? <hl>Rainbow<hl>"
    ],
    "medium": [
        "In which country was Albert Einstein born? <hl>Germany<hl>",
        "What is the process by which plants convert sunlight into energy? <hl>Photosynthesis<hl>",
        "Which explorer sailed across the Atlantic Ocean in 1492? <hl>Columbus<hl>",
        "Which rainforest is the largest in the world? <hl>Amazon<hl>",
        "Which ocean is the largest and deepest on Earth? <hl>Pacific<hl>",
        "Which gas is essential for human breathing? <hl>Oxygen<hl>",
        "Which galaxy contains our solar system? <hl>Milky<hl>",
        "What structures in Egypt were built as tombs for pharaohs? <hl>Pyramids<hl>",
        "Which revolution transformed manufacturing in the 18th and 19th centuries? <hl>Industrial<hl>",
        "What system of government allows citizens to elect their leaders? <hl>Democracy<hl>"
    ],
    "hard": [
        "Which historical period was a European cultural and intellectual revival? <hl>Renaissance<hl>",
        "Which branch of physics describes nature at the smallest scales? <hl>Quantum<hl>",
        "What document established the framework of the U.S. government? <hl>Constitution<hl>",
        "Which empire was the continuation of the Roman Empire in the East? <hl>Byzantine<hl>",
        "Which fundamental particle gives mass to others? <hl>Higgs<hl>",
        "Which principle states that position and momentum cannot both be precisely known? <hl>Heisenberg<hl>",
        "What was the period of political tension between the U.S. and the Soviet Union called? <hl>Cold<hl>",
        "Which collection of essays supported the ratification of the U.S. Constitution? <hl>Federalist<hl>",
        "Which theoretical framework aims to unify quantum mechanics and relativity? <hl>String<hl>",
        "Which field of computer science focuses on creating intelligent machines? <hl>Artificial<hl>"
    ]
}

# -----------------------------
# 3. Session State Initialization
# -----------------------------
def init_state():
    return {
        "difficulty": "easy",
        "score": 0,
        "question": "",
        "answer": "",
        "passage": ""
    }

# -----------------------------
# 4. Adjust Difficulty Based on Score
# -----------------------------
def adjust_difficulty(state):
    diff_order = ["easy", "medium", "hard"]
    idx = diff_order.index(state["difficulty"])
    if state["score"] >= 2 and idx < len(diff_order) - 1:
        state["difficulty"] = diff_order[idx + 1]
        state["score"] = 0  # Reset score when leveling up
        return "Level up! Difficulty increased to: " + state["difficulty"]
    elif state["score"] <= -2 and idx > 0:
        state["difficulty"] = diff_order[idx - 1]
        state["score"] = 0  # Reset score when leveling down
        return "Don't worry! Difficulty decreased to: " + state["difficulty"]
    else:
        return f"Keep going! \nCurrent level: {state['difficulty']} (Score: {state['score']})"

# -----------------------------
# 5. Generate a Question from a Fixed Passage
# -----------------------------
def generate_question(state):
    # Select a random passage from the fixed list based on the current difficulty.
    passage = random.choice(passages[state["difficulty"]])

    # Extract the answer from the text between <hl> tokens.
    parts = passage.split("<hl>")
    if len(parts) >= 3:
        answer = parts[1].strip()
    else:
        answer = "N/A"

    # Use the QG pipeline to generate a question from the fixed passage.
    result = qg_pipeline(passage, max_length=64)
    question_text = result[0]["generated_text"]

    # Update state with the chosen passage, generated question, and correct answer.
    state["passage"] = passage
    state["question"] = question_text
    state["answer"] = answer

    return question_text

# -----------------------------
# 6. Check the User's Answer
# Returns both the feedback message and a flag indicating correctness.
# -----------------------------
def check_answer(state, user_answer):
    correct_answer = state["answer"].lower().strip()
    user_answer_clean = user_answer.lower().strip()
    if user_answer_clean == correct_answer:
        state["score"] += 1
        result_text = "Correct! Nice work!"
        is_correct = True
    else:
        state["score"] -= 1
        result_text = f"Oops! The correct answer was: {state['answer']}"
        is_correct = False
    difficulty_update = adjust_difficulty(state)
    return result_text + "\n" + difficulty_update, is_correct

# -----------------------------
# 7. Build the Gradio Interface with a Hip, Rainbow Theme
# Display a confetti GIF for correct answers and a red X GIF for wrong answers.
# -----------------------------
custom_css = """
body {
    background: linear-gradient(135deg, #ff0000, #ff7f00, #ffff00, #00ff00, #0000ff, #4b0082, #8f00ff);
    font-family: 'Poppins', sans-serif;
}
.gradio-container {
    background-color: rgba(255, 255, 255, 0.95);
    border-radius: 20px;
    padding: 30px;
    box-shadow: 0 8px 20px rgba(0, 0, 0, 0.3);
}
.gradio-markdown h1 {
    color: #ff1493;
    text-shadow: 2px 2px 4px #000000;
}
.gradio-markdown p, .gradio-markdown li {
    font-size: 18px;
    color: #333333;
}
.gradio-button, .gradio-textbox {
    font-size: 16px;
    border-radius: 10px;
}
"""

with gr.Blocks(css=custom_css) as demo:
    # Create persistent session state.
    state = gr.State(init_state())

    gr.Markdown("# Lingo Quest: The Ultimate Word Adventure")
    gr.Markdown(
        "Welcome to **Lingo Quest**! Embark on a vibrant journey where every challenge is a step toward mastering language. "
        "Answer questions correctly to level up and unlock new challenges. Get ready to flex your word skills and become a language legend!"
    )

    # Display current level and score.
    difficulty_label = gr.Markdown("**Current Level**: (will be updated)")

    with gr.Row():
        generate_button = gr.Button("Start Challenge!")
        question_output = gr.Textbox(label="Challenge Question", interactive=False)

    user_answer = gr.Textbox(label="Your Answer")
    submit_button = gr.Button("Submit Answer")
    result_output = gr.Textbox(label="Result", interactive=False)
    feedback_gif_output = gr.HTML(label="Celebration / Error", value="")

    def update_difficulty_label(state):
        return f"**Current Level**: {state['difficulty']} (Score: {state['score']})"

    # Update the difficulty label when the interface loads.
    demo.load(fn=update_difficulty_label, inputs=state, outputs=difficulty_label)

    def on_generate_question(state):
        question = generate_question(state)
        difficulty_text = update_difficulty_label(state)
        # Clear any previous GIF when a new challenge starts.
        return question, difficulty_text, ""

    generate_button.click(
        fn=on_generate_question,
        inputs=state,
        outputs=[question_output, difficulty_label, feedback_gif_output]
    )

    def on_submit_answer(user_answer, state):
        feedback, is_correct = check_answer(state, user_answer)
        difficulty_text = update_difficulty_label(state)
        if is_correct:
            # Confetti GIF for correct answers
            gif_html = """
            <!-- Placeholder src: point this at any celebratory GIF URL. -->
            <img src="REPLACE_WITH_CONFETTI_GIF_URL" alt="Confetti!" width="300">
            """
        else:
            # Red X or "Nope" GIF for incorrect answers
            gif_html = """
            <!-- Placeholder src: point this at any red X / "Nope" GIF URL. -->
            <img src="REPLACE_WITH_RED_X_GIF_URL" alt="Incorrect!" width="300">
            """
        return feedback, difficulty_text, gif_html

    submit_button.click(
        fn=on_submit_answer,
        inputs=[user_answer, state],
        outputs=[result_output, difficulty_label, feedback_gif_output]
    )

demo.launch()