File size: 9,302 Bytes
22b7ceb
4c96ad5
aa8cdfb
22b7ceb
 
ab2ce90
 
22b7ceb
a448117
 
22b7ceb
 
aa8cdfb
 
4c96ad5
 
22b7ceb
ab2ce90
22b7ceb
ab2ce90
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22b7ceb
 
aa8cdfb
22b7ceb
 
 
 
 
 
 
c789898
22b7ceb
 
 
aa8cdfb
22b7ceb
 
 
 
 
 
ab2ce90
c789898
22b7ceb
 
ab2ce90
c789898
22b7ceb
c789898
22b7ceb
 
ab2ce90
22b7ceb
 
ab2ce90
 
c789898
ab2ce90
c789898
 
22b7ceb
 
 
c789898
ab2ce90
c789898
22b7ceb
c789898
ab2ce90
 
22b7ceb
 
 
 
 
 
04be7b6
22b7ceb
 
 
 
 
 
c789898
04be7b6
22b7ceb
 
c789898
04be7b6
22b7ceb
04be7b6
22b7ceb
 
1238843
 
22b7ceb
a448117
 
c789898
 
a448117
 
c789898
 
 
 
a448117
 
c789898
 
a448117
 
c789898
a448117
 
 
 
c789898
a448117
 
 
 
04be7b6
22b7ceb
c789898
 
aa8cdfb
c789898
 
aa8cdfb
c789898
 
 
 
22b7ceb
c789898
 
 
22b7ceb
 
 
1238843
c789898
22b7ceb
c789898
 
04be7b6
22b7ceb
c789898
22b7ceb
 
 
1238843
04be7b6
c789898
04be7b6
 
 
1238843
04be7b6
c789898
22b7ceb
04be7b6
22b7ceb
04be7b6
1238843
 
04be7b6
1238843
04be7b6
 
 
1238843
 
 
 
 
 
 
c789898
04be7b6
 
 
1238843
04be7b6
c789898
30da7a7
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
import random
import gradio as gr
from transformers import T5ForConditionalGeneration, T5Tokenizer, pipeline

# -----------------------------
# 1. Load the Model & Slow Tokenizer
#    Using the "valhalla/t5-base-qg-hl" model for improved question quality.
# -----------------------------
# Load the question-generation model once at import time. The "hl" variant
# expects the answer span in the input text to be wrapped in <hl> markers.
# NOTE: use_fast=False forces the slow SentencePiece tokenizer (the fast one
# is not always available for this checkpoint).
tokenizer = T5Tokenizer.from_pretrained("valhalla/t5-base-qg-hl", use_fast=False)
model = T5ForConditionalGeneration.from_pretrained("valhalla/t5-base-qg-hl")
# text2text-generation pipeline: feed a passage, get back a generated question
# under the "generated_text" key of the first result dict.
qg_pipeline = pipeline(
    "text2text-generation",
    model=model,
    tokenizer=tokenizer
)

# -----------------------------
# 2. Define Fixed Passages by Difficulty
# -----------------------------
# Fixed question passages keyed by difficulty level ("easy" / "medium" / "hard").
# Convention: each string is a prompt whose answer is wrapped in <hl>...<hl>
# highlight markers — generate_question() splits on "<hl>" to recover the
# answer and feeds the whole string to the QG pipeline.
passages = {
    "easy": [
        "What is the capital of France? <hl>Paris<hl>",
        "What type of animal is a cat? <hl>Feline<hl>",
        "What liquid is essential for all known forms of life? <hl>Water<hl>",
        "In which direction does the sun rise? <hl>East<hl>",
        "What quality are dogs best known for? <hl>Loyalty<hl>",
        "Which planet do humans live on? <hl>Earth<hl>",
        "Which fruit is commonly red and grows on trees? <hl>Apple<hl>",
        "What device is used to tell time? <hl>Clock<hl>",
        "Which object in the sky shines at night? <hl>Moon<hl>",
        "What appears in the sky after rain? <hl>Rainbow<hl>"
    ],
    "medium": [
        "In which country was Albert Einstein born? <hl>Germany<hl>",
        "What is the process by which plants convert sunlight into energy? <hl>Photosynthesis<hl>",
        "Which explorer sailed across the Atlantic Ocean in 1492? <hl>Columbus<hl>",
        "Which rainforest is the largest in the world? <hl>Amazon<hl>",
        "Which ocean is the largest and deepest on Earth? <hl>Pacific<hl>",
        "Which gas is essential for human breathing? <hl>Oxygen<hl>",
        "Which galaxy contains our solar system? <hl>Milky<hl>",
        "What structures in Egypt were built as tombs for pharaohs? <hl>Pyramids<hl>",
        "Which revolution transformed manufacturing in the 18th and 19th centuries? <hl>Industrial<hl>",
        "What system of government allows citizens to elect their leaders? <hl>Democracy<hl>"
    ],
    "hard": [
        "Which historical period was a European cultural and intellectual revival? <hl>Renaissance<hl>",
        "Which branch of physics describes nature at the smallest scales? <hl>Quantum<hl>",
        "What document established the framework of the U.S. government? <hl>Constitution<hl>",
        "Which empire was the continuation of the Roman Empire in the East? <hl>Byzantine<hl>",
        "Which fundamental particle gives mass to others? <hl>Higgs<hl>",
        "Which principle states that position and momentum cannot both be precisely known? <hl>Heisenberg<hl>",
        "What was the period of political tension between the U.S. and the Soviet Union called? <hl>Cold<hl>",
        "Which collection of essays supported the ratification of the U.S. Constitution? <hl>Federalist<hl>",
        "Which theoretical framework aims to unify quantum mechanics and relativity? <hl>String<hl>",
        "Which field of computer science focuses on creating intelligent machines? <hl>Artificial<hl>"
    ]
}

# -----------------------------
# 3. Session State Initialization
# -----------------------------
def init_state():
    """Build a fresh per-session game state.

    Returns a dict holding the current difficulty, the running score,
    and the most recent passage/question/answer triple (empty at start).
    """
    return dict(
        difficulty="easy",
        score=0,
        question="",
        answer="",
        passage="",
    )

# -----------------------------
# 4. Adjust Difficulty Based on Score
# -----------------------------
def adjust_difficulty(state):
    """Promote or demote the difficulty level based on the running score.

    A score of +2 or more moves the player up one level; -2 or less moves
    them down one. Either transition resets the score to zero. Returns a
    human-readable status message describing what happened.
    """
    levels = ["easy", "medium", "hard"]
    pos = levels.index(state["difficulty"])
    promote = state["score"] >= 2 and pos + 1 < len(levels)
    demote = state["score"] <= -2 and pos > 0
    if promote:
        state["difficulty"] = levels[pos + 1]
        state["score"] = 0  # fresh start at the new level
        return "Level up! Difficulty increased to: " + state["difficulty"]
    if demote:
        state["difficulty"] = levels[pos - 1]
        state["score"] = 0  # fresh start at the new level
        return "Don't worry! Difficulty decreased to: " + state["difficulty"]
    return f"Keep going! Current level: {state['difficulty']} (Score: {state['score']})"

# -----------------------------
# 5. Generate a Question from a Fixed Passage
# -----------------------------
def generate_question(state):
    """Draw a random passage for the current difficulty and generate a question.

    Picks one of the fixed <hl>-annotated passages, recovers the highlighted
    answer span, runs the QG pipeline on the full passage, and records the
    passage/question/answer triple in *state*. Returns the question text.
    """
    passage = random.choice(passages[state["difficulty"]])

    # Splitting on "<hl>" yields [before, answer, after] when the markers
    # are well-formed; otherwise fall back to a placeholder answer.
    segments = passage.split("<hl>")
    answer = segments[1].strip() if len(segments) >= 3 else "N/A"

    # The pipeline returns a list of dicts; the question lives under
    # "generated_text" in the first entry.
    question_text = qg_pipeline(passage, max_length=64)[0]["generated_text"]

    state.update(passage=passage, question=question_text, answer=answer)
    return question_text

# -----------------------------
# 6. Check the User's Answer
#    Returns both the feedback message and a flag indicating correctness.
# -----------------------------
def check_answer(state, user_answer):
    """Grade the user's answer and update score and difficulty.

    Comparison is case-insensitive and whitespace-trimmed. A correct
    answer adds one point, a wrong one subtracts one; the difficulty is
    then re-evaluated. Returns (feedback message, is_correct flag).
    """
    expected = state["answer"].lower().strip()
    given = user_answer.lower().strip()
    is_correct = given == expected
    if is_correct:
        state["score"] += 1
        result_text = "Correct! Nice work!"
    else:
        state["score"] -= 1
        result_text = f"Oops! The correct answer was: {state['answer']}"
    difficulty_update = adjust_difficulty(state)
    return result_text + "\n" + difficulty_update, is_correct

# -----------------------------
# 7. Build the Gradio Interface with a Hip, Rainbow Theme
#    Display a confetti GIF for correct answers and a red X GIF for wrong answers.
# -----------------------------
# Raw CSS injected into the Gradio app: rainbow gradient page background,
# a translucent rounded card for the main container, and pink headline
# styling. Kept as a plain string so gr.Blocks(css=...) can consume it.
custom_css = """
body {
    background: linear-gradient(135deg, #ff0000, #ff7f00, #ffff00, #00ff00, #0000ff, #4b0082, #8f00ff);
    font-family: 'Poppins', sans-serif;
}
.gradio-container {
    background-color: rgba(255, 255, 255, 0.95);
    border-radius: 20px;
    padding: 30px;
    box-shadow: 0 8px 20px rgba(0, 0, 0, 0.3);
}
.gradio-markdown h1 {
    color: #ff1493;
    text-shadow: 2px 2px 4px #000000;
}
.gradio-markdown p, .gradio-markdown li {
    font-size: 18px;
    color: #333333;
}
.gradio-button, .gradio-textbox {
    font-size: 16px;
    border-radius: 10px;
}
"""

with gr.Blocks(css=custom_css) as demo:
    # Per-session state dict (difficulty, score, current question/answer).
    # NOTE(review): init_state() is evaluated once at build time; gr.State
    # deep-copies the value per session, so sessions should not share state —
    # confirm against the gradio version in use.
    state = gr.State(init_state())
    
    gr.Markdown("# Lingo Quest: The Ultimate Word Adventure")
    gr.Markdown(
        "Welcome to **Lingo Quest**! Embark on a vibrant journey where every challenge is a step toward mastering language. "
        "Answer questions correctly to level up and unlock new challenges. Get ready to flex your word skills and become a language legend!"
    )
    
    # Live banner showing the current level and score; refreshed by callbacks.
    difficulty_label = gr.Markdown("**Current Level**: (will be updated)")
    
    with gr.Row():
        generate_button = gr.Button("Start Challenge!")
        question_output = gr.Textbox(label="Challenge Question", interactive=False)
    
    user_answer = gr.Textbox(label="Your Answer")
    submit_button = gr.Button("Submit Answer")
    result_output = gr.Textbox(label="Result", interactive=False)
    # HTML slot for the feedback GIF (confetti on correct, red X on wrong).
    feedback_gif_output = gr.HTML(label="Celebration / Error", value="")
    
    def update_difficulty_label(state):
        """Render the current difficulty and score as Markdown."""
        return f"**Current Level**: {state['difficulty']} (Score: {state['score']})"
    
    # Update the difficulty label when the interface loads.
    demo.load(fn=update_difficulty_label, inputs=state, outputs=difficulty_label)
    
    def on_generate_question(state):
        """Generate a new question and refresh the label; clears the old GIF."""
        question = generate_question(state)
        difficulty_text = update_difficulty_label(state)
        # Clear any previous GIF when a new challenge starts.
        return question, difficulty_text, ""
    
    generate_button.click(
        fn=on_generate_question, 
        inputs=state, 
        outputs=[question_output, difficulty_label, feedback_gif_output]
    )
    
    def on_submit_answer(user_answer, state):
        """Grade the answer, refresh the label, and pick a feedback GIF."""
        feedback, is_correct = check_answer(state, user_answer)
        difficulty_text = update_difficulty_label(state)
        if is_correct:
            # Confetti GIF for correct answers
            gif_html = """
            <div style="text-align: center;">
                <img src="https://media.giphy.com/media/l4KhQo2MESJkc6QbS/giphy.gif" alt="Confetti!" style="width: 300px;"/>
            </div>
            """
        else:
            # Red X or "Nope" GIF for incorrect answers
            gif_html = """
            <div style="text-align: center;">
                <img src="https://media.giphy.com/media/hPPx8yk3Bmqys/giphy.gif" alt="Incorrect!" style="width: 200px;"/>
            </div>
            """
        return feedback, difficulty_text, gif_html
    
    submit_button.click(
        fn=on_submit_answer, 
        inputs=[user_answer, state], 
        outputs=[result_output, difficulty_label, feedback_gif_output]
    )
    
# Launch the Gradio server (blocking call).
demo.launch()