WSLINMSAI committed on
Commit
c789898
·
verified ·
1 Parent(s): edda13d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +99 -78
app.py CHANGED
@@ -3,9 +3,9 @@ import gradio as gr
3
  from transformers import T5ForConditionalGeneration, T5Tokenizer, pipeline
4
 
5
  # -----------------------------
6
- # 1. Load a Better Model & Slow Tokenizer
7
- # Using the "valhalla/t5-base-qg-hl" model (a step up from the small version)
8
- # and disabling the fast tokenizer.
9
  # -----------------------------
10
  tokenizer = T5Tokenizer.from_pretrained("valhalla/t5-base-qg-hl", use_fast=False)
11
  model = T5ForConditionalGeneration.from_pretrained("valhalla/t5-base-qg-hl")
@@ -15,26 +15,54 @@ qg_pipeline = pipeline(
15
  tokenizer=tokenizer
16
  )
17
 
 
 
 
 
 
 
 
 
18
  # -----------------------------
19
- # 2. Define Passages by Difficulty
 
20
  # -----------------------------
21
- passages = {
22
- "easy": [
23
- "The capital of <hl>France<hl> is Paris.",
24
- "A cat is a small <hl>domesticated<hl> feline.",
25
- "<hl>Water<hl> is essential for all known forms of life.",
26
- ],
27
- "medium": [
28
- "Albert Einstein was born in <hl>Germany<hl> in 1879.",
29
- "The process of <hl>photosynthesis<hl> occurs in plants and algae.",
30
- "In 1492, <hl>Christopher Columbus<hl> sailed across the Atlantic Ocean.",
31
- ],
32
- "hard": [
33
- "The <hl>Renaissance<hl> was a fervent period of European cultural, artistic, political, and economic rebirth.",
34
- "<hl>Quantum mechanics<hl> is a fundamental theory in physics describing nature at the smallest scales.",
35
- "The <hl>Constitution<hl> of the United States was ratified in 1788, establishing the framework of government.",
36
- ],
37
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
 
39
  # -----------------------------
40
  # 3. Session State Initialization
@@ -45,6 +73,7 @@ def init_state():
45
  "score": 0,
46
  "question": "",
47
  "answer": "",
 
48
  }
49
 
50
  # -----------------------------
@@ -53,41 +82,38 @@ def init_state():
53
def adjust_difficulty(state):
    """Move the session up or down the difficulty ladder based on score.

    A running score of +2 promotes to the next level and -2 demotes to the
    previous one; either transition resets the score to zero.  Returns a
    short status message describing the outcome.
    """
    levels = ["easy", "medium", "hard"]
    position = levels.index(state["difficulty"])
    can_promote = state["score"] >= 2 and position + 1 < len(levels)
    can_demote = state["score"] <= -2 and position > 0

    if can_promote:
        state["difficulty"] = levels[position + 1]
        state["score"] = 0  # fresh start at the new level
        return "Difficulty increased to: " + state["difficulty"]
    if can_demote:
        state["difficulty"] = levels[position - 1]
        state["score"] = 0  # fresh start at the new level
        return "Difficulty decreased to: " + state["difficulty"]
    return f"Difficulty remains: {state['difficulty']} (Score: {state['score']})"
67
 
68
  # -----------------------------
69
- # 5. Generate a Question from a Passage
70
  # -----------------------------
71
def generate_question(state):
    """Pick a random passage for the current difficulty and turn it into a question.

    Each passage embeds its answer between <hl> markers; the answer is
    extracted from the markers while the full highlighted passage is fed to
    the question-generation pipeline.  Updates state["question"] and
    state["answer"], and returns the generated question text.
    """
    # Select a random passage from the current difficulty level.
    passage_list = passages[state["difficulty"]]
    chosen_passage = random.choice(passage_list)

    # Extract the answer from text between <hl> tags.  Splitting on "<hl>"
    # yields [before, answer, after, ...]; accept >= 3 parts so a passage
    # carrying an extra highlight pair still yields its first answer instead
    # of silently degrading to "N/A" (the previous `== 3` check was brittle).
    parts = chosen_passage.split("<hl>")
    if len(parts) >= 3:
        answer = parts[1].strip()
    else:
        answer = "N/A"

    # Generate a question using the QG pipeline.
    result = qg_pipeline(chosen_passage, max_length=64)
    question_text = result[0]["generated_text"]

    # Update the state with the generated question and correct answer.
    state["question"] = question_text
    state["answer"] = answer

    return question_text
92
 
93
  # -----------------------------
@@ -96,85 +122,80 @@ def generate_question(state):
96
def check_answer(state, user_answer):
    """Score the user's answer against the stored one and retune difficulty.

    Comparison is case-insensitive and whitespace-trimmed.  A match adds a
    point and a miss subtracts one; the combined feedback and difficulty
    message is returned as a two-line string.
    """
    expected = state["answer"].lower().strip()
    submitted = user_answer.lower().strip()

    if submitted == expected:
        state["score"] += 1
        result_text = "Correct!"
    else:
        state["score"] -= 1
        result_text = f"Incorrect! The correct answer was: {state['answer']}"

    # Adjust the difficulty based on the updated score.
    difficulty_update = adjust_difficulty(state)
    return result_text + "\n" + difficulty_update
110
 
111
  # -----------------------------
112
- # 7. Build the Gradio Interface with Custom CSS for a More Colorful UI
113
  # -----------------------------
114
  custom_css = """
115
  body {
116
- background: linear-gradient(135deg, #e0c3fc, #8ec5fc);
117
- font-family: 'Helvetica', sans-serif;
118
  }
119
  .gradio-container {
120
- background-color: #ffffff;
121
- border-radius: 15px;
122
- padding: 20px;
123
- box-shadow: 0 4px 12px rgba(0,0,0,0.15);
124
  }
125
  .gradio-markdown h1 {
126
- color: #4b0082;
 
127
  }
128
  .gradio-markdown p, .gradio-markdown li {
129
- font-size: 16px;
130
  color: #333333;
131
  }
132
  .gradio-button, .gradio-textbox {
133
  font-size: 16px;
 
134
  }
135
  """
136
 
137
with gr.Blocks(css=custom_css) as demo:
    # One state dict per browser session.
    state = gr.State(init_state())

    gr.Markdown("# Adaptive Language Tutor")
    gr.Markdown(
        "This demo uses a T5-based model to generate questions from a passage. "
        "The difficulty will automatically adjust based on your performance."
    )

    # Live banner showing the current difficulty and score.
    difficulty_label = gr.Markdown("**Difficulty**: (will be updated)")

    with gr.Row():
        generate_button = gr.Button("Generate Question")
        question_output = gr.Textbox(label="Question", interactive=False)

    user_answer = gr.Textbox(label="Your Answer")
    submit_button = gr.Button("Submit Answer")
    result_output = gr.Textbox(label="Result", interactive=False)

    def update_difficulty_label(state):
        # Render the banner markdown from the session state.
        return f"**Difficulty**: {state['difficulty']} (Score: {state['score']})"

    def on_generate_question(state):
        # Produce a new question and a refreshed banner in one round trip.
        question = generate_question(state)
        return question, update_difficulty_label(state)

    def on_submit_answer(user_answer, state):
        # Grade the submission, then refresh the banner.
        feedback = check_answer(state, user_answer)
        return feedback, update_difficulty_label(state)

    # Populate the banner as soon as the page loads, then wire the buttons.
    demo.load(fn=update_difficulty_label, inputs=state, outputs=difficulty_label)
    generate_button.click(fn=on_generate_question, inputs=state, outputs=[question_output, difficulty_label])
    submit_button.click(fn=on_submit_answer, inputs=[user_answer, state], outputs=[result_output, difficulty_label])

demo.launch()
 
3
  from transformers import T5ForConditionalGeneration, T5Tokenizer, pipeline
4
 
5
  # -----------------------------
6
+ # 1. Load the Models
7
+ # a) For question generation (using T5-base QG model with slow tokenizer)
8
+ # b) For passage generation (using GPT-2 for dynamic passage creation)
9
  # -----------------------------
10
  tokenizer = T5Tokenizer.from_pretrained("valhalla/t5-base-qg-hl", use_fast=False)
11
  model = T5ForConditionalGeneration.from_pretrained("valhalla/t5-base-qg-hl")
 
15
  tokenizer=tokenizer
16
  )
17
 
18
+ # Pipeline for text generation (passage generation)
19
+ tg_pipeline = pipeline(
20
+ "text-generation",
21
+ model="gpt2",
22
+ max_length=100,
23
+ do_sample=True
24
+ )
25
+
26
  # -----------------------------
27
+ # 2. Dynamically Generate a Passage Based on Difficulty
28
+ # The generated passage should include <hl> tokens to mark the key fact (answer).
29
  # -----------------------------
30
def generate_passage(difficulty):
    """Generate a passage for the given difficulty with one <hl>-highlighted fact.

    Prompts GPT-2 with difficulty-specific instructions.  The
    text-generation pipeline echoes the prompt in its output, and the
    prompt itself contains the literal "<hl>" tokens — so without stripping
    the prompt prefix, the `"<hl>" not in generated` fallback below could
    never fire, and downstream answer extraction would pick up the word
    "and" from the instruction text instead of a real fact.  The prefix is
    therefore removed before checking for highlights; if the continuation
    carries no <hl> pair, one is inserted around a middle word as a
    fallback.
    """
    if difficulty == "easy":
        prompt = (
            "Write a simple, short informative passage about a common fact. "
            "Enclose one key fact with <hl> and <hl> to highlight it. "
            "Keep it easy to understand."
        )
    elif difficulty == "medium":
        prompt = (
            "Write a medium-difficulty informative passage on a historical or scientific topic. "
            "Enclose one key fact with <hl> and <hl> to highlight it. "
            "The passage should be moderately challenging."
        )
    elif difficulty == "hard":
        prompt = (
            "Write a challenging, detailed informative passage on a complex topic. "
            "Enclose one key fact with <hl> and <hl> to highlight it. "
            "The passage should be thought-provoking and detailed."
        )
    else:
        prompt = (
            "Write an informative passage and enclose one key fact with <hl> and <hl> to highlight it."
        )

    # Generate a passage using the text-generation pipeline.
    generated = tg_pipeline(prompt, num_return_sequences=1)[0]["generated_text"]

    # Drop the echoed prompt so its instructional "<hl>" tokens are not
    # mistaken for a highlighted answer.
    if generated.startswith(prompt):
        generated = generated[len(prompt):].strip()

    # If the passage does not include the <hl> tokens, insert a pair
    # manually around a word near the middle.
    if "<hl>" not in generated:
        words = generated.split()
        if len(words) > 10:
            insert_pos = len(words) // 2
            words.insert(insert_pos, "<hl>")
            words.insert(insert_pos + 2, "<hl>")
            generated = " ".join(words)
    return generated
66
 
67
  # -----------------------------
68
  # 3. Session State Initialization
 
73
  "score": 0,
74
  "question": "",
75
  "answer": "",
76
+ "passage": ""
77
  }
78
 
79
  # -----------------------------
 
82
def adjust_difficulty(state):
    """Promote or demote the player's level based on the running score.

    A score of +2 climbs one level and -2 drops one level; either move
    zeroes the score.  Returns an encouraging status line in every case.
    """
    ladder = ["easy", "medium", "hard"]
    rung = ladder.index(state["difficulty"])

    if state["score"] >= 2 and rung + 1 < len(ladder):
        state["difficulty"] = ladder[rung + 1]
        state["score"] = 0  # reset score when leveling up
        return "Level up! Difficulty increased to: " + state["difficulty"]

    if state["score"] <= -2 and rung > 0:
        state["difficulty"] = ladder[rung - 1]
        state["score"] = 0  # reset score when leveling down
        return "Don't worry! Difficulty decreased to: " + state["difficulty"]

    return f"Keep going! Current level: {state['difficulty']} (Score: {state['score']})"
95
 
96
  # -----------------------------
97
+ # 5. Generate a Question from a Dynamically Generated Passage
98
  # -----------------------------
99
  def generate_question(state):
100
+ # Dynamically generate a passage based on current difficulty.
101
+ passage = generate_passage(state["difficulty"])
102
+ state["passage"] = passage
103
+
104
+ # Extract the answer from the passage (text between <hl> tokens).
105
+ parts = passage.split("<hl>")
106
+ if len(parts) >= 3:
107
  answer = parts[1].strip()
108
  else:
109
  answer = "N/A"
110
+
111
+ # Use the question generation pipeline on the generated passage.
112
+ result = qg_pipeline(passage, max_length=64)
113
  question_text = result[0]["generated_text"]
114
+
 
115
  state["question"] = question_text
116
  state["answer"] = answer
 
117
  return question_text
118
 
119
  # -----------------------------
 
122
def check_answer(state, user_answer):
    """Grade the submitted answer and fold the result into the difficulty.

    Matching ignores case and surrounding whitespace.  The score moves by
    one point either way, then adjust_difficulty() is consulted; the
    feedback and difficulty messages are returned joined by a newline.
    """
    expected = state["answer"].lower().strip()
    submitted = user_answer.lower().strip()

    if submitted == expected:
        state["score"] += 1
        result_text = "Correct! Nice work!"
    else:
        state["score"] -= 1
        result_text = f"Oops! The correct answer was: {state['answer']}"

    difficulty_update = adjust_difficulty(state)
    return result_text + "\n" + difficulty_update
133
 
134
  # -----------------------------
135
+ # 7. Build the Gradio Interface with a Hip, Rainbow Theme
136
  # -----------------------------
137
  custom_css = """
138
  body {
139
+ background: linear-gradient(135deg, #ff0000, #ff7f00, #ffff00, #00ff00, #0000ff, #4b0082, #8f00ff);
140
+ font-family: 'Poppins', sans-serif;
141
  }
142
  .gradio-container {
143
+ background-color: rgba(255, 255, 255, 0.95);
144
+ border-radius: 20px;
145
+ padding: 30px;
146
+ box-shadow: 0 8px 20px rgba(0, 0, 0, 0.3);
147
  }
148
  .gradio-markdown h1 {
149
+ color: #ff1493;
150
+ text-shadow: 2px 2px 4px #000000;
151
  }
152
  .gradio-markdown p, .gradio-markdown li {
153
+ font-size: 18px;
154
  color: #333333;
155
  }
156
  .gradio-button, .gradio-textbox {
157
  font-size: 16px;
158
+ border-radius: 10px;
159
  }
160
  """
161
 
162
with gr.Blocks(css=custom_css) as demo:
    # Per-session game state.
    state = gr.State(init_state())

    gr.Markdown("# Lingo Quest: The Ultimate Word Adventure")
    gr.Markdown(
        "Welcome to **Lingo Quest**! Embark on a vibrant journey where every challenge is a step toward mastering language. "
        "Answer questions correctly to level up and unlock new challenges. Get ready to flex your word skills and become a language legend!"
    )

    # Live banner with the player's current level and score.
    difficulty_label = gr.Markdown("**Current Level**: (will be updated)")

    with gr.Row():
        generate_button = gr.Button("Start Challenge!")
        question_output = gr.Textbox(label="Challenge Question", interactive=False)

    user_answer = gr.Textbox(label="Your Answer")
    submit_button = gr.Button("Submit Answer")
    result_output = gr.Textbox(label="Result", interactive=False)

    def update_difficulty_label(state):
        # Render the level banner from the session state.
        return f"**Current Level**: {state['difficulty']} (Score: {state['score']})"

    def on_generate_question(state):
        # Produce a new challenge and a refreshed banner together.
        question = generate_question(state)
        return question, update_difficulty_label(state)

    def on_submit_answer(user_answer, state):
        # Grade the submission, then refresh the banner.
        feedback = check_answer(state, user_answer)
        return feedback, update_difficulty_label(state)

    # Show the banner as soon as the page loads, then wire the buttons.
    demo.load(fn=update_difficulty_label, inputs=state, outputs=difficulty_label)
    generate_button.click(fn=on_generate_question, inputs=state, outputs=[question_output, difficulty_label])
    submit_button.click(fn=on_submit_answer, inputs=[user_answer, state], outputs=[result_output, difficulty_label])

demo.launch()