WSLINMSAI committed on
Commit
ab2ce90
·
verified ·
1 Parent(s): 0cc6504

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -57
app.py CHANGED
@@ -3,9 +3,8 @@ import gradio as gr
3
  from transformers import T5ForConditionalGeneration, T5Tokenizer, pipeline
4
 
5
  # -----------------------------
6
- # 1. Load the Models
7
- # a) For question generation (using T5-base QG model with slow tokenizer)
8
- # b) For passage generation (using GPT-2 for dynamic passage creation)
9
  # -----------------------------
10
  tokenizer = T5Tokenizer.from_pretrained("valhalla/t5-base-qg-hl", use_fast=False)
11
  model = T5ForConditionalGeneration.from_pretrained("valhalla/t5-base-qg-hl")
@@ -15,54 +14,47 @@ qg_pipeline = pipeline(
15
  tokenizer=tokenizer
16
  )
17
 
18
- # Pipeline for text generation (passage generation)
19
# Text-generation pipeline (GPT-2) used to synthesize practice passages
# on the fly; sampling is enabled so repeated calls vary.
tg_pipeline = pipeline(
    "text-generation",
    model="gpt2",
    max_length=100,
    do_sample=True,
)
25
-
26
  # -----------------------------
27
- # 2. Dynamically Generate a Passage Based on Difficulty
28
- # The generated passage should include <hl> tokens to mark the key fact (answer).
29
  # -----------------------------
30
def generate_passage(difficulty):
    """Generate a practice passage for the given difficulty using GPT-2.

    The returned text is expected to contain one key fact wrapped in
    ``<hl> ... <hl>`` markers so the downstream question-generation model
    (and the caller's answer extraction) can target it.

    Args:
        difficulty: "easy", "medium", or "hard"; any other value falls
            back to a generic prompt.

    Returns:
        str: the generated passage, guaranteed to contain ``<hl>`` markers
        whenever the model produced any words at all.
    """
    # Table-driven prompt selection instead of an if/elif chain.
    prompts = {
        "easy": (
            "Write a simple, short informative passage about a common fact. "
            "Enclose one key fact with <hl> and <hl> to highlight it. "
            "Keep it easy to understand."
        ),
        "medium": (
            "Write a medium-difficulty informative passage on a historical or scientific topic. "
            "Enclose one key fact with <hl> and <hl> to highlight it. "
            "The passage should be moderately challenging."
        ),
        "hard": (
            "Write a challenging, detailed informative passage on a complex topic. "
            "Enclose one key fact with <hl> and <hl> to highlight it. "
            "The passage should be thought-provoking and detailed."
        ),
    }
    prompt = prompts.get(
        difficulty,
        "Write an informative passage and enclose one key fact with <hl> and <hl> to highlight it.",
    )

    # Generate a passage using the text-generation pipeline.
    generated = tg_pipeline(prompt, num_return_sequences=1)[0]["generated_text"]

    # Fallback: GPT-2 frequently ignores the <hl> instruction, so insert
    # the markers manually around a single word.
    if "<hl>" not in generated:
        words = generated.split()
        if len(words) > 10:
            insert_pos = len(words) // 2
            words.insert(insert_pos, "<hl>")
            words.insert(insert_pos + 2, "<hl>")
        elif words:
            # BUG FIX: short outputs (<= 10 words) previously received no
            # markers at all, so the caller's answer extraction always
            # reported "N/A". Highlight the final word instead.
            words.insert(len(words) - 1, "<hl>")
            words.append("<hl>")
        generated = " ".join(words)
    return generated
 
 
66
 
67
  # -----------------------------
68
  # 3. Session State Initialization
@@ -84,34 +76,35 @@ def adjust_difficulty(state):
84
  idx = diff_order.index(state["difficulty"])
85
  if state["score"] >= 2 and idx < len(diff_order) - 1:
86
  state["difficulty"] = diff_order[idx + 1]
87
- state["score"] = 0 # reset score when leveling up
88
  return "Level up! Difficulty increased to: " + state["difficulty"]
89
  elif state["score"] <= -2 and idx > 0:
90
  state["difficulty"] = diff_order[idx - 1]
91
- state["score"] = 0 # reset score when leveling down
92
  return "Don't worry! Difficulty decreased to: " + state["difficulty"]
93
  else:
94
  return f"Keep going! Current level: {state['difficulty']} (Score: {state['score']})"
95
 
96
  # -----------------------------
97
- # 5. Generate a Question from a Dynamically Generated Passage
98
  # -----------------------------
99
  def generate_question(state):
100
- # Dynamically generate a passage based on current difficulty.
101
- passage = generate_passage(state["difficulty"])
102
- state["passage"] = passage
103
 
104
- # Extract the answer from the passage (text between <hl> tokens).
105
  parts = passage.split("<hl>")
106
  if len(parts) >= 3:
107
  answer = parts[1].strip()
108
  else:
109
  answer = "N/A"
110
 
111
- # Use the question generation pipeline on the generated passage.
112
  result = qg_pipeline(passage, max_length=64)
113
  question_text = result[0]["generated_text"]
114
 
 
 
115
  state["question"] = question_text
116
  state["answer"] = answer
117
  return question_text
 
3
  from transformers import T5ForConditionalGeneration, T5Tokenizer, pipeline
4
 
5
  # -----------------------------
6
+ # 1. Load the Model & Slow Tokenizer
7
+ # Using the "valhalla/t5-base-qg-hl" model for improved question quality.
 
8
  # -----------------------------
9
  tokenizer = T5Tokenizer.from_pretrained("valhalla/t5-base-qg-hl", use_fast=False)
10
  model = T5ForConditionalGeneration.from_pretrained("valhalla/t5-base-qg-hl")
 
14
  tokenizer=tokenizer
15
  )
16
 
 
 
 
 
 
 
 
 
17
  # -----------------------------
18
+ # 2. Define Fixed Passages by Difficulty
 
19
  # -----------------------------
20
# Fixed question/answer passages, bucketed by difficulty. Each entry
# embeds its answer between <hl> markers so the QG model and the answer
# extractor can locate it.
_easy_passages = [
    "What is the capital of France? <hl>Paris<hl>",
    "What type of animal is a cat? <hl>Feline<hl>",
    "What liquid is essential for all known forms of life? <hl>Water<hl>",
    "In which direction does the sun rise? <hl>East<hl>",
    "What quality are dogs best known for? <hl>Loyalty<hl>",
    "Which planet do humans live on? <hl>Earth<hl>",
    "Which fruit is commonly red and grows on trees? <hl>Apple<hl>",
    "What device is used to tell time? <hl>Clock<hl>",
    "Which object in the sky shines at night? <hl>Moon<hl>",
    "What appears in the sky after rain? <hl>Rainbow<hl>",
]

_medium_passages = [
    "In which country was Albert Einstein born? <hl>Germany<hl>",
    "What is the process by which plants convert sunlight into energy? <hl>Photosynthesis<hl>",
    "Which explorer sailed across the Atlantic Ocean in 1492? <hl>Columbus<hl>",
    "Which rainforest is the largest in the world? <hl>Amazon<hl>",
    "Which ocean is the largest and deepest on Earth? <hl>Pacific<hl>",
    "Which gas is essential for human breathing? <hl>Oxygen<hl>",
    "Which galaxy contains our solar system? <hl>Milky<hl>",
    "What structures in Egypt were built as tombs for pharaohs? <hl>Pyramids<hl>",
    "Which revolution transformed manufacturing in the 18th and 19th centuries? <hl>Industrial<hl>",
    "What system of government allows citizens to elect their leaders? <hl>Democracy<hl>",
]

_hard_passages = [
    "Which historical period was a European cultural and intellectual revival? <hl>Renaissance<hl>",
    "Which branch of physics describes nature at the smallest scales? <hl>Quantum<hl>",
    "What document established the framework of the U.S. government? <hl>Constitution<hl>",
    "Which empire was the continuation of the Roman Empire in the East? <hl>Byzantine<hl>",
    "Which fundamental particle gives mass to others? <hl>Higgs<hl>",
    "Which principle states that position and momentum cannot both be precisely known? <hl>Heisenberg<hl>",
    "What was the period of political tension between the U.S. and the Soviet Union called? <hl>Cold<hl>",
    "Which collection of essays supported the ratification of the U.S. Constitution? <hl>Federalist<hl>",
    "Which theoretical framework aims to unify quantum mechanics and relativity? <hl>String<hl>",
    "Which field of computer science focuses on creating intelligent machines? <hl>Artificial<hl>",
]

passages = {
    "easy": _easy_passages,
    "medium": _medium_passages,
    "hard": _hard_passages,
}
58
 
59
  # -----------------------------
60
  # 3. Session State Initialization
 
76
  idx = diff_order.index(state["difficulty"])
77
  if state["score"] >= 2 and idx < len(diff_order) - 1:
78
  state["difficulty"] = diff_order[idx + 1]
79
+ state["score"] = 0 # Reset score when leveling up
80
  return "Level up! Difficulty increased to: " + state["difficulty"]
81
  elif state["score"] <= -2 and idx > 0:
82
  state["difficulty"] = diff_order[idx - 1]
83
+ state["score"] = 0 # Reset score when leveling down
84
  return "Don't worry! Difficulty decreased to: " + state["difficulty"]
85
  else:
86
  return f"Keep going! Current level: {state['difficulty']} (Score: {state['score']})"
87
 
88
  # -----------------------------
89
+ # 5. Generate a Question from a Fixed Passage
90
  # -----------------------------
91
def generate_question(state):
    """Pick a fixed passage for the current difficulty and generate a question.

    Args:
        state: session dict with at least a "difficulty" key; it is
            updated in place with "passage", "question", and "answer".

    Returns:
        str: the question generated by the QG pipeline.
    """
    # NOTE(review): no module-level `import random` is visible in this
    # diff even though this function calls random.choice; the local
    # import keeps the function working either way — confirm the
    # top-of-file imports.
    import random

    # Select a random passage from the fixed list for the current difficulty.
    passage = random.choice(passages[state["difficulty"]])

    # The answer is the text between the first pair of <hl> markers;
    # "N/A" signals a malformed passage rather than raising.
    parts = passage.split("<hl>")
    answer = parts[1].strip() if len(parts) >= 3 else "N/A"

    # Generate the question from the highlighted passage.
    result = qg_pipeline(passage, max_length=64)
    question_text = result[0]["generated_text"]

    # Persist the round's passage, question, and correct answer.
    state["passage"] = passage
    state["question"] = question_text
    state["answer"] = answer
    return question_text