Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -5,101 +5,99 @@ import os # For environment variables
|
|
5 |
# Initialize the Hugging Face Inference Client
|
6 |
client = InferenceClient()
|
7 |
|
8 |
-
#
|
9 |
-
def
|
10 |
-
prompt
|
11 |
-
|
12 |
-
|
|
|
|
|
13 |
messages=[{"role": "user", "content": prompt}],
|
14 |
temperature=0.7,
|
15 |
max_tokens=1024,
|
16 |
-
top_p=0.8
|
|
|
17 |
)
|
18 |
-
|
|
|
|
|
|
|
|
|
19 |
|
20 |
-
|
21 |
-
prompt = os.getenv("PROMPT_HINT").format(problem=problem, difficulty=difficulty)
|
22 |
-
response = client.chat.completions.create(
|
23 |
-
model="Qwen/QwQ-32B-Preview",
|
24 |
-
messages=[{"role": "user", "content": prompt}],
|
25 |
-
temperature=0.7,
|
26 |
-
max_tokens=512,
|
27 |
-
top_p=0.8
|
28 |
-
)
|
29 |
-
return response.choices[0].message["content"]
|
30 |
-
|
31 |
-
def verify_solution(problem, solution):
|
32 |
-
prompt = os.getenv("PROMPT_VERIFY").format(problem=problem, solution=solution)
|
33 |
-
response = client.chat.completions.create(
|
34 |
-
model="Qwen/QwQ-32B-Preview",
|
35 |
-
messages=[{"role": "user", "content": prompt}],
|
36 |
-
temperature=0.7,
|
37 |
-
max_tokens=512,
|
38 |
-
top_p=0.8
|
39 |
-
)
|
40 |
-
return response.choices[0].message["content"]
|
41 |
-
|
42 |
-
def generate_practice_question(topic, difficulty):
|
43 |
-
prompt = os.getenv("PROMPT_GENERATE").format(topic=topic, difficulty=difficulty)
|
44 |
-
response = client.chat.completions.create(
|
45 |
-
model="Qwen/QwQ-32B-Preview",
|
46 |
-
messages=[{"role": "user", "content": prompt}],
|
47 |
-
temperature=0.7,
|
48 |
-
max_tokens=512,
|
49 |
-
top_p=0.8
|
50 |
-
)
|
51 |
-
return response.choices[0].message["content"]
|
52 |
-
|
53 |
-
def explain_concept(problem, difficulty):
|
54 |
-
prompt = os.getenv("PROMPT_EXPLAIN").format(problem=problem, difficulty=difficulty)
|
55 |
-
response = client.chat.completions.create(
|
56 |
-
model="Qwen/QwQ-32B-Preview",
|
57 |
-
messages=[{"role": "user", "content": prompt}],
|
58 |
-
temperature=0.7,
|
59 |
-
max_tokens=512,
|
60 |
-
top_p=0.8
|
61 |
-
)
|
62 |
-
return response.choices[0].message["content"]
|
63 |
-
|
64 |
-
# Create Gradio interface
|
65 |
with gr.Blocks() as app:
|
66 |
gr.Markdown("## Mathematical Insight Tutor")
|
67 |
gr.Markdown("An advanced AI-powered tutor to help you master math concepts.")
|
68 |
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
75 |
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
|
|
|
|
|
|
82 |
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
|
|
|
|
89 |
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
|
|
|
|
96 |
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
103 |
|
104 |
# Launch the app
|
105 |
app.launch(debug=True)
|
|
|
5 |
# Initialize the Hugging Face Inference Client
|
6 |
client = InferenceClient()
|
7 |
|
8 |
+
# Generic function to generate AI response with streaming
def generate_response(prompt_template, **kwargs):
    """Stream a model reply built from an environment-stored prompt template.

    Parameters
    ----------
    prompt_template : str
        Name of the environment variable that holds the prompt format
        string (e.g. ``"PROMPT_SOLVE"``).
    **kwargs
        Values substituted into the format string's placeholders.

    Yields
    ------
    str
        The accumulated response text after each streamed chunk, so a
        Gradio output bound to this generator updates incrementally.
    """
    # Format the prompt with the provided arguments.
    # NOTE(review): os.getenv returns None when the variable is unset,
    # which would raise AttributeError on .format — assumes the Space's
    # secrets are configured; confirm before hardening further.
    prompt = os.getenv(prompt_template).format(**kwargs)
    # Stream the response from the model
    stream = client.chat.completions.create(
        model="Qwen/Qwen2.5-Math-72B-Instruct",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.7,
        max_tokens=1024,
        top_p=0.8,
        stream=True,
    )
    # Accumulate chunks as they arrive. BUG FIX: the final chunk of an
    # OpenAI-style stream typically carries delta.content=None, and
    # `str + None` raises TypeError — guard with `or ""`.
    response = ""
    for chunk in stream:
        response += chunk.choices[0].delta.content or ""
        yield response
26 |
|
27 |
+
# Gradio app interface
with gr.Blocks() as app:
    gr.Markdown("## Mathematical Insight Tutor")
    gr.Markdown("An advanced AI-powered tutor to help you master math concepts.")

    # Function to create a reusable tab with inputs and outputs
    def create_tab(tab_name, prompt_template, inputs):
        """Build one tab: dynamic input widgets, a trigger button, and a
        Markdown output wired to the streaming response generator."""
        with gr.Tab(tab_name):
            input_fields = []
            # Create input components dynamically from the spec dicts
            for inp in inputs:
                if inp["type"] == "textbox":
                    input_fields.append(
                        gr.Textbox(lines=inp.get("lines", 1), label=inp["label"], placeholder=inp["placeholder"])
                    )
                elif inp["type"] == "dropdown":
                    input_fields.append(
                        gr.Dropdown(choices=inp["choices"], label=inp["label"], value=inp.get("default", ""))
                    )
            # Button and output
            button = gr.Button(f"{tab_name} Execute")
            output = gr.Markdown()

            # BUG FIX: the original bound the handler as
            #   fn=lambda *args: generate_response(...)
            # Gradio enables streaming only when
            # inspect.isgeneratorfunction(fn) is true; a lambda that merely
            # *returns* a generator object is not a generator function, so
            # the raw generator would be rendered instead of streamed.
            # Use a real generator function that delegates via `yield from`.
            # Keyword arguments are bound at definition time to avoid any
            # late-binding surprises.
            keys = tuple(inp["key"] for inp in inputs)

            def handler(*args, _template=prompt_template, _keys=keys):
                yield from generate_response(_template, **dict(zip(_keys, args)))

            # Link button to the response generator
            button.click(
                fn=handler,
                inputs=input_fields,
                outputs=output,
            )

    # Tabs for various functionalities
    create_tab(
        "Solve a Problem",
        "PROMPT_SOLVE",
        [
            {"key": "problem", "type": "textbox", "label": "Enter Math Problem", "placeholder": "e.g., Solve for x: 2x + 5 = 15"},
            {"key": "difficulty", "type": "dropdown", "label": "Difficulty Level", "choices": ["Beginner", "Intermediate", "Advanced"]}
        ]
    )

    create_tab(
        "Generate a Hint",
        "PROMPT_HINT",
        [
            {"key": "problem", "type": "textbox", "label": "Enter Math Problem for Hint", "placeholder": "e.g., Solve for x: 2x + 5 = 15"},
            {"key": "difficulty", "type": "dropdown", "label": "Difficulty Level", "choices": ["Beginner", "Intermediate", "Advanced"]}
        ]
    )

    create_tab(
        "Verify Solution",
        "PROMPT_VERIFY",
        [
            {"key": "problem", "type": "textbox", "label": "Enter Math Problem", "placeholder": "e.g., Solve for x: 2x + 5 = 15"},
            {"key": "solution", "type": "textbox", "label": "Enter Your Solution", "placeholder": "e.g., x = 5"}
        ]
    )

    create_tab(
        "Generate Practice Question",
        "PROMPT_GENERATE",
        [
            {"key": "topic", "type": "textbox", "label": "Enter Math Topic", "placeholder": "e.g., Algebra, Calculus"},
            {"key": "difficulty", "type": "dropdown", "label": "Difficulty Level", "choices": ["Beginner", "Intermediate", "Advanced"]}
        ]
    )

    create_tab(
        "Explain Concept",
        "PROMPT_EXPLAIN",
        [
            {"key": "problem", "type": "textbox", "label": "Enter Math Problem", "placeholder": "e.g., Solve for x: 2x + 5 = 15"},
            {"key": "difficulty", "type": "dropdown", "label": "Difficulty Level", "choices": ["Beginner", "Intermediate", "Advanced"]}
        ]
    )

# Launch the app
app.launch(debug=True)
|