Spaces:
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -3,6 +3,7 @@ import torch
|
|
3 |
import time
|
4 |
import spaces # Import the spaces library
|
5 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
|
6 |
|
7 |
# --- Configuration ---
|
8 |
MODEL_ID = "Qwen/Qwen2.5-Math-1.5B" # Replace with actual ID if found
|
@@ -47,6 +48,17 @@ def generate_response(messages, max_length=512, temperature=0.7, top_p=0.9):
|
|
47 |
num_generated_tokens = len(output_ids)
|
48 |
response = tokenizer.decode(output_ids, skip_special_tokens=True)
|
49 |
print("Generation complete.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
50 |
return response.strip(), num_generated_tokens
|
51 |
|
52 |
except Exception as e:
|
|
|
3 |
import time
|
4 |
import spaces # Import the spaces library
|
5 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
6 |
+
from db import save_test_result
|
7 |
|
8 |
# --- Configuration ---
|
9 |
MODEL_ID = "Qwen/Qwen2.5-Math-1.5B" # Replace with actual ID if found
|
|
|
48 |
num_generated_tokens = len(output_ids)
|
49 |
response = tokenizer.decode(output_ids, skip_special_tokens=True)
|
50 |
print("Generation complete.")
|
51 |
+
|
52 |
+
save_test_result(
|
53 |
+
analysis_mode='',
|
54 |
+
system_prompt='',
|
55 |
+
input_content='',
|
56 |
+
model_response= response,
|
57 |
+
generation_time='',
|
58 |
+
tokens_generated='',
|
59 |
+
temperature='',
|
60 |
+
top_p='',
|
61 |
+
max_length='')
|
62 |
return response.strip(), num_generated_tokens
|
63 |
|
64 |
except Exception as e:
|