changes made to app.py

app.py CHANGED
@@ -10,50 +10,33 @@ import sys
 # Recursion Handling Fix
 # ===============================
 def _patched_json_schema_to_python_type(schema, defs=None, depth=0):
-    # Safety check to prevent infinite recursion
     if depth > 100:
         return "Any"
-
-    # Handle boolean cases
     if isinstance(schema, bool):
         return "Any" if schema else "None"
-
-    # Call the original function with increased depth
     try:
         return client_utils._json_schema_to_python_type(schema, defs)
     except RecursionError:
         return "Any"
 
-# Modify the utilities to use the patched function
 client_utils._json_schema_to_python_type = _patched_json_schema_to_python_type
-
-# Increase recursion limit as a backup
 sys.setrecursionlimit(10000)
 
-#
+# ===============================
+# Device and Model Setup
+# ===============================
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
 hf_token = os.environ["HF_TOKEN"]
 
-
-# ===============================
-# Load Question Generation Model
-# ===============================
-model_path = "AI-Mock-Interviewer/T5"
+model_path = "AI-Mock-Interviewer/T5"
 tokenizer = AutoTokenizer.from_pretrained(model_path)
 model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
-
-# Move model to the appropriate device
 model.to(device)
 
-# ===============================
-# Load Evaluation Model (QwQ)
-# ===============================
 bnb_config = BitsAndBytesConfig(
     load_in_8bit=True,
     llm_int8_enable_fp32_cpu_offload=True,
 )
-
 qwq_model_id = "unsloth/QwQ-32B-unsloth-bnb-4bit"
 qwq_tokenizer = AutoTokenizer.from_pretrained(qwq_model_id, trust_remote_code=True)
 qwq_model = AutoModelForCausalLM.from_pretrained(
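Note: the hunk above monkey-patches gradio_client's schema converter so that pathological or self-referencing JSON schemas degrade to "Any" instead of crashing the Space with a RecursionError. A minimal, self-contained sketch of the same guard pattern; the toy convert_schema below is illustrative, not the gradio_client implementation:

    # Toy recursive converter standing in for _json_schema_to_python_type:
    # a hard depth cap makes cyclic schemas terminate with "Any" rather
    # than blow the interpreter's recursion limit.
    def convert_schema(schema, depth=0):
        if depth > 100:                     # same cap as the patch above
            return "Any"
        if isinstance(schema, bool):
            return "Any" if schema else "None"
        if isinstance(schema, dict) and "items" in schema:
            return f"list[{convert_schema(schema['items'], depth + 1)}]"
        return "Any"

    cyclic = {}
    cyclic["items"] = cyclic                # self-referencing schema
    print(convert_schema(cyclic)[:20], "...")  # terminates instead of recursing forever
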
@@ -67,11 +50,7 @@ qwq_model = AutoModelForCausalLM.from_pretrained(
 # Prompts and Scoring
 # ===============================
 system_prompt = """
-You are conducting a mock technical interview. The candidate's experience level can be entry-level, mid-level, or senior-level
-1. The question should be relevant to the domain and appropriate for the candidate's experience level.
-2. For follow-up questions, analyze the candidate's last response and ask questions that probe deeper into their understanding.
-3. Avoid repeating previously asked questions or subtopics.
-4. Keep questions clear and concise, targeting core technical and communication skills.
+You are conducting a mock technical interview. The candidate's experience level can be entry-level, mid-level, or senior-level...
 """
 
 subtopic_keywords = {
@@ -84,7 +63,19 @@ rating_scores = {"Good": 3, "Average": 2, "Needs Improvement": 1}
 score_categories = [(90, "Excellent"), (75, "Very Good"), (60, "Good"), (45, "Average"), (0, "Needs Improvement")]
 
 # ===============================
-#
+# Utility for Gradio Chat Format
+# ===============================
+def convert_for_gradio(convo):
+    role_map = {
+        "Interviewer": "assistant",
+        "Candidate": "user",
+        "Evaluator": "system",
+        "System": "system"
+    }
+    return [{"role": role_map.get(msg["role"], "system"), "content": msg["content"]} for msg in convo]
+
+# ===============================
+# Core Functions
 # ===============================
 def identify_subtopic(question, domain):
     domain = domain.lower()
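Note: the new convert_for_gradio helper is what lets every handler return messages that gr.Chatbot accepts. A usage sketch, duplicating the helper so it runs standalone:

    # Internal roles map onto the "user"/"assistant"/"system" roles that
    # Gradio's messages-format Chatbot expects; unknown roles fall back to "system".
    def convert_for_gradio(convo):
        role_map = {"Interviewer": "assistant", "Candidate": "user",
                    "Evaluator": "system", "System": "system"}
        return [{"role": role_map.get(m["role"], "system"), "content": m["content"]} for m in convo]

    convo = [{"role": "Interviewer", "content": "What is a decorator?"},
             {"role": "Candidate", "content": "A callable that wraps another callable."}]
    print(convert_for_gradio(convo))
    # [{'role': 'assistant', ...}, {'role': 'user', ...}]
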
@@ -96,14 +87,11 @@ def identify_subtopic(question, domain):
 
 def generate_question(prompt, domain, state=None):
     full_prompt = system_prompt + "\n" + prompt
-    # Explicitly set padding side and add pad token
     tokenizer.padding_side = "left"
     if tokenizer.pad_token is None:
         tokenizer.pad_token = tokenizer.eos_token
-
-    # Tokenize with explicit padding and attention mask
+
     inputs = tokenizer(full_prompt, return_tensors="pt", padding=True, truncation=True).to(device)
-
     outputs = model.generate(
         inputs["input_ids"],
         attention_mask=inputs["attention_mask"],
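Note: left padding matters here because generation continues from the last token of the prompt; right padding would leave pad tokens between the prompt and the generated text. A standalone sketch of the same tokenizer setup, with gpt2 as an arbitrary stand-in for a model that ships without a pad token:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("gpt2")  # illustrative choice only
    tok.padding_side = "left"
    if tok.pad_token is None:
        tok.pad_token = tok.eos_token            # reuse EOS when no pad token exists

    batch = tok(["short", "a noticeably longer prompt"],
                return_tensors="pt", padding=True, truncation=True)
    print(batch["attention_mask"].tolist())      # zeros (padding) appear on the left
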
@@ -122,21 +110,18 @@ def generate_question(prompt, domain, state=None):
         subtopic = identify_subtopic(question, domain)
 
         if state is not None:
-            if (question not in state["asked_questions"] and
-                    (subtopic is None or subtopic not in state["asked_subtopics"])):
+            if question not in state["asked_questions"] and (subtopic is None or subtopic not in state["asked_subtopics"]):
                 state["asked_questions"].append(question)
                 if subtopic:
                     state["asked_subtopics"].append(subtopic)
                 return question
     return question
 
-
 def evaluate_response(response, question):
-    # Explicitly set padding side and add pad token
     qwq_tokenizer.padding_side = "left"
     if qwq_tokenizer.pad_token is None:
         qwq_tokenizer.pad_token = qwq_tokenizer.eos_token
-
+
     eval_prompt = (
         "Evaluate the following candidate response to an interview question.\n\n"
         f"**Question:** {question}\n"
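Note: the condition collapsed onto one line above implements the de-duplication the system prompt asks for: a question is accepted only if its text is new and its subtopic, when one is identified, is unseen. The same logic isolated for clarity:

    state = {"asked_questions": [], "asked_subtopics": []}

    def accept(question, subtopic, state):
        # Mirrors the rewritten condition in generate_question.
        if question not in state["asked_questions"] and (subtopic is None or subtopic not in state["asked_subtopics"]):
            state["asked_questions"].append(question)
            if subtopic:
                state["asked_subtopics"].append(subtopic)
            return True
        return False

    print(accept("Explain REST.", "apis", state))  # True: first occurrence
    print(accept("Explain REST.", "apis", state))  # False: question and subtopic already seen
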
@@ -145,10 +130,8 @@ def evaluate_response(response, question):
         "Also, provide a brief suggestion for improvement. Format:\n"
         "Rating: <Rating>\nSuggestion: <Suggestion>"
     )
-
-    # Tokenize with explicit padding and attention mask
+
     inputs = qwq_tokenizer(eval_prompt, return_tensors="pt", padding=True, truncation=True).to(qwq_model.device)
-
     outputs = qwq_model.generate(
         inputs["input_ids"],
         attention_mask=inputs["attention_mask"],
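Note: eval_prompt pins the judge model to a "Rating: <Rating>\nSuggestion: <Suggestion>" format, which keeps downstream parsing trivial. A hypothetical parser for that format; app.py's actual parsing code is outside this diff:

    import re

    def parse_evaluation(text):
        # Hypothetical helper: pulls the two labeled fields out of the model output,
        # falling back to a conservative default when a field is missing.
        rating = re.search(r"Rating:\s*(.+)", text)
        suggestion = re.search(r"Suggestion:\s*(.+)", text)
        return (rating.group(1).strip() if rating else "Needs Improvement",
                suggestion.group(1).strip() if suggestion else "")

    print(parse_evaluation("Rating: Good\nSuggestion: Add a concrete example."))
    # ('Good', 'Add a concrete example.')
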
@@ -183,105 +166,48 @@ def reset_state(name, domain, company, level):
 
 def start_interview(name, domain, company, level):
     try:
-
-        print(f"Start Interview Called:")
-        print(f"Name: {name}")
-        print(f"Domain: {domain}")
-        print(f"Company: {company}")
-        print(f"Level: {level}")
-
-        # Validate inputs
+        print(f"Start Interview Called:\nName: {name}\nDomain: {domain}\nLevel: {level}")
         if not name or not domain:
-            return [{"role": "
+            return [{"role": "system", "content": "Please provide a name and domain"}], None
 
-        # Create initial state
         state = reset_state(name, domain, company, level)
-        print("State reset successfully")
-
-        # Prepare prompt for question generation
         prompt = f"Domain: {domain}. Candidate experience level: {level}. Generate the first question:"
-
-
-        # Verify model is ready
-        print("Model device:", model.device)
-        print("Model ready:", model is not None)
-
-        # Generate first question
-        try:
-            question = generate_question(prompt, domain, state)
-            print(f"Generated Question: {question}")
-        except Exception as q_error:
-            print(f"Question Generation Error: {q_error}")
-            question = f"Error generating question: {q_error}"
-
-        # Append question to conversation
+        question = generate_question(prompt, domain, state)
         state["conversation"].append({"role": "Interviewer", "content": question})
-
-        return state["conversation"], state
 
-    except Exception as e:
-        print(f"CRITICAL ERROR in start_interview: {e}")
-        import traceback
-        traceback.print_exc()
-        return [{"role": "System", "content": f"Critical error: {e}"}], None
+        return convert_for_gradio(state["conversation"]), state
 
-
+    except Exception as e:
+        return [{"role": "system", "content": f"Critical error: {e}"}], None
 
 def submit_response(response, state):
-
-
-
-
-    # Ensure state is not None and interview is active
-    if state is None:
-        print("State is None, resetting")
-        state = reset_state("", "", "", "Entry-Level")
-
-    if not state.get("interview_active", False):
-        print("Interview not active")
-        return state["conversation"], state
-
-    # Handle empty response
+    if state is None or not state.get("interview_active", False):
+        return [{"role": "system", "content": "Interview is not active."}], state
+
     if not response or not response.strip():
-        print("Empty response")
         state["conversation"].append({"role": "System", "content": "⚠️ Please answer the question before proceeding."})
-        return state["conversation"], state
+        return convert_for_gradio(state["conversation"]), state
 
-    # Exit condition
     if response.strip().lower() == "exit":
-        print("Exit requested")
         return end_interview(state)
 
-    # Add candidate response to conversation
     state["conversation"].append({"role": "Candidate", "content": response})
-
-    # Find the last interviewer question
     last_q = next((msg["content"] for msg in reversed(state["conversation"]) if msg["role"] == "Interviewer"), "")
-
-    # Evaluate response
-    print("Evaluating response to question:", last_q)
     rating, suggestion = evaluate_response(response, last_q)
 
-    # Add evaluation to conversation and state
     state["evaluations"].append({
         "question": last_q,
         "response": response,
         "rating": rating,
        "suggestion": suggestion
    })
-
     state["conversation"].append({"role": "Evaluator", "content": f"Rating: {rating}\nSuggestion: {suggestion}"})
 
-    # Generate follow-up question
     prompt = f"Domain: {state['domain']}. Candidate's last response: {response}. Generate a follow-up question:"
     follow_up = generate_question(prompt, state["domain"], state)
-
-    print("Generated Follow-up Question:", follow_up)
     state["conversation"].append({"role": "Interviewer", "content": follow_up})
 
-    return state["conversation"], state
-
-    print("Conversation returned to UI:", state["conversation"])
+    return convert_for_gradio(state["conversation"]), state
 
 def end_interview(state):
     state["interview_active"] = False
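Note: reset_state's body is outside this diff, but the fields the rewritten handlers touch pin down its shape. A plausible reconstruction, labeled as an assumption rather than app.py's actual code:

    def reset_state(name, domain, company, level):
        # Assumed structure: every key below is read or written somewhere in the diff.
        return {
            "name": name, "domain": domain, "company": company, "level": level,
            "conversation": [],        # {"role", "content"} dicts, converted for Gradio on return
            "evaluations": [],         # one rating/suggestion record per answer
            "asked_questions": [],     # dedup history used by generate_question
            "asked_subtopics": [],
            "interview_active": True,  # gate checked at the top of submit_response
        }

    print(reset_state("Ada", "backend", "", "Entry-Level")["interview_active"])  # True
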
@@ -307,7 +233,7 @@ def end_interview(state):
         json.dump(summary, f, indent=4)
 
     state["conversation"].append({"role": "System", "content": f"✅ Interview ended. \nFinal Score: {summary['score']} ({summary['category']})"})
-    return state["conversation"], state
+    return convert_for_gradio(state["conversation"]), state
 
 def clear_state():
     return [], reset_state("", "", "", "Entry-Level")
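Note: end_interview writes a summary with a score and category. The exact aggregation is not visible in this diff, but the rating_scores and score_categories tables shown earlier suggest something like the sketch below; the percentage math is an assumption:

    rating_scores = {"Good": 3, "Average": 2, "Needs Improvement": 1}
    score_categories = [(90, "Excellent"), (75, "Very Good"), (60, "Good"),
                        (45, "Average"), (0, "Needs Improvement")]

    def summarize(evaluations):
        # Assumed aggregation: earned points over maximum possible, as a percentage,
        # then bucketed by the first threshold the score clears.
        earned = sum(rating_scores.get(e["rating"], 0) for e in evaluations)
        possible = 3 * len(evaluations) or 1      # avoid dividing by zero
        score = round(100 * earned / possible)
        category = next(label for threshold, label in score_categories if score >= threshold)
        return {"score": score, "category": category}

    print(summarize([{"rating": "Good"}, {"rating": "Average"}]))  # {'score': 83, 'category': 'Very Good'}
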
@@ -337,14 +263,11 @@ with gr.Blocks() as demo:
         exit_button = gr.Button("Exit Interview")
         clear_button = gr.Button("Clear Session")
 
-    # Initialize state with proper structure
     state = gr.State(value=reset_state("", "", "", "Entry-Level"))
 
-    start_button.click(start_interview,
-                       inputs=[name_input, domain_input, company_input, level_input],
-                       outputs=[chatbot, state])
+    start_button.click(start_interview, inputs=[name_input, domain_input, company_input, level_input], outputs=[chatbot, state])
     submit_button.click(submit_response, inputs=[response_input, state], outputs=[chatbot, state]).then(lambda: "", None, response_input)
     exit_button.click(end_interview, inputs=state, outputs=[chatbot, state])
     clear_button.click(clear_state, outputs=[chatbot, state])
 
-demo.launch()
+demo.launch()
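Note: the wiring block shows the full event pattern: gr.State carries the interview dict between callbacks, and .then() chained on submit clears the textbox after each answer. A minimal runnable sketch of the same pattern, assuming a recent Gradio (4.x or later, where Chatbot supports type="messages") and with the models swapped for an echo function:

    import gradio as gr

    def echo(msg, history):
        # Stand-in for submit_response: append the user turn and a reply,
        # return the updated history for both the Chatbot and the State.
        history = history + [{"role": "user", "content": msg},
                             {"role": "assistant", "content": f"echo: {msg}"}]
        return history, history

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot(type="messages")   # expects role/content dicts
        box = gr.Textbox()
        send = gr.Button("Send")
        state = gr.State([])                    # per-session conversation state
        send.click(echo, inputs=[box, state], outputs=[chatbot, state]).then(lambda: "", None, box)

    if __name__ == "__main__":
        demo.launch()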