Sephfox committed on
Commit
ff63280
·
verified ·
1 Parent(s): 27a0681

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -4
app.py CHANGED
@@ -174,7 +174,7 @@ def predict_emotion(context):
174
  def generate_text(prompt, chat_history, emotion=None, max_length=100):
175
  finetuned_lm_tokenizer, finetuned_lm_model = get_finetuned_lm_model()
176
 
177
- full_prompt = ""
178
  for turn in chat_history[-5:]: # Consider last 5 turns for context
179
  full_prompt += f"{finetuned_lm_tokenizer.eos_token}{turn[0]}{finetuned_lm_tokenizer.eos_token}{turn[1]}"
180
  full_prompt += f"{finetuned_lm_tokenizer.eos_token}{prompt}"
@@ -205,8 +205,7 @@ def generate_text(prompt, chat_history, emotion=None, max_length=100):
205
  do_sample=True,
206
  temperature=temperature,
207
  top_k=top_k,
208
- top_p=top_p,
209
- attention_mask=attention_mask
210
  )
211
 
212
  generated_text = finetuned_lm_tokenizer.decode(output[0], skip_special_tokens=True)
@@ -255,7 +254,7 @@ def respond_to_user(user_input, chat_history):
255
  dominant_emotion = get_dominant_emotion()
256
 
257
  # Generate a response considering the dominant emotion
258
- response = generate_text(user_input, dominant_emotion)
259
 
260
  # Update emotion history
261
  update_emotion_history(predicted_emotion, emotions[predicted_emotion]['intensity'])
 
174
  def generate_text(prompt, chat_history, emotion=None, max_length=100):
175
  finetuned_lm_tokenizer, finetuned_lm_model = get_finetuned_lm_model()
176
 
177
+ full_prompt = ""
178
  for turn in chat_history[-5:]: # Consider last 5 turns for context
179
  full_prompt += f"{finetuned_lm_tokenizer.eos_token}{turn[0]}{finetuned_lm_tokenizer.eos_token}{turn[1]}"
180
  full_prompt += f"{finetuned_lm_tokenizer.eos_token}{prompt}"
 
205
  do_sample=True,
206
  temperature=temperature,
207
  top_k=top_k,
208
+ top_p=top_p
 
209
  )
210
 
211
  generated_text = finetuned_lm_tokenizer.decode(output[0], skip_special_tokens=True)
 
254
  dominant_emotion = get_dominant_emotion()
255
 
256
  # Generate a response considering the dominant emotion
257
+ response = generate_text(user_input, chat_history, dominant_emotion)
258
 
259
  # Update emotion history
260
  update_emotion_history(predicted_emotion, emotions[predicted_emotion]['intensity'])