joey1101 committed
Commit b9ee180 · verified · 1 Parent(s): 2ff092d

Update app.py

Files changed (1):
  app.py  +2 -2
app.py CHANGED
@@ -74,8 +74,8 @@ def response_gen(user_review):
     prompt = emotion_prompts.get(emotion_label, "Neutral").format(review=user_review)

     # Load a pre-trained text generation model (replace 'meta-llama/Llama-3.2-1B' with an available model)
-    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B")
-    model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B")
+    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B")
+    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B")
     inputs = tokenizer(prompt, return_tensors="pt")  # Tokenize the prompt
     outputs = model.generate(**inputs, max_new_tokens=100)  # Generate a response
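For context, a minimal, self-contained sketch of the generation step as it stands after this commit, assuming the transformers library is installed; emotion_prompts, emotion_label, and user_review are defined elsewhere in app.py, so the values below are illustrative stubs only:

# Sketch of the updated generation step in response_gen (not the full app.py).
from transformers import AutoTokenizer, AutoModelForCausalLM

# Illustrative stubs; the real values come from earlier parts of app.py.
emotion_prompts = {"Neutral": "Reply politely to this customer review: {review}"}
emotion_label = "Neutral"
user_review = "The product arrived late but works fine."

prompt = emotion_prompts.get(emotion_label, "Neutral").format(review=user_review)

# Load the smaller, openly available Qwen model introduced by this commit
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B")
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B")

inputs = tokenizer(prompt, return_tensors="pt")           # Tokenize the prompt
outputs = model.generate(**inputs, max_new_tokens=100)    # Generate a response
print(tokenizer.decode(outputs[0], skip_special_tokens=True))

Swapping meta-llama/Llama-3.2-1B for Qwen/Qwen1.5-0.5B avoids the gated-access requirement of the Llama checkpoint and reduces the download/memory footprint, at the cost of a smaller model.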