joey1101 committed on
Commit
a6df8ff
·
verified ·
1 Parent(s): c96764a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -3
app.py CHANGED
@@ -73,9 +73,12 @@ def response_gen(user_review):
73
  # Format the prompt with the user's review
74
  prompt = emotion_prompts.get(emotion_label, "Neutral").format(review=user_review)
75
 
76
- # Load a pre-trained text generation model (replace 'meta-llama/Llama-3.2-1B' with an available model)
77
- tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B")
78
- model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B")
 
 
 
79
  inputs = tokenizer(prompt, return_tensors="pt") # Tokenize the prompt
80
  outputs = model.generate(**inputs, max_new_tokens=100) # Generate a response
81
 
 
73
  # Format the prompt with the user's review
74
  prompt = emotion_prompts.get(emotion_label, "Neutral").format(review=user_review)
75
 
76
+ # Load model directly
77
+ from transformers import AutoTokenizer, AutoModelForCausalLM
78
+
79
+ tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B")
80
+ model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B")
81
+
82
  inputs = tokenizer(prompt, return_tensors="pt") # Tokenize the prompt
83
  outputs = model.generate(**inputs, max_new_tokens=100) # Generate a response
84