Sephfox committed on
Commit 920da7f · verified · 1 Parent(s): 172050e

Update app.py

Files changed (1): app.py +2 -2
app.py CHANGED
@@ -42,7 +42,7 @@ emotion_classes = pd.Categorical(df['emotion']).categories
 
 # Load pre-trained BERT model for emotion prediction
 emotion_prediction_model = AutoModelForSequenceClassification.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
-emotion_prediction_tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion")
+emotion_prediction_tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/distilbert-base-uncased-emotion", padding_side='left')
 
 # Lazy loading for the fine-tuned language model (DialoGPT-medium)
 _finetuned_lm_tokenizer = None
@@ -52,7 +52,7 @@ def get_finetuned_lm_model():
     global _finetuned_lm_tokenizer, _finetuned_lm_model
     if _finetuned_lm_tokenizer is None or _finetuned_lm_model is None:
         model_name = "microsoft/DialoGPT-medium"
-        _finetuned_lm_tokenizer = AutoTokenizer.from_pretrained(model_name)
+        _finetuned_lm_tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side='left')
         _finetuned_lm_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", low_cpu_mem_usage=True)
         _finetuned_lm_tokenizer.pad_token = _finetuned_lm_tokenizer.eos_token
     return _finetuned_lm_tokenizer, _finetuned_lm_model
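
Note on the change: the only difference in both hunks is the padding_side='left' argument passed to AutoTokenizer.from_pretrained. The minimal sketch below (not part of app.py; the prompts and generation settings are illustrative assumptions) shows why left padding is the usual choice for batched generation with a decoder-only model such as DialoGPT: with right padding the pad tokens would sit between the prompt and the newly generated tokens, while left padding keeps each prompt flush against the position where generate() starts writing.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Same model as in app.py; left padding so batched prompts end right before generation starts.
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium", padding_side="left")
tokenizer.pad_token = tokenizer.eos_token  # DialoGPT has no dedicated pad token
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

# Hypothetical prompts, only for illustration.
prompts = ["Hello, how are you?", "Tell me about your day."]
inputs = tokenizer(
    [p + tokenizer.eos_token for p in prompts],  # DialoGPT turns end with EOS
    return_tensors="pt",
    padding=True,  # pads on the left because of padding_side="left"
)

with torch.no_grad():
    outputs = model.generate(
        **inputs,
        max_new_tokens=40,
        pad_token_id=tokenizer.eos_token_id,
    )

for output in outputs:
    print(tokenizer.decode(output, skip_special_tokens=True))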