yakine committed on
Commit
f3e43ed
·
verified ·
1 Parent(s): 391774c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -2,7 +2,7 @@ import streamlit as st
2
  import pandas as pd
3
  import os
4
  import torch
5
- from transformers import GPT2LMHeadModel, GPT2Tokenizer, AutoTokenizer, AutoModelForCausalLM, pipeline
6
  from huggingface_hub import HfFolder
7
  from io import StringIO
8
 
@@ -18,7 +18,7 @@ os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
18
 
19
  # Load the tokenizer and model
20
  tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
21
- model_gpt2 = GPT2LMHeadModel.from_pretrained('gpt2')
22
 
23
  # Create a pipeline for text generation using GPT-2
24
  text_generator = pipeline("text-generation", model=model_gpt2, tokenizer=tokenizer)
@@ -34,7 +34,7 @@ def load_llama_model():
34
  model_llama = AutoModelForCausalLM.from_pretrained(
35
  model_name,
36
  torch_dtype=torch.float16, # Use FP16 for reduced memory
37
- use_auth_token=hf_token
38
  )
39
  tokenizer_llama = AutoTokenizer.from_pretrained(model_name, token=hf_token)
40
 
 
2
  import pandas as pd
3
  import os
4
  import torch
5
+ from transformers import GPT2Model, GPT2Tokenizer, AutoTokenizer, AutoModelForCausalLM, pipeline
6
  from huggingface_hub import HfFolder
7
  from io import StringIO
8
 
 
18
 
19
  # Load the tokenizer and model
20
  tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
21
+ model_gpt2 = GPT2Model.from_pretrained('gpt2')
22
 
23
  # Create a pipeline for text generation using GPT-2
24
  text_generator = pipeline("text-generation", model=model_gpt2, tokenizer=tokenizer)
 
34
  model_llama = AutoModelForCausalLM.from_pretrained(
35
  model_name,
36
  torch_dtype=torch.float16, # Use FP16 for reduced memory
37
+ token=hf_token
38
  )
39
  tokenizer_llama = AutoTokenizer.from_pretrained(model_name, token=hf_token)
40