Update app.py
app.py
CHANGED
@@ -2,7 +2,7 @@ import streamlit as st
 import pandas as pd
 import os
 import torch
-from transformers import
+from transformers import GPT2Model, GPT2Tokenizer, AutoTokenizer, AutoModelForCausalLM, pipeline
 from huggingface_hub import HfFolder
 from io import StringIO
 
@@ -18,7 +18,7 @@ os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
 
 # Load the tokenizer and model
 tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
-model_gpt2 =
+model_gpt2 = GPT2Model.from_pretrained('gpt2')
 
 # Create a pipeline for text generation using GPT-2
 text_generator = pipeline("text-generation", model=model_gpt2, tokenizer=tokenizer)
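A note on this hunk: GPT2Model is the bare transformer without a language-modeling head, so the "text-generation" pipeline cannot generate with it (transformers warns that GPT2Model is not supported for the task, and generation fails). A minimal sketch of a load that does work with the pipeline, using GPT2LMHeadModel instead (an illustration, not the commit's code):

    # Sketch: GPT2LMHeadModel carries the LM head that the
    # "text-generation" pipeline requires; GPT2Model does not.
    from transformers import GPT2LMHeadModel, GPT2Tokenizer, pipeline

    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    model_gpt2 = GPT2LMHeadModel.from_pretrained('gpt2')
    text_generator = pipeline("text-generation", model=model_gpt2, tokenizer=tokenizer)
    print(text_generator("Hello", max_new_tokens=20)[0]["generated_text"])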
@@ -34,7 +34,7 @@ def load_llama_model():
     model_llama = AutoModelForCausalLM.from_pretrained(
         model_name,
         torch_dtype=torch.float16,  # Use FP16 for reduced memory
-
+        token=hf_token
     )
     tokenizer_llama = AutoTokenizer.from_pretrained(model_name, token=hf_token)
 
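The added token=hf_token authenticates the weight download, which gated Llama checkpoints require; the tokenizer load below it already passed the same token. The commit does not show where hf_token is defined, but since the file imports HfFolder, it presumably comes from the local Hugging Face login; a sketch under that assumption:

    # Sketch (assumption): read the token saved by `huggingface-cli login`
    # from the local cache; get_token() returns None if no login was done.
    from huggingface_hub import HfFolder
    hf_token = HfFolder.get_token()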