Update app.py
app.py
CHANGED
@@ -2,10 +2,20 @@ import streamlit as st
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
+# Disable safetensors fast GPU loading (if needed)
+import os
+os.environ["SAFETENSORS_FAST_GPU"] = "0"
+
+# Cache the model and tokenizer
+@st.cache_resource
+def load_model_and_tokenizer():
+    model_name = "rajrakeshdr/IntelliSoc"
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    model = AutoModelForCausalLM.from_pretrained(model_name, use_safetensors=False)
+    return model, tokenizer
+
 # Load the model and tokenizer
-model_name = "rajrakeshdr/IntelliSoc"
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
+model, tokenizer = load_model_and_tokenizer()
 
 # Streamlit app title
 st.title("IntelliSoc Text Generation")
@@ -13,9 +23,6 @@ st.title("IntelliSoc Text Generation")
 # Input prompt
 prompt = st.text_area("Enter your prompt:", "Once upon a time")
 
-# Slider for max length
-max_length = st.slider("Max length of generated text", 50, 200, 100)
-
 # Generate text on button click
 if st.button("Generate Text"):
     # Tokenize input
@@ -25,7 +32,7 @@ if st.button("Generate Text"):
     with torch.no_grad():
         outputs = model.generate(
             inputs.input_ids,
-            max_length=max_length,
+            max_length=100,
             num_return_sequences=1,
             no_repeat_ngram_size=2,
             top_k=50,