Spaces:
Runtime error
Runtime error
johnometalman
commited on
Commit
路
fefdd6e
1
Parent(s):
e486e1b
anejo de errores con modelo
Browse files
app.py
CHANGED
@@ -1,53 +1,54 @@
|
|
1 |
import streamlit as st
|
2 |
from utils import carga_modelo, genera
|
3 |
-
from dotenv import load_dotenv
|
4 |
import os
|
5 |
|
6 |
-
|
7 |
-
load_dotenv()
|
8 |
-
|
9 |
-
# Access the variables
|
10 |
-
token = os.getenv("HF_AUTH_TOKEN")
|
11 |
-
api_key = os.getenv("API_KEY")
|
12 |
-
repo_id = 'ceyda/butterfly_cropped_uniq1K_512'
|
13 |
-
modelo_gan = carga_modelo(repo_id, token)
|
14 |
-
|
15 |
-
|
16 |
-
# P谩gina principal
|
17 |
st.title('Generador de Mariposas')
|
18 |
st.write('Este es un modelo light GAN entrenado para generaci贸n de mariposas')
|
19 |
|
20 |
-
|
21 |
st.sidebar.subheader('Esta mariposa no existe, 驴Puedes creerlo?')
|
22 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
23 |
st.sidebar.caption('Demo creado en vivo')
|
24 |
|
25 |
-
#
|
26 |
repo_id = 'ceyda/butterfly_cropped_uniq1K_512'
|
27 |
-
try:
|
28 |
-
modelo_gan = carga_modelo(repo_id)
|
29 |
-
except Exception as e:
|
30 |
-
st.error(f"Error loading model: {str(e)}")
|
31 |
-
st.stop()
|
32 |
|
33 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
34 |
n_mariposas = 4
|
35 |
|
36 |
-
|
37 |
def corre():
|
38 |
with st.spinner('Generando espera un poco....'):
|
39 |
ims = genera(modelo_gan, n_mariposas)
|
40 |
-
st.session_state['ims'] = ims
|
41 |
|
42 |
if 'ims' not in st.session_state:
|
43 |
st.session_state['ims'] = None
|
44 |
-
|
45 |
-
if st.button('Genera mariposa por favor', help='Estamos en vuelo, abrocha tu cintur贸n'):
|
46 |
corre()
|
47 |
|
48 |
ims = st.session_state['ims']
|
49 |
|
50 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
51 |
cols = st.columns(n_mariposas)
|
52 |
for j, im in enumerate(ims):
|
53 |
i = j % n_mariposas
|
|
|
1 |
import streamlit as st
|
2 |
from utils import carga_modelo, genera
|
|
|
3 |
import os
|
4 |
|
5 |
+
## P谩gina principal
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
6 |
st.title('Generador de Mariposas')
|
7 |
st.write('Este es un modelo light GAN entrenado para generaci贸n de mariposas')
|
8 |
|
9 |
+
## Barra Lateral
|
10 |
st.sidebar.subheader('Esta mariposa no existe, 驴Puedes creerlo?')
|
11 |
+
logo_path = 'assets/logo.png'
|
12 |
+
|
13 |
+
if os.path.exists(logo_path):
|
14 |
+
st.sidebar.image(logo_path, width=200)
|
15 |
+
else:
|
16 |
+
st.sidebar.write("鈿狅笍 Logo not found.")
|
17 |
+
|
18 |
st.sidebar.caption('Demo creado en vivo')
|
19 |
|
20 |
+
# Define the repo_id for the model on Hugging Face (this is the identifier for the model repository)
|
21 |
repo_id = 'ceyda/butterfly_cropped_uniq1K_512'
|
|
|
|
|
|
|
|
|
|
|
22 |
|
23 |
+
# Access the Hugging Face token and API key from environment variables set in Hugging Face Spaces
|
24 |
+
hf_token = os.getenv("HF_AUTH_TOKEN")
|
25 |
+
api_key = os.getenv("API_KEY")
|
26 |
+
|
27 |
+
# Load the model using the repo_id and Hugging Face token for authentication
|
28 |
+
modelo_gan = carga_modelo(repo_id, hf_token)
|
29 |
+
|
30 |
+
# Number of butterflies to generate
|
31 |
n_mariposas = 4
|
32 |
|
33 |
+
## Core de la app
|
34 |
def corre():
|
35 |
with st.spinner('Generando espera un poco....'):
|
36 |
ims = genera(modelo_gan, n_mariposas)
|
37 |
+
st.session_state['ims'] = ims # Corrected line
|
38 |
|
39 |
if 'ims' not in st.session_state:
|
40 |
st.session_state['ims'] = None
|
|
|
|
|
41 |
corre()
|
42 |
|
43 |
ims = st.session_state['ims']
|
44 |
|
45 |
+
corre_boton = st.button(
|
46 |
+
'Genera mariposa por favor',
|
47 |
+
on_click=corre,
|
48 |
+
help='Estamos en vuelo, abrocha tu cintur贸n'
|
49 |
+
)
|
50 |
+
|
51 |
+
if ims is not None:
|
52 |
cols = st.columns(n_mariposas)
|
53 |
for j, im in enumerate(ims):
|
54 |
i = j % n_mariposas
|
utils.py
CHANGED
@@ -1,21 +1,13 @@
|
|
1 |
-
import numpy as np
|
2 |
-
import torch
|
3 |
-
from huggan.pytorch.lightweight_gan.lightweight_gan import LightweightGAN
|
4 |
from transformers import AutoModel
|
5 |
|
6 |
-
|
7 |
-
## Cargamos el modelo desde el Hub de Hugging Face
|
8 |
def carga_modelo(repo_id, token):
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
ims = gan.G(torch.randn(batch_size, gan.latent_dim)).clamp(0.0, 1.0) * 255
|
20 |
-
ims = ims.permute(0,2,3,1).deatch().cpu().numpy().astype(np.unit8)
|
21 |
-
return ims
|
|
|
|
|
|
|
|
|
1 |
from transformers import AutoModel
|
2 |
|
|
|
|
|
3 |
def carga_modelo(repo_id, token):
|
4 |
+
try:
|
5 |
+
# Attempt to load the model from Hugging Face with authentication token
|
6 |
+
return AutoModel.from_pretrained(repo_id, use_auth_token=token)
|
7 |
+
except Exception as e:
|
8 |
+
raise ValueError(f"Error loading model: {str(e)}")
|
9 |
+
|
10 |
+
def genera(modelo_gan, n_mariposas):
|
11 |
+
# Your generation logic here (for example, generating images)
|
12 |
+
# This is just a placeholder
|
13 |
+
return ["image1.png", "image2.png", "image3.png", "image4.png"] # Example generated images
|
|
|
|
|
|