Spaces:
Running
on
Zero
Running
on
Zero
Changing Gradio Theme
Browse files
app.py
CHANGED
@@ -53,24 +53,6 @@ def fused_sim(a:Image.Image,b:Image.Image,α=.5):
|
|
53 |
bnb_cfg = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_use_double_quant=True)
|
54 |
|
55 |
# ---------- load models once at startup ---------------------
|
56 |
-
_base = None
|
57 |
-
# @spaces.GPU
|
58 |
-
# def load_models():
|
59 |
-
# from unsloth import FastLanguageModel
|
60 |
-
# global base, tok, lora
|
61 |
-
# if base is None:
|
62 |
-
# print("Loading BASE …")
|
63 |
-
# base, tok = FastLanguageModel.from_pretrained(
|
64 |
-
# BASE_MODEL, max_seq_length=2048,
|
65 |
-
# load_in_4bit=True, quantization_config=bnb_cfg, device_map="auto")
|
66 |
-
# tok.pad_token = tok.eos_token
|
67 |
-
|
68 |
-
# print("Loading LoRA …")
|
69 |
-
# lora, _ = FastLanguageModel.from_pretrained(
|
70 |
-
# ADAPTER_DIR, max_seq_length=2048,
|
71 |
-
# load_in_4bit=True, quantization_config=bnb_cfg, device_map="auto")
|
72 |
-
# print("✅ models loaded")
|
73 |
-
|
74 |
_base = _lora = _tok = None
|
75 |
_CLIP = _PREP = _LP = None
|
76 |
|
@@ -89,18 +71,6 @@ def ensure_models():
|
|
89 |
quantization_config=bnb_cfg, device_map="auto")
|
90 |
return True
|
91 |
|
92 |
-
# @spaces.GPU
|
93 |
-
# def ensure_models():
|
94 |
-
# load_models()
|
95 |
-
# return True # small, pickle-able sentinel
|
96 |
-
|
97 |
-
|
98 |
-
def build_prompt(desc:str):
|
99 |
-
msgs=[{"role":"system","content":"You are an SVG illustrator."},
|
100 |
-
{"role":"user",
|
101 |
-
"content":f"ONLY reply with a valid, complete <svg>β¦</svg> file that depicts: {desc}"}]
|
102 |
-
return tok.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True)
|
103 |
-
|
104 |
@spaces.GPU
|
105 |
@torch.no_grad()
|
106 |
def draw(model_flag, desc):
|
@@ -126,16 +96,7 @@ def compare(desc):
|
|
126 |
caption = "Thanks for trying our model 🙏\nIf you don't see an image for the base or GRPO model that means it didn't generate a valid SVG!"
|
127 |
return img_b, img_l, caption, svg_b, svg_l
|
128 |
|
129 |
-
|
130 |
-
# ensure_models()
|
131 |
-
# img_base, svg_base = draw(base, desc)
|
132 |
-
# img_lora, svg_lora = draw(lora, desc)
|
133 |
-
# # sim = (fused_sim(img_lora, img_base) if img_base and img_lora else float("nan"))
|
134 |
-
|
135 |
-
# caption = "Thanks for trying our model 🙏\nIf you don't see an image for the base or GRPO model that means it didn't generate a valid SVG!"
|
136 |
-
# return img_base, img_lora, caption, svg_base, svg_lora
|
137 |
-
|
138 |
-
with gr.Blocks(css="body{background:#111;color:#eee}") as demo:
|
139 |
gr.Markdown("## 🖼️ Qwen-2.5 SVG Generator — base vs GRPO-LoRA")
|
140 |
gr.Markdown(
|
141 |
"Type an image **description** (e.g. *a purple forest at dusk*). "
|
|
|
53 |
bnb_cfg = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_use_double_quant=True)
|
54 |
|
55 |
# ---------- load models once at startup ---------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
56 |
_base = _lora = _tok = None
|
57 |
_CLIP = _PREP = _LP = None
|
58 |
|
|
|
71 |
quantization_config=bnb_cfg, device_map="auto")
|
72 |
return True
|
73 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
74 |
@spaces.GPU
|
75 |
@torch.no_grad()
|
76 |
def draw(model_flag, desc):
|
|
|
96 |
caption = "Thanks for trying our model 🙏\nIf you don't see an image for the base or GRPO model that means it didn't generate a valid SVG!"
|
97 |
return img_b, img_l, caption, svg_b, svg_l
|
98 |
|
99 |
+
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
100 |
gr.Markdown("## 🖼️ Qwen-2.5 SVG Generator — base vs GRPO-LoRA")
|
101 |
gr.Markdown(
|
102 |
"Type an image **description** (e.g. *a purple forest at dusk*). "
|