import os
import random
import uuid
import asyncio
from threading import Thread
import gradio as gr
import spaces
import torch
import numpy as np
from PIL import Image
import edge_tts
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TextIteratorStreamer,
    Qwen2VLForConditionalGeneration,
    AutoProcessor,
)
from transformers.image_utils import load_image
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
DESCRIPTION = """
# Gen Vision 🎃
Separate Tabs for Chat, Image Generation (LoRA), Qwen2 VL OCR and Text-to-Speech
"""
css = '''
h1 {
  text-align: center;
  display: block;
}
#duplicate-button {
  margin: auto;
  color: #fff;
  background: #1565c0;
  border-radius: 100vh;
}
'''
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# -----------------------
# Progress Bar Helper
# -----------------------
def progress_bar_html(label: str) -> str:
    """
    Return an HTML snippet for a thin animated progress bar with a label.
    The bar is a magenta sweep over a plum track.
    """
    return f'''
<div style="display: flex; align-items: center;">
    <span style="margin-right: 10px; font-size: 14px;">{label}</span>
    <div style="width: 110px; height: 5px; background-color: #DDA0DD; border-radius: 2px; overflow: hidden;">
        <div style="width: 100%; height: 100%; background-color: #FF00FF; animation: loading 1.5s linear infinite;"></div>
    </div>
</div>
<style>
@keyframes loading {{
    0% {{ transform: translateX(-100%); }}
    100% {{ transform: translateX(100%); }}
}}
</style>
'''
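# Note: this helper is not wired into any handler below. In streaming handlers it is
# typically yielded as an interim value before the real output arrives, e.g.
# (illustrative only):
#   yield progress_bar_html("Thinking...")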
# -----------------------
# Text Generation Setup (Chat)
# -----------------------
model_id = "prithivMLmods/FastThink-0.5B-Tiny"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.bfloat16,
)
model.eval()
# -----------------------
# TTS Setup
# -----------------------
TTS_VOICES = [
    "en-US-JennyNeural",
    "en-US-GuyNeural",
]
async def text_to_speech(text: str, voice: str, output_file="output.mp3"):
    """Convert text to speech using Edge TTS and save the result as an MP3 file."""
    communicate = edge_tts.Communicate(text, voice)
    await communicate.save(output_file)
    return output_file
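# Illustrative call (assumes no event loop is already running in this thread):
#   mp3_path = asyncio.run(text_to_speech("Hello there", TTS_VOICES[0]))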
# -----------------------
# Utility: Clean Chat History
# -----------------------
def clean_chat_history(chat_history):
    """
    Filter out any chat entries whose "content" is not a string.
    """
    cleaned = []
    for msg in chat_history:
        if isinstance(msg, dict) and isinstance(msg.get("content"), str):
            cleaned.append(msg)
    return cleaned
# -----------------------
# Qwen2 VL OCR Setup
# -----------------------
OCR_MODEL_ID = "prithivMLmods/Qwen2-VL-OCR2-2B-Instruct"
processor = AutoProcessor.from_pretrained(OCR_MODEL_ID, trust_remote_code=True)
model_m = Qwen2VLForConditionalGeneration.from_pretrained(
    OCR_MODEL_ID,
    trust_remote_code=True,
    # fp16 on GPU; fall back to fp32 on CPU, where fp16 ops are poorly supported.
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
).to(device).eval()
# -----------------------
# Stable Diffusion Image Generation Setup (LoRA)
# -----------------------
MAX_SEED = np.iinfo(np.int32).max
USE_TORCH_COMPILE = False
ENABLE_CPU_OFFLOAD = False
# LoRA options: (repo id, weight filename, adapter name), one style each.
# Defined unconditionally so the UI dropdown can list styles even without a GPU.
LORA_OPTIONS = {
    "Realism": ("prithivMLmods/Canopus-Realism-LoRA", "Canopus-Realism-LoRA.safetensors", "rlms"),
    "Pixar": ("prithivMLmods/Canopus-Pixar-Art", "Canopus-Pixar-Art.safetensors", "pixar"),
    "Photoshoot": ("prithivMLmods/Canopus-Photo-Shoot-Mini-LoRA", "Canopus-Photo-Shoot-Mini-LoRA.safetensors", "photo"),
    "Clothing": ("prithivMLmods/Canopus-Clothing-Adp-LoRA", "Canopus-Dress-Clothing-LoRA.safetensors", "clth"),
    "Interior": ("prithivMLmods/Canopus-Interior-Architecture-0.1", "Canopus-Interior-Architecture-0.1δ.safetensors", "arch"),
    "Fashion": ("prithivMLmods/Canopus-Fashion-Product-Dilation", "Canopus-Fashion-Product-Dilation.safetensors", "fashion"),
    "Minimalistic": ("prithivMLmods/Pegasi-Minimalist-Image-Style", "Pegasi-Minimalist-Image-Style.safetensors", "minimalist"),
    "Modern": ("prithivMLmods/Canopus-Modern-Clothing-Design", "Canopus-Modern-Clothing-Design.safetensors", "mdrnclth"),
    "Animaliea": ("prithivMLmods/Canopus-Animaliea-Artism", "Canopus-Animaliea-Artism.safetensors", "Animaliea"),
    "Wallpaper": ("prithivMLmods/Canopus-Liquid-Wallpaper-Art", "Canopus-Liquid-Wallpaper-Minimalize-LoRA.safetensors", "liquid"),
    "Cars": ("prithivMLmods/Canes-Cars-Model-LoRA", "Canes-Cars-Model-LoRA.safetensors", "car"),
    "PencilArt": ("prithivMLmods/Canopus-Pencil-Art-LoRA", "Canopus-Pencil-Art-LoRA.safetensors", "Pencil Art"),
    "ArtMinimalistic": ("prithivMLmods/Canopus-Art-Medium-LoRA", "Canopus-Art-Medium-LoRA.safetensors", "mdm"),
}

if torch.cuda.is_available():
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "SG161222/RealVisXL_V4.0_Lightning",
        torch_dtype=torch.float16,
        use_safetensors=True,
    )
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    # Load all LoRA weights up front so adapters can be switched per request.
    for model_name, weight_name, adapter_name in LORA_OPTIONS.values():
        pipe.load_lora_weights(model_name, weight_name=weight_name, adapter_name=adapter_name)
    pipe.to("cuda")
else:
    # CPU fallback: base model only; no LoRA adapters are loaded on this path.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "SG161222/RealVisXL_V4.0_Lightning",
        torch_dtype=torch.float32,
        use_safetensors=True,
    ).to(device)
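# Illustrative adapter switch (hypothetical prompt; assumes "rlms" acts as the
# Realism style's trigger token):
#   pipe.set_adapters("rlms")
#   image = pipe("rlms portrait photo of a man", num_inference_steps=28).images[0]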
def save_image(img: Image.Image) -> str:
    """Save a PIL image with a unique filename and return the path."""
    unique_name = str(uuid.uuid4()) + ".png"
    img.save(unique_name)
    return unique_name
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    """Return a fresh random seed when requested, otherwise the given seed."""
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed
@spaces.GPU(duration=180, enable_queue=True)
def generate_image(prompt: str, negative_prompt: str, seed: int, width: int, height: int, guidance_scale: float, randomize_seed: bool, lora_model: str):
    seed = int(randomize_seed_fn(seed, randomize_seed))
    # Seeded generator so a given seed reproduces the same image.
    generator = torch.Generator(device=device).manual_seed(seed)
    model_name, weight_name, adapter_name = LORA_OPTIONS[lora_model]
    if torch.cuda.is_available():
        # Adapters are only loaded on the CUDA path above.
        pipe.set_adapters(adapter_name)
    outputs = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=28,
        num_images_per_prompt=1,
        generator=generator,
        cross_attention_kwargs={"scale": 0.65},
        output_type="pil",
    )
    image_paths = [save_image(img) for img in outputs.images]
    # num_images_per_prompt=1, so exactly one path is produced; return it for gr.Image.
    return image_paths[0], seed
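# Example invocation (hypothetical values; returns a saved PNG path and the seed used):
#   path, used_seed = generate_image("rlms photo of a lighthouse", "", 0, 1024, 1024, 3.0, True, "Realism")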
# -----------------------
# Chat Generation Function (Text-only)
# -----------------------
def generate_chat(input_text: str, chat_history: list, max_new_tokens: int, temperature: float, top_p: float, top_k: int, repetition_penalty: float):
    conversation = clean_chat_history(chat_history)
    conversation.append({"role": "user", "content": input_text})
    input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt")
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        # Keep only the most recent tokens when the prompt exceeds the budget.
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
    input_ids = input_ids.to(model.device)
    streamer = TextIteratorStreamer(tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = {
        "input_ids": input_ids,
        "streamer": streamer,
        "max_new_tokens": max_new_tokens,
        "do_sample": True,
        "top_p": top_p,
        "top_k": top_k,
        "temperature": temperature,
        "num_beams": 1,
        "repetition_penalty": repetition_penalty,
    }
    t = Thread(target=model.generate, kwargs=generation_kwargs)
    t.start()
    outputs = []
    for new_text in streamer:
        outputs.append(new_text)
    final_response = "".join(outputs)
    # Record both sides of the exchange so the Chatbot shows the user turn too.
    chat_history.append({"role": "user", "content": input_text})
    chat_history.append({"role": "assistant", "content": final_response})
    return chat_history
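# The streamer above is drained before returning, so the UI updates only once per
# message. A streaming variant would turn this handler into a generator; a minimal
# sketch (Gradio re-renders the Chatbot on each yield):
#   chat_history.append({"role": "assistant", "content": ""})
#   for new_text in streamer:
#       chat_history[-1]["content"] += new_text
#       yield chat_history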
# -----------------------
# Qwen2 VL OCR Function (Multimodal)
# -----------------------
def generate_ocr(text: str, files, max_new_tokens: int):
    if not files:
        return "No images provided."
    # gr.File with file_count="multiple" yields a list of file paths; also accept a
    # single path for robustness.
    if not isinstance(files, list):
        files = [files]
    images = [load_image(f) for f in files]
    messages = [{
        "role": "user",
        "content": [
            *[{"type": "image", "image": image} for image in images],
            {"type": "text", "text": text},
        ],
    }]
    prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = processor(text=[prompt], images=images, return_tensors="pt", padding=True).to(model_m.device)
    streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = {**inputs, "streamer": streamer, "max_new_tokens": max_new_tokens}
    thread = Thread(target=model_m.generate, kwargs=generation_kwargs)
    thread.start()
    buffer = ""
    for new_text in streamer:
        buffer += new_text
    return buffer
# -----------------------
# Text-to-Speech Function
# -----------------------
def generate_tts(text: str, voice: str):
    """Synchronous wrapper around the async Edge TTS helper, for use as a Gradio handler."""
    output_file = asyncio.run(text_to_speech(text, voice))
    return output_file
# -----------------------
# Gradio Interface with Tabs
# -----------------------
with gr.Blocks(css=css, title="Gen Vision") as demo:
    gr.Markdown(DESCRIPTION)

    with gr.Tab("Chat Interface"):
        with gr.Row():
            # type="messages" so the {"role": ..., "content": ...} dicts produced by
            # generate_chat render correctly.
            chat_history = gr.Chatbot(label="Chat History", type="messages")
        with gr.Row():
            chat_input = gr.Textbox(placeholder="Enter your message", label="Your Message")
        with gr.Row():
            max_new_tokens_slider = gr.Slider(label="Max New Tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
            temperature_slider = gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.6)
        with gr.Row():
            top_p_slider = gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9)
            top_k_slider = gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50)
            repetition_penalty_slider = gr.Slider(label="Repetition Penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2)
        send_btn = gr.Button("Send")
        send_btn.click(
            fn=generate_chat,
            inputs=[chat_input, chat_history, max_new_tokens_slider, temperature_slider, top_p_slider, top_k_slider, repetition_penalty_slider],
            outputs=chat_history,
        )

    with gr.Tab("Image Generation"):
        image_prompt = gr.Textbox(label="Prompt", placeholder="Enter image prompt")
        negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Enter negative prompt")
        seed_input = gr.Number(label="Seed", value=0)
        width_slider = gr.Slider(label="Width", minimum=256, maximum=2048, step=64, value=1024)
        height_slider = gr.Slider(label="Height", minimum=256, maximum=2048, step=64, value=1024)
        guidance_scale_slider = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=3.0)
        randomize_checkbox = gr.Checkbox(label="Randomize Seed", value=True)
        lora_dropdown = gr.Dropdown(label="LoRA Style", choices=list(LORA_OPTIONS.keys()), value="Realism")
        generate_img_btn = gr.Button("Generate Image")
        img_output = gr.Image(label="Generated Image")
        seed_output = gr.Number(label="Used Seed")
        generate_img_btn.click(
            fn=generate_image,
            inputs=[image_prompt, negative_prompt, seed_input, width_slider, height_slider, guidance_scale_slider, randomize_checkbox, lora_dropdown],
            outputs=[img_output, seed_output],
        )

    with gr.Tab("Qwen 2 VL OCR"):
        ocr_text = gr.Textbox(label="Text Prompt", placeholder="Enter prompt for OCR")
        file_input = gr.File(label="Upload Images", file_count="multiple")
        ocr_max_new_tokens = gr.Slider(label="Max New Tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
        ocr_btn = gr.Button("Run OCR")
        ocr_output = gr.Textbox(label="OCR Output")
        ocr_btn.click(
            fn=generate_ocr,
            inputs=[ocr_text, file_input, ocr_max_new_tokens],
            outputs=ocr_output,
        )

    with gr.Tab("Text-to-Speech"):
        tts_text = gr.Textbox(label="Text", placeholder="Enter text for TTS")
        voice_dropdown = gr.Dropdown(label="Voice", choices=TTS_VOICES, value=TTS_VOICES[0])
        tts_btn = gr.Button("Generate Audio")
        tts_audio = gr.Audio(label="Audio Output", type="filepath")
        tts_btn.click(
            fn=generate_tts,
            inputs=[tts_text, voice_dropdown],
            outputs=tts_audio,
        )

demo.queue(max_size=20).launch(share=True)