import gradio as gr
from diffusers import StableDiffusionPipeline
import torch
from transformers import MarianMTModel, MarianTokenizer
# Load translation model
model_name = "Helsinki-NLP/opus-mt-mul-en"
tokenizer = MarianTokenizer.from_pretrained(model_name)
translation_model = MarianMTModel.from_pretrained(model_name)
def translate_to_english(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True)
    translated = translation_model.generate(**inputs)
    translated_text = tokenizer.decode(translated[0], skip_special_tokens=True)
    return translated_text
# Load Stable Diffusion on CPU
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float32  # Use float32 for CPU
)
pipe = pipe.to("cpu") # Force CPU mode
def generate_image(prompt):
    # Heuristic: treat non-ASCII prompts as non-English and translate them first
    if not prompt.isascii():
        prompt = translate_to_english(prompt)
    # Generate image (no autocast on CPU)
    image = pipe(prompt).images[0]
    return image
# Gradio Interface
app = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(label="Enter prompt (any language)"),
    outputs=gr.Image(label="Generated Image"),
    title="🌍 Multilingual Text-to-Image Generator (CPU Mode)",
    description="Type in **English, हिंदी, मराठी, Deutsch, etc.** and get an image! (Slower on CPU)"
)
app.launch()