import torch
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from transformers import AutoTokenizer, AutoModelForCausalLM

# Serve the interactive API docs at the root path.
app = FastAPI(docs_url="/")

# Allow cross-origin requests from any host (typical for a public demo).
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load the prompt-generator model once at startup and move it to the GPU
# if one is available; fall back to CPU otherwise.
tokenizer = AutoTokenizer.from_pretrained("succinctly/text2image-prompt-generator")
model = AutoModelForCausalLM.from_pretrained("succinctly/text2image-prompt-generator")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()


@app.post("/text")
async def generate_text(text: str):
    """Expand a short text snippet into a text-to-image prompt."""
    try:
        inputs = tokenizer(text, return_tensors="pt").to(device)
        # Inference only, so disable gradient tracking. max_new_tokens lifts
        # the very short default generation cap; tune it to taste.
        with torch.no_grad():
            outputs = model.generate(**inputs, max_new_tokens=64)
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return {"generated_text": generated_text}
    except Exception as e:
        return {"error": str(e)}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
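
# A minimal client sketch for exercising the endpoint, assuming the server is
# running locally on the port configured in the uvicorn call above. Note that
# FastAPI binds a bare `text: str` parameter to the query string, so the input
# is passed as a query parameter rather than in the request body:
#
#   import requests
#   resp = requests.post(
#       "http://localhost:7860/text",
#       params={"text": "a castle on a hill"},
#   )
#   print(resp.json())  # {"generated_text": "..."} on success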