from fastapi import FastAPI
from transformers import AutoModelForCausalLM, AutoTokenizer

app = FastAPI()

# Load the fine-tuned weights from the local path. The tokenizer is loaded
# from the base model repo, since fine-tuning typically leaves it unchanged.
model_name = "/app/models/fine-tuned"
tokenizer = AutoTokenizer.from_pretrained("TheBloke/Pygmalion-7B-GPTQ")
model = AutoModelForCausalLM.from_pretrained(model_name)


@app.get("/chat")
def chat(msg: str):
    # Tokenize the incoming message and generate a reply.
    inputs = tokenizer(msg, return_tensors="pt")
    # max_new_tokens caps only the generated reply; max_length would also
    # count the prompt tokens toward the limit.
    response = model.generate(**inputs, max_new_tokens=200)
    return {"response": tokenizer.decode(response[0], skip_special_tokens=True)}
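
# A minimal way to exercise the endpoint locally (a sketch, assuming the file
# is saved as app.py and uvicorn is installed; host, port, and the example
# message are placeholders):
#
#   uvicorn app:app --host 0.0.0.0 --port 8000
#   curl "http://localhost:8000/chat?msg=Hello"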