File size: 994 Bytes
384f319
af3d431
384f319
 
 
 
 
e06ee10
 
 
384f319
af3d431
 
 
 
384f319
 
 
 
 
af3d431
 
e06ee10
384f319
af3d431
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
from fastapi import FastAPI
from pydantic import BaseModel  # Import BaseModel untuk mendefinisikan model data
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

app = FastAPI()

# Load model and tokenizer without specifying cache_dir (falls back to the default HF cache).
# NOTE(review): both loads run at import time, so app startup blocks until the model
# is downloaded/loaded — confirm this is intended for the deployment environment.
tokenizer = AutoTokenizer.from_pretrained("unsloth/Llama-3.2-1B-Instruct")
model = AutoModelForCausalLM.from_pretrained("unsloth/Llama-3.2-1B-Instruct").to("cpu")

# Data model for the JSON body accepted by the /generate endpoint.
class GenerateRequest(BaseModel):
    """Request body for ``/generate``: carries the text prompt to complete."""
    # user-supplied prompt text; pydantic validates it is a string
    prompt: str

@app.get("/")
def home():
    """Root endpoint: confirms the service is up and which model it serves."""
    status_message = "FastAPI running with Llama-3.2-1B-Instruct"
    return {"message": status_message}

@app.post("/generate")
def generate_text(request: GenerateRequest):  # request body parsed/validated by pydantic
    """Generate a text completion for the submitted prompt.

    Args:
        request: JSON body with a single ``prompt`` string field.

    Returns:
        dict with key ``generated_text``: the prompt plus the generated
        continuation, decoded with special tokens stripped.
    """
    inputs = tokenizer(request.prompt, return_tensors="pt").to("cpu")
    # inference_mode: no autograd bookkeeping — less memory and faster on CPU.
    with torch.inference_mode():
        output = model.generate(
            **inputs,
            # Bound only the *new* tokens. The previous max_length=300 capped
            # prompt + output combined, so long prompts got truncated output
            # (or none at all).
            max_new_tokens=256,
            # Explicit pad token avoids the per-request "pad_token_id not set"
            # warning on Llama-style models that define only an EOS token.
            pad_token_id=tokenizer.eos_token_id,
        )
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return {"generated_text": generated_text}