ApaCu commited on
Commit
d642290
·
verified ·
1 Parent(s): 6a35f97

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +67 -0
app.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from fastapi import FastAPI, Query
3
+ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
4
+ import uvicorn
5
+ import torch
6
+
7
+ # Inisialisasi FastAPI
8
+ app = FastAPI()
9
+
10
+ # Inisialisasi model dan tokenizer
11
+ def load_model(model_name):
12
+ if model_name == "mixtral":
13
+ model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
14
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
15
+ model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.float16)
16
+ return pipeline("text-generation", model=model, tokenizer=tokenizer, device_map="auto")
17
+ elif model_name == "gpt2":
18
+ return pipeline("text-generation", model="gpt2")
19
+ else:
20
+ raise ValueError("Model tidak didukung. Pilih 'mixtral' atau 'gpt2'.")
21
+
22
+ # Fungsi untuk menghasilkan teks
23
def generate_text(prompt, model_name, max_length=100):
    """Generate text from *prompt* using the chosen model.

    Returns the generated text on success. Any failure (unsupported model,
    generation error) is reported as a best-effort "Error: ..." string
    rather than raised, so both the API and the UI can display it.
    """
    try:
        generator = load_model(model_name)
        samples = generator(
            prompt,
            max_length=max_length,
            num_return_sequences=1,
            do_sample=True,
        )
        return samples[0]["generated_text"]
    except Exception as exc:
        return f"Error: {str(exc)}"
31
+
32
+ # Endpoint API
33
# API endpoint. NOTE: declared as a plain (sync) `def`, not `async def` —
# model inference is blocking, and FastAPI runs sync endpoints in its
# threadpool, so other requests are not stalled while generation runs.
@app.get("/generate")
def generate(prompt: str = Query(..., description="Teks input untuk model"),
             model: str = Query("gpt2", description="Model AI: 'mixtral' atau 'gpt2'"),
             max_length: int = Query(100, ge=1, le=2048,
                                     description="Panjang maksimum teks yang dihasilkan")):
    """Generate text over HTTP GET.

    Returns a JSON object echoing the prompt and model plus the generated
    text (or an "Error: ..." string on failure, per generate_text).
    """
    result = generate_text(prompt, model, max_length)
    return {"prompt": prompt, "model": model, "generated_text": result}
38
+
39
+ # Antarmuka Gradio
40
def gradio_generate(prompt, model_choice):
    """Gradio callback: forward the UI inputs to generate_text."""
    generated = generate_text(prompt, model_choice)
    return generated
42
+
43
# --- Gradio web UI ---------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# AI Text Generation API")
    gr.Markdown("Masukkan teks dan pilih model untuk menghasilkan teks. Gunakan API di `/generate` untuk akses programatik.")

    # Input widgets: free-form prompt plus a model selector.
    user_prompt = gr.Textbox(label="Prompt", placeholder="Masukkan teks di sini...")
    selected_model = gr.Dropdown(choices=["gpt2", "mixtral"], label="Pilih Model", value="gpt2")
    run_button = gr.Button("Generate")

    # Output widget showing the generation result.
    generated_output = gr.Textbox(label="Hasil Generasi")

    # Wire the button to the generation callback.
    run_button.click(
        fn=gradio_generate,
        inputs=[user_prompt, selected_model],
        outputs=generated_output,
    )
61
+
62
+ # Menjalankan aplikasi (untuk lokal, bukan di Hugging Face)
63
# Entrypoint. The original served ONLY the FastAPI app when run directly
# (leaving the Gradio UI dead) and ONLY Gradio when imported (leaving
# /generate dead); mounting the demo on the app serves both together.
if __name__ == "__main__":
    # Mount the Gradio UI onto the FastAPI app so the REST API (/generate)
    # and the web interface run in the same uvicorn process.
    app = gr.mount_gradio_app(app, demo, path="/")
    uvicorn.run(app, host="0.0.0.0", port=7860)
else:
    # Imported (e.g. by a Hugging Face Spaces runner): launch Gradio directly.
    demo.launch()