DemahAlmutairi committed on
Commit
a982432
·
verified ·
1 Parent(s): b14d715

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -0
app.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
+ import torch
4
+
5
# Cache of already-built generators, keyed by model name. Without this,
# every call to generate_text() re-downloaded and re-instantiated the
# full model and tokenizer, which is prohibitively slow per request.
_GENERATOR_CACHE = {}

def load_model(model_name):
    """Return a cached text-generation pipeline for ``model_name``.

    Args:
        model_name: Hugging Face model identifier,
            e.g. ``"Qwen/Qwen2.5-1.5B-Instruct"``.

    Returns:
        A ``transformers`` text-generation pipeline configured for greedy
        decoding (``do_sample=False``), emitting at most 500 new tokens and
        returning only the completion (``return_full_text=False``).
    """
    if model_name in _GENERATOR_CACHE:
        return _GENERATOR_CACHE[model_name]

    # Prefer GPU when available; device_map accepts a plain device string.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        device_map=device,
        torch_dtype="auto",  # let transformers pick the checkpoint's dtype
        # NOTE(review): trust_remote_code executes code shipped in the model
        # repo — acceptable only because the model choices are hard-coded.
        trust_remote_code=True,
    )
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    generator = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        return_full_text=False,  # return only the generated continuation
        max_new_tokens=500,
        do_sample=False,  # deterministic greedy decoding
    )
    _GENERATOR_CACHE[model_name] = generator
    return generator
23
+
24
def generate_text(prompt, model_name):
    """Run ``prompt`` through the model named ``model_name``.

    Args:
        prompt: The user's input text.
        model_name: Hugging Face model identifier passed to ``load_model``.

    Returns:
        The generated completion string for the first (only) result.
    """
    pipe = load_model(model_name)
    chat = [{"role": "user", "content": prompt}]
    outputs = pipe(chat)
    first_result = outputs[0]
    return first_result["generated_text"]
29
+
30
# --- Gradio UI -------------------------------------------------------------
# A prompt textbox plus a model selector, wired to generate_text.

MODEL_CHOICES = [
    "Qwen/Qwen2.5-1.5B-Instruct",
    "microsoft/Phi-3-mini-4k-instruct",
    "ALLaM-AI/ALLaM-7B-Instruct-preview",
]

prompt_box = gr.Textbox(lines=2, placeholder="Enter your prompt here...")
model_picker = gr.Dropdown(
    choices=MODEL_CHOICES,
    label="Choose Model",
    value="ALLaM-AI/ALLaM-7B-Instruct-preview",
)

demo = gr.Interface(
    fn=generate_text,
    inputs=[prompt_box, model_picker],
    outputs=gr.Textbox(label="Generated Text"),
    title="Text Generator",
    description="Enter a prompt and generate text using one of the available models.",
    examples=[
        ["Tell me a funny joke about chickens.", "microsoft/Phi-3-mini-4k-instruct"],
        ["أخبرني نكتة مضحكة عن الدجاج.", "ALLaM-AI/ALLaM-7B-Instruct-preview"],
    ],
)

demo.launch()