DemahAlmutairi committed on
Commit 5fed0cd · verified · 1 Parent(s): f350335

Update app.py

Files changed (1)
  1. app.py +40 -13
app.py CHANGED
@@ -3,6 +3,15 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 import torch
 import spaces
 
+
+def get_model_name(language):
+    """Map language choice to the corresponding model."""
+    model_mapping = {
+        "English": "microsoft/Phi-3-mini-4k-instruct",
+        "Arabic": "ALLaM-AI/ALLaM-7B-Instruct-preview"
+    }
+    return model_mapping.get(language, "ALLaM-AI/ALLaM-7B-Instruct-preview")  # Default to Arabic model
+
 def load_model(model_name):
     device = "cuda" if torch.cuda.is_available() else "cpu"
     model = AutoModelForCausalLM.from_pretrained(
@@ -18,35 +27,53 @@ def load_model(model_name):
         tokenizer=tokenizer,
         return_full_text=False,
         max_new_tokens=500,
-        do_sample=False
+        do_sample=True,  # Enable sampling for more creative outputs
+        top_k=50,  # Control diversity
+        top_p=0.95  # Control diversity
     )
     return generator
 
+
 @spaces.GPU
-def generate_text(prompt, model_name):
+def generate_kids_story(character, setting, language):
+    model_name = get_model_name(language)
     generator = load_model(model_name)
+
+    # Define prompt for the AI model
+    if language == "English":
+        prompt = (f"Write a short story for kids about a character named {character} who goes on an adventure in {setting}. "
+                  "Make it fun, engaging, and suitable for children.")
+    else:
+        prompt = (f"اكتب قصة قصيرة للأطفال عن شخصية اسمها {character} التي تذهب في مغامرة في {setting}. "
+                  "اجعلها ممتعة وجذابة ومناسبة للأطفال.")
+
     messages = [{"role": "user", "content": prompt}]
     output = generator(messages)
     return output[0]["generated_text"]
 
 # Create Gradio interface
 demo = gr.Interface(
-    fn=generate_text,
+    fn=generate_kids_story,
     inputs=[
-        gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
+        gr.Textbox(placeholder="Enter a character name (e.g., Benny the Bunny)...", label="Character Name"),
+        gr.Textbox(placeholder="Enter a setting (e.g., a magical forest)...", label="Setting"),
         gr.Dropdown(
-            choices=["Qwen/Qwen2.5-1.5B-Instruct", "microsoft/Phi-3-mini-4k-instruct", "ALLaM-AI/ALLaM-7B-Instruct-preview"],
-            label="Choose Model",
-            value="ALLaM-AI/ALLaM-7B-Instruct-preview"
+            choices=["English", "Arabic"],
+            label="Choose Language",
+            value="English"  # Default to English
         )
     ],
-    outputs=gr.Textbox(label="Generated Text"),
-    title="Text Generator",
-    description="Enter a prompt and generate text using one of the available models.",
+    outputs=gr.Textbox(label="Kids' Story"),
+    title="📖 AI Kids' Story Generator - English & Arabic 📖",
+    description="Enter a character name and a setting, and AI will generate a fun short story for kids in English or Arabic.",
     examples=[
-        ["Tell me a funny joke about chickens.", "microsoft/Phi-3-mini-4k-instruct"],
-        ["أخبرني نكتة مضحكة عن الدجاج.", "ALLaM-AI/ALLaM-7B-Instruct-preview"]
-    ]
+        ["Benny the Bunny", "a magical forest", "English"],
+        ["علي البطل", "غابة سحرية", "Arabic"],
+        ["Lila the Ladybug", "a garden full of flowers", "English"],
+        ["ليلى الجنية", "حديقة مليئة بالأزهار", "Arabic"]
+    ],
+    theme="default",
 )
 
+# Launch the Gradio app
 demo.launch()
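
For reference, a minimal sketch (not part of the commit) of the chat-format pipeline call that the updated app.py relies on. The Qwen/Qwen2.5-0.5B-Instruct checkpoint below is only an assumed lightweight stand-in so the snippet can run on CPU; the Space itself loads microsoft/Phi-3-mini-4k-instruct or ALLaM-AI/ALLaM-7B-Instruct-preview via load_model(), and a recent transformers release that accepts chat messages in the text-generation pipeline is assumed.

# Sketch only: same call pattern as app.py, with an assumed small stand-in model.
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="Qwen/Qwen2.5-0.5B-Instruct",  # assumed stand-in for a quick CPU test
    return_full_text=False,  # return only the newly generated reply, as in app.py
    max_new_tokens=120,
    do_sample=True,  # same sampling settings the commit enables
    top_k=50,
    top_p=0.95,
)

messages = [{"role": "user", "content": (
    "Write a short story for kids about a character named Benny the Bunny "
    "who goes on an adventure in a magical forest."
)}]
output = generator(messages)
print(output[0]["generated_text"])  # the generated story text only

Swapping in either model name returned by get_model_name() reproduces the Space's behavior, at the cost of a much larger download.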