prithivMLmods committed on
Commit
3e5ce53
·
verified ·
1 Parent(s): b71cea0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -0
app.py CHANGED
@@ -84,6 +84,7 @@ def clean_chat_history(chat_history):
84
  cleaned.append(msg)
85
  return cleaned
86
 
 
87
  def chat_generate(input_text: str, chat_history: list, max_new_tokens: int, temperature: float, top_p: float, top_k: int, repetition_penalty: float):
88
  """
89
  Chat generation using a text-only model.
@@ -241,6 +242,7 @@ def apply_style(style_name: str, positive: str, negative: str = ""):
241
  p, n = styles[DEFAULT_STYLE_NAME]
242
  return p.replace("{prompt}", positive), n + (negative if negative else "")
243
 
 
244
  def generate_image_lora(prompt: str, negative_prompt: str, use_negative_prompt: bool, seed: int, width: int, height: int, guidance_scale: float, randomize_seed: bool, style_name: str, lora_model: str):
245
  seed = int(randomize_seed_fn(seed, randomize_seed))
246
  positive_prompt, effective_negative_prompt = apply_style(style_name, prompt, negative_prompt)
 
84
  cleaned.append(msg)
85
  return cleaned
86
 
87
+ @spaces.GPU
88
  def chat_generate(input_text: str, chat_history: list, max_new_tokens: int, temperature: float, top_p: float, top_k: int, repetition_penalty: float):
89
  """
90
  Chat generation using a text-only model.
 
242
  p, n = styles[DEFAULT_STYLE_NAME]
243
  return p.replace("{prompt}", positive), n + (negative if negative else "")
244
 
245
+ @spaces.GPU
246
  def generate_image_lora(prompt: str, negative_prompt: str, use_negative_prompt: bool, seed: int, width: int, height: int, guidance_scale: float, randomize_seed: bool, style_name: str, lora_model: str):
247
  seed = int(randomize_seed_fn(seed, randomize_seed))
248
  positive_prompt, effective_negative_prompt = apply_style(style_name, prompt, negative_prompt)