DemahAlmutairi committed
Commit 8cb6990 · verified · 1 Parent(s): 9ad974d

Update app.py

Files changed (1): app.py (+14, -11)
app.py CHANGED
@@ -3,6 +3,9 @@ import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 import spaces
 
+import gc
+import torch
+
 # Create the necessary directories
 os.makedirs('.gradio/cached_examples/17', exist_ok=True)
 
@@ -34,6 +37,9 @@ def load_model(model_name):
         top_k=50,   # Control diversity
         top_p=0.95  # Control diversity
     )
+    del model
+    del tokenizer
+
     return generator
 
 @spaces.GPU
@@ -51,18 +57,15 @@ def generate_kids_story(character, setting, language):
 
     messages = [{"role": "user", "content": prompt}]
     output = generator(messages)
-    return output[0]["generated_text"]
 
-    import gc
-    import torch
-    # Delete model and associated objects
-    del model
-    del tokenizer
-    del generator
-    # Run garbage collection
-    gc.collect()
-    # Empty CUDA cache
-    torch.cuda.empty_cache()
+    # Delete model and associated objects
+    del generator
+    # Run garbage collection
+    gc.collect()
+    # Empty CUDA cache
+    torch.cuda.empty_cache()
+
+    return output[0]["generated_text"]
 
 # Create Gradio interface
 demo = gr.Interface(
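The change fixes an unreachable-code bug: in the previous version the cleanup block (`del`, `gc.collect()`, `torch.cuda.empty_cache()`) sat after the `return` statement in generate_kids_story, so it never ran and GPU memory was never reclaimed between requests. The commit moves the gc/torch imports to module scope, frees the raw model and tokenizer inside load_model (safe because the pipeline object keeps its own references to both), and runs the cleanup before returning the generated text. Below is a minimal sketch of the same capture-then-clean-up pattern in isolation; the gpt2 model name and the generate_once helper are illustrative placeholders, not part of this commit:

import gc
import torch
from transformers import pipeline

def generate_once(prompt: str) -> str:
    # Illustrative model; the Space loads its own model via load_model().
    generator = pipeline("text-generation", model="gpt2")
    output = generator(prompt, max_new_tokens=50)

    # Capture the result before cleaning up: statements placed after
    # `return` are unreachable, which was the bug this commit fixes.
    text = output[0]["generated_text"]

    del generator                 # drop the last reference to the pipeline
    gc.collect()                  # reclaim the now-unreferenced objects
    if torch.cuda.is_available():
        torch.cuda.empty_cache()  # return cached CUDA blocks to the driver

    return text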