sandz7 committed on
Commit
63eb3fb
·
1 Parent(s): 540729d

added async to bot_comms and llama_generation

Files changed (1)
  1. app.py +8 -8
app.py CHANGED
@@ -79,10 +79,10 @@ def gpt_generation(input: str,
 
 # Place just input pass and return generation output
 @spaces.GPU(duration=120)
-def llama_generation(input_text: str,
-                     history: list,
-                     temperature: float,
-                     max_new_tokens: int):
+async def llama_generation(input_text: str,
+                           history: list,
+                           temperature: float,
+                           max_new_tokens: int):
     """
     Pass input texts, tokenize, output and back to text.
     """
@@ -130,10 +130,10 @@ def check_cuda():
 first_time = True
 llm_mode = ""
 
-def bot_comms(input_text: str,
-              history: list,
-              temperature: float,
-              max_new_tokens: int):
+async def bot_comms(input_text: str,
+                    history: list,
+                    temperature: float,
+                    max_new_tokens: int):
     """
     The connection between gradio and the LLM's
     """