John Graham Reynolds committed
Commit f163dd7 · Parent: f44fbb9

With HF GPUs available again, put model and inputs on GPU

Files changed (2)
  1. app.py +2 -2
  2. model.py +2 -2
app.py CHANGED
@@ -113,8 +113,8 @@ def get_stream_warning_error(stream):
     # return chat_completion
 
 def model_inference(messages):
-    # input_ids = tokenizer(get_last_question(), return_tensors="pt").input_ids.to("cuda")  # tokenize the input and put it on the GPU
-    input_ids = tokenizer(get_last_question(), return_tensors="pt").input_ids  # testing on CPU
+    input_ids = tokenizer(get_last_question(), return_tensors="pt").input_ids.to("cuda")  # tokenize the input and put it on the GPU
+    # input_ids = tokenizer(get_last_question(), return_tensors="pt").input_ids  # testing on CPU
     outputs = model.generate(input_ids)
     for chunk in tokenizer.decode(outputs[0], skip_special_tokens=True):
         yield chunk  # yield each chunk of the predicted string character by character
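The commit switches between GPU and CPU by swapping a commented-out line in and out. A runtime device check would let the same function run on either backend without edits; a minimal sketch (the torch.cuda.is_available() fallback is our addition, not part of the commit, and it assumes tokenizer, model, and get_last_question are defined as in app.py):

import torch

# Resolve the device once; fall back to CPU when no GPU is attached.
device = "cuda" if torch.cuda.is_available() else "cpu"

def model_inference(messages):
    # Tokenize the last question and move the tensor to the chosen device
    input_ids = tokenizer(get_last_question(), return_tensors="pt").input_ids.to(device)
    outputs = model.generate(input_ids)
    for chunk in tokenizer.decode(outputs[0], skip_special_tokens=True):
        yield chunk  # yield the predicted string character by character

Note that model.generate requires the model and input_ids to live on the same device, which is why model.py moves the model to "cuda" in this same commit.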
model.py CHANGED
@@ -27,8 +27,8 @@ class InferenceBuilder:
     # cannot directly use @st.cache_resource on a method (function within a class) that has a self argument
     @st.cache_resource  # https://docs.streamlit.io/develop/concepts/architecture/caching
     def load_and_cache_model(model_name):
-        # model = T5ForConditionalGeneration.from_pretrained(model_name).to("cuda")  # put the model on our Space's GPU
-        model = T5ForConditionalGeneration.from_pretrained(model_name)  # testing on CPU
+        model = T5ForConditionalGeneration.from_pretrained(model_name).to("cuda")  # put the model on our Space's GPU
+        # model = T5ForConditionalGeneration.from_pretrained(model_name)  # testing on CPU
         return model
 
     return load_and_cache_model(model_name)
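The nested load_and_cache_model exists because, as the comment in the hunk notes, @st.cache_resource cannot decorate a method that takes self. The same device-fallback idea applies to the loader; a minimal sketch (the torch.cuda.is_available() check is our assumption, not the commit's):

import streamlit as st
import torch
from transformers import T5ForConditionalGeneration

@st.cache_resource  # cache one model instance across sessions and reruns
def load_and_cache_model(model_name):
    # Put the model on the Space's GPU when one is available, else stay on CPU
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return T5ForConditionalGeneration.from_pretrained(model_name).to(device)

With a check like this in both files, switching the Space between GPU hardware and CPU testing would no longer require commenting lines in and out.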