vladyur committed on
Commit 47d0a4a · 1 Parent(s): 1188895

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -5,7 +5,7 @@ import streamlit as st
 import re
 
 
-@st.cache(hash_funcs={tokenizers.Tokenizer: lambda _: None, tokenizers.AddedToken: lambda _: None, re.Pattern: lambda _: None, allow_output_mutation=True}, suppress_st_warning=True)
+@st.cache(hash_funcs={tokenizers.Tokenizer: lambda _: None, tokenizers.AddedToken: lambda _: None, re.Pattern: lambda _: None}, allow_output_mutation=True, suppress_st_warning=True)
 def get_model(model_name, model_path):
     tokenizer = transformers.GPT2Tokenizer.from_pretrained(model_name)
     model = transformers.GPT2LMHeadModel.from_pretrained(model_name)
@@ -14,7 +14,7 @@ def get_model(model_name, model_path):
     return model, tokenizer
 
 
-@st.cache(hash_funcs={tokenizers.Tokenizer: lambda _: None, tokenizers.AddedToken: lambda _: None, re.Pattern: lambda _: None, allow_output_mutation=True}, suppress_st_warning=True)
+@st.cache(hash_funcs={tokenizers.Tokenizer: lambda _: None, tokenizers.AddedToken: lambda _: None, re.Pattern: lambda _: None}, allow_output_mutation=True, suppress_st_warning=True)
 def predict(text, model, tokenizer, n_beams=5, temperature=2.5, top_p=0.8, max_length=300):
     input_ids = tokenizer.encode(text, return_tensors="pt")
     with torch.no_grad():
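For reference, the single change in both hunks moves allow_output_mutation=True out of the hash_funcs dict, where it had been written as if it were a dictionary entry (keyword syntax inside a dict literal is a Python syntax error), and passes it as a keyword argument of st.cache next to suppress_st_warning. Below is a minimal sketch of the corrected decorator on get_model, with imports added so it stands alone; the part of the function body that uses model_path is not visible in the diff and is omitted here.

import re

import streamlit as st
import tokenizers
import transformers

# hash_funcs tells Streamlit how to hash argument/return types it cannot hash
# natively; mapping the tokenizer, added-token, and compiled-regex types to a
# constant effectively skips hashing for those objects.
# allow_output_mutation and suppress_st_warning are keyword arguments of
# st.cache itself, not entries of the hash_funcs dict.
@st.cache(
    hash_funcs={
        tokenizers.Tokenizer: lambda _: None,
        tokenizers.AddedToken: lambda _: None,
        re.Pattern: lambda _: None,
    },
    allow_output_mutation=True,
    suppress_st_warning=True,
)
def get_model(model_name, model_path):
    # Same body as in app.py; any loading from model_path (outside the shown
    # hunks) is omitted in this sketch.
    tokenizer = transformers.GPT2Tokenizer.from_pretrained(model_name)
    model = transformers.GPT2LMHeadModel.from_pretrained(model_name)
    return model, tokenizer

The identical corrected decorator is applied to predict in the second hunk.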