skylersterling committed on
Commit
3f73fe1
·
verified ·
1 Parent(s): ff90fb4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -3
app.py CHANGED
@@ -22,9 +22,6 @@ def generate_text(prompt, temperature, top_p):
22
  generated_text = prompt_with_eos # Start with the initial prompt plus "EOS"
23
  prompt_length = len(generated_text)
24
 
25
- if input_tokens.size(1) > 512:
26
- generated_text = "ERROR, CONTEXT SIZE EXCEEDED"
27
-
28
  for _ in range(80): # Adjust the range to control the number of tokens generated
29
  with torch.no_grad():
30
  outputs = model(input_tokens)
 
22
  generated_text = prompt_with_eos # Start with the initial prompt plus "EOS"
23
  prompt_length = len(generated_text)
24
 
 
 
 
25
  for _ in range(80): # Adjust the range to control the number of tokens generated
26
  with torch.no_grad():
27
  outputs = model(input_tokens)