dar-tau committed on
Commit
c3144ec
·
verified ·
1 Parent(s): c235215

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -5
app.py CHANGED
@@ -12,12 +12,13 @@ pipe = pipeline("text-generation", model=model_name, device="cuda")
12
 
13
  generate_kwargs = {'max_new_tokens': 20}
14
 
15
- system_prompt = '''You are given an input text for a chat interface. Propose auto-completion to the text. You have several roles:
16
- - Fight under-specification: if the user does not provide sufficient context, propose them a set of relevant suggestions.
17
- - Complete text: The text provided to you is in the making. If you have a good idea for how to complete - make suggestions.
18
 
 
19
  Make sure the suggestions are valid completions of the text! No need for them to complete the text completely.
20
- Suggest only up to 5 works ahead.
21
  '''
22
 
23
  @spaces.GPU
@@ -26,7 +27,7 @@ def generate(text):
26
  {'role': 'system', 'content': system_prompt},
27
  {'role': 'user', 'content': text}
28
  ]
29
- return pipe(messages, **generate_kwargs)
30
 
31
 
32
  if __name__ == "__main__":
 
12
 
13
  generate_kwargs = {'max_new_tokens': 20}
14
 
15
+ system_prompt = '''You are given a partial input text for a chat interface. Propose auto-completion to the text. You have several roles:
16
+ - Fight under-specification.
17
+ - Complete text to save the user time.
18
 
19
+ Don't suggest anything if there are no good suggestions.
20
  Make sure the suggestions are valid completions of the text! No need for them to complete the text completely.
21
+ Suggest only up to 5 works ahead. The scheme of your answer should be "answer1;answer2;answer3" (return between 0 to 4 answers).
22
  '''
23
 
24
  @spaces.GPU
 
27
  {'role': 'system', 'content': system_prompt},
28
  {'role': 'user', 'content': text}
29
  ]
30
+ return pipe(messages, **generate_kwargs)['generated_text'][-1]['content']
31
 
32
 
33
  if __name__ == "__main__":