sandz7 committed on
Commit
2e99ee0
Β·
1 Parent(s): 6def1b9

Placed the streamer output in a list comprehension and returned the result of output_list

Browse files
Files changed (1) hide show
  1. app.py +10 -6
app.py CHANGED
@@ -75,13 +75,17 @@ def llama_generation(input_text: str,
75
  thread = Thread(target=llama_model.generate, kwargs=generate_kwargs)
76
  thread.start()
77
 
78
- outputs = []
79
- for text in streamer:
80
- outputs.append(text)
81
- yield "".join(outputs)
82
 
83
- # Convert output into string
84
- print(output_list(outputs))
 
 
 
 
 
85
 
86
 
87
  # Let's just make sure the llama is returning as it should and than place that return output into a function making it fit into a base
 
75
  thread = Thread(target=llama_model.generate, kwargs=generate_kwargs)
76
  thread.start()
77
 
78
+ outputs = [text for text in streamer]
79
+ output_text = output_list(outputs)
80
+ return output_text
 
81
 
82
+ # outputs = []
83
+ # for text in streamer:
84
+ # outputs.append(text)
85
+ # yield "".join(outputs)
86
+
87
+ # # Convert output into string
88
+ # print(output_list(outputs))
89
 
90
 
91
  # Let's just make sure the llama is returning as it should and than place that return output into a function making it fit into a base