Placed the output in a list comprehension and returned the result from output_list
Browse files
app.py
CHANGED
@@ -75,13 +75,17 @@ def llama_generation(input_text: str,
|
|
75 |
thread = Thread(target=llama_model.generate, kwargs=generate_kwargs)
|
76 |
thread.start()
|
77 |
|
78 |
-
outputs = []
|
79 |
-
|
80 |
-
|
81 |
-
yield "".join(outputs)
|
82 |
|
83 |
-
#
|
84 |
-
|
|
|
|
|
|
|
|
|
|
|
85 |
|
86 |
|
87 |
# Let's just make sure the llama is returning as it should and then place that return output into a function making it fit into a base
|
|
|
75 |
thread = Thread(target=llama_model.generate, kwargs=generate_kwargs)
|
76 |
thread.start()
|
77 |
|
78 |
+
outputs = [text for text in streamer]
|
79 |
+
output_text = output_list(outputs)
|
80 |
+
return output_text
|
|
|
81 |
|
82 |
+
# outputs = []
|
83 |
+
# for text in streamer:
|
84 |
+
# outputs.append(text)
|
85 |
+
# yield "".join(outputs)
|
86 |
+
|
87 |
+
# # Convert output into string
|
88 |
+
# print(output_list(outputs))
|
89 |
|
90 |
|
91 |
# Let's just make sure the llama is returning as it should and then place that return output into a function making it fit into a base
|