Update app.py
app.py CHANGED
@@ -164,7 +164,7 @@ def process_history(history: list[dict]) -> list[dict]:
 
 
 @spaces.GPU(duration=120)
-def run(message: dict, history: list[dict], ignored_system_prompt: str = "", max_new_tokens: int = 700) -> Iterator[str]:
+def run(message: dict, history: list[dict]) -> Iterator[str]:
     if not validate_media_constraints(message, history):
         yield ""
         return
@@ -186,7 +186,7 @@ def run(message: dict, history: list[dict], ignored_system_prompt: str = "", max
     generate_kwargs = dict(
         inputs,
         streamer=streamer,
-        max_new_tokens=max_new_tokens,
+        max_new_tokens=2048,
     )
     t = Thread(target=model.generate, kwargs=generate_kwargs)
     t.start()
@@ -225,10 +225,6 @@ demo = gr.ChatInterface(
     chatbot=gr.Chatbot(type="messages", scale=1, allow_tags=["image"]),
     textbox=gr.MultimodalTextbox(file_types=["image", ".mp4"], file_count="multiple", autofocus=True),
     multimodal=True,
-    additional_inputs=[
-        gr.Textbox(label="System Prompt", value="You are a helpful assistant."),
-        gr.Slider(label="Max New Tokens", minimum=100, maximum=2000, step=10, value=700),
-    ],
     stop_btn=False,
     title="ChatFinanz",
     examples=examples,
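For context on the hunk around line 186: the changed lines follow the standard transformers streaming pattern, where model.generate runs on a background thread while the generator drains a streamer and yields partial text. Below is a minimal sketch of that pattern with the now-hardcoded max_new_tokens=2048; the checkpoint, tokenizer, and stream_reply names are placeholders for illustration, not the Space's actual objects, and the multimodal preprocessing done by run() is omitted.

# Minimal sketch of the threaded streaming pattern used in run();
# model/tokenizer/checkpoint here are assumptions, not the Space's actual setup.
from collections.abc import Iterator
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")      # placeholder checkpoint
model = AutoModelForCausalLM.from_pretrained("gpt2")   # placeholder checkpoint


def stream_reply(prompt: str) -> Iterator[str]:
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # skip_prompt drops the echoed input so only newly generated text is streamed
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(inputs, streamer=streamer, max_new_tokens=2048)
    # generate() blocks, so it runs in a thread while this generator consumes the streamer
    Thread(target=model.generate, kwargs=generate_kwargs).start()
    output = ""
    for delta in streamer:
        output += delta
        yield output  # the chat UI re-renders the growing reply on each yield

The two edits in this commit are consistent with each other: gr.ChatInterface passes the values of additional_inputs as extra arguments to the chat function, so once the System Prompt textbox and Max New Tokens slider are removed, run() only needs message and history, and max_new_tokens becomes a fixed value inside the function.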