Daemontatox committed on
Commit
0318ee3
·
verified ·
1 Parent(s): 31b21a3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -14,7 +14,7 @@ processor = AutoProcessor.from_pretrained(ckpt)
14
 
15
 
16
  @spaces.GPU()
17
- def bot_streaming(message, history, max_new_tokens=250):
18
 
19
  txt = message["text"]
20
  ext_buffer = f"{txt}"
@@ -86,13 +86,13 @@ demo = gr.ChatInterface(fn=bot_streaming, title="Multimodal Llama", examples=[
86
  additional_inputs = [gr.Slider(
87
  minimum=10,
88
  maximum=500,
89
- value=250,
90
  step=10,
91
  label="Maximum number of new tokens to generate",
92
  )
93
  ],
94
  cache_examples=False,
95
- description="Try Multimodal Llama by Meta with transformers in this demo. Upload an image, and start chatting about it, or simply try one of the examples below. To learn more about Llama Vision, visit [our blog post](https://huggingface.co/blog/llama32). ",
96
  stop_btn="Stop Generation",
97
  fill_height=True,
98
  multimodal=True)
 
14
 
15
 
16
  @spaces.GPU()
17
+ def bot_streaming(message, history, max_new_tokens=2048):
18
 
19
  txt = message["text"]
20
  ext_buffer = f"{txt}"
 
86
  additional_inputs = [gr.Slider(
87
  minimum=10,
88
  maximum=500,
89
+ value=2048,
90
  step=10,
91
  label="Maximum number of new tokens to generate",
92
  )
93
  ],
94
  cache_examples=False,
95
+ description="Try Multimodal with transformers in this demo. Upload an image, and start chatting about it, or simply try one of the examples below. ",
96
  stop_btn="Stop Generation",
97
  fill_height=True,
98
  multimodal=True)