merve (HF Staff) committed (verified)
Commit 67efffc · Parent(s): 967ff26

Update app.py

Files changed (1):
  1. app.py (+74 -74)
app.py CHANGED
@@ -101,80 +101,80 @@ with gr.Blocks(fill_height=True) as demo:
             outputs=output,
             fn=model_inference
         )
-        with gr.Accordion():
-            # Hyper-parameters for generation
-            max_new_tokens = gr.Slider(
-                minimum=8,
-                maximum=1024,
-                value=512,
-                step=1,
-                interactive=True,
-                label="Maximum number of new tokens to generate",
-            )
-            repetition_penalty = gr.Slider(
-                minimum=0.01,
-                maximum=5.0,
-                value=1.2,
-                step=0.01,
-                interactive=True,
-                label="Repetition penalty",
-                info="1.0 is equivalent to no penalty",
-            )
-            temperature = gr.Slider(
-                minimum=0.0,
-                maximum=5.0,
-                value=0.4,
-                step=0.1,
-                interactive=True,
-                label="Sampling temperature",
-                info="Higher values will produce more diverse outputs.",
-            )
-            top_p = gr.Slider(
-                minimum=0.01,
-                maximum=0.99,
-                value=0.8,
-                step=0.01,
-                interactive=True,
-                label="Top P",
-                info="Higher values is equivalent to sampling more low-probability tokens.",
-            )
-            decoding_strategy = gr.Radio(
-                [
-                    "Greedy",
-                    "Top P Sampling",
-                ],
-                value="Greedy",
-                label="Decoding strategy",
-                interactive=True,
-                info="Higher values is equivalent to sampling more low-probability tokens.",
-            )
-            decoding_strategy.change(
-                fn=lambda selection: gr.Slider(
-                    visible=(
-                        selection in ["contrastive_sampling", "beam_sampling", "Top P Sampling", "sampling_top_k"]
-                    )
-                ),
-                inputs=decoding_strategy,
-                outputs=temperature,
-            )
-
-            decoding_strategy.change(
-                fn=lambda selection: gr.Slider(
-                    visible=(
-                        selection in ["contrastive_sampling", "beam_sampling", "Top P Sampling", "sampling_top_k"]
-                    )
-                ),
-                inputs=decoding_strategy,
-                outputs=repetition_penalty,
-            )
-            decoding_strategy.change(
-                fn=lambda selection: gr.Slider(visible=(selection in ["Top P Sampling"])),
-                inputs=decoding_strategy,
-                outputs=top_p,
-            )
-
-        submit_btn.click(model_inference, inputs = [image_input, query_input, decoding_strategy, temperature,
-                                                    max_new_tokens, repetition_penalty, top_p], outputs=output)
+        with gr.Accordion():
+            # Hyper-parameters for generation
+            max_new_tokens = gr.Slider(
+                minimum=8,
+                maximum=1024,
+                value=512,
+                step=1,
+                interactive=True,
+                label="Maximum number of new tokens to generate",
+            )
+            repetition_penalty = gr.Slider(
+                minimum=0.01,
+                maximum=5.0,
+                value=1.2,
+                step=0.01,
+                interactive=True,
+                label="Repetition penalty",
+                info="1.0 is equivalent to no penalty",
+            )
+            temperature = gr.Slider(
+                minimum=0.0,
+                maximum=5.0,
+                value=0.4,
+                step=0.1,
+                interactive=True,
+                label="Sampling temperature",
+                info="Higher values will produce more diverse outputs.",
+            )
+            top_p = gr.Slider(
+                minimum=0.01,
+                maximum=0.99,
+                value=0.8,
+                step=0.01,
+                interactive=True,
+                label="Top P",
+                info="Higher values is equivalent to sampling more low-probability tokens.",
+            )
+            decoding_strategy = gr.Radio(
+                [
+                    "Greedy",
+                    "Top P Sampling",
+                ],
+                value="Greedy",
+                label="Decoding strategy",
+                interactive=True,
+                info="Higher values is equivalent to sampling more low-probability tokens.",
+            )
+            decoding_strategy.change(
+                fn=lambda selection: gr.Slider(
+                    visible=(
+                        selection in ["contrastive_sampling", "beam_sampling", "Top P Sampling", "sampling_top_k"]
+                    )
+                ),
+                inputs=decoding_strategy,
+                outputs=temperature,
+            )
+
+            decoding_strategy.change(
+                fn=lambda selection: gr.Slider(
+                    visible=(
+                        selection in ["contrastive_sampling", "beam_sampling", "Top P Sampling", "sampling_top_k"]
+                    )
+                ),
+                inputs=decoding_strategy,
+                outputs=repetition_penalty,
+            )
+            decoding_strategy.change(
+                fn=lambda selection: gr.Slider(visible=(selection in ["Top P Sampling"])),
+                inputs=decoding_strategy,
+                outputs=top_p,
+            )
+
+        submit_btn.click(model_inference, inputs = [image_input, query_input, decoding_strategy, temperature,
+                                                    max_new_tokens, repetition_penalty, top_p], outputs=output)


demo.launch(debug=True)
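
For readers unfamiliar with the pattern the hunk touches, the sketch below reproduces it in a minimal, self-contained form: generation hyper-parameters sit inside a gr.Accordion, the decoding-strategy radio toggles the visibility of the sampling controls, and the submit button passes every control to the inference function. It is an illustration only, not the Space's code: fake_inference stands in for the real model_inference, the accordion and component labels outside the hunk are invented, and the three separate decoding_strategy.change handlers from the diff are collapsed into one callback that returns gr.update(visible=...) for all three sliders.

import gradio as gr


def fake_inference(image, query, decoding_strategy, temperature,
                   max_new_tokens, repetition_penalty, top_p):
    # Placeholder: a real app would run the multimodal model here.
    return f"[{decoding_strategy}] {query} (T={temperature}, top_p={top_p})"


with gr.Blocks(fill_height=True) as demo:
    image_input = gr.Image(type="pil", label="Image")       # illustrative label
    query_input = gr.Textbox(label="Question")               # illustrative label
    output = gr.Textbox(label="Answer")                      # illustrative label
    submit_btn = gr.Button("Submit")

    with gr.Accordion("Advanced generation options", open=False):  # label is hypothetical
        max_new_tokens = gr.Slider(8, 1024, value=512, step=1,
                                   label="Maximum number of new tokens to generate")
        repetition_penalty = gr.Slider(0.01, 5.0, value=1.2, step=0.01,
                                       label="Repetition penalty",
                                       info="1.0 is equivalent to no penalty")
        temperature = gr.Slider(0.0, 5.0, value=0.4, step=0.1,
                                label="Sampling temperature")
        top_p = gr.Slider(0.01, 0.99, value=0.8, step=0.01, label="Top P")
        decoding_strategy = gr.Radio(["Greedy", "Top P Sampling"],
                                     value="Greedy", label="Decoding strategy")

        def toggle_sampling_controls(selection):
            # Sampling-related sliders only matter when Top P Sampling is selected.
            show = selection == "Top P Sampling"
            return (gr.update(visible=show),
                    gr.update(visible=show),
                    gr.update(visible=show))

        decoding_strategy.change(
            fn=toggle_sampling_controls,
            inputs=decoding_strategy,
            outputs=[temperature, repetition_penalty, top_p],
        )

    submit_btn.click(
        fake_inference,
        inputs=[image_input, query_input, decoding_strategy, temperature,
                max_new_tokens, repetition_penalty, top_p],
        outputs=output,
    )

demo.launch(debug=True)

Returning gr.update(visible=...) from an event handler (or, in recent Gradio versions, a freshly constructed component such as gr.Slider(visible=...), as the diff does) updates an existing component's properties in place rather than rebuilding the interface.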