namfam committed
Commit 5a3fe0e · verified · 1 Parent(s): 51a2abd

Update app.py

Files changed (1): app.py +5 -5
app.py CHANGED
@@ -14,8 +14,8 @@ model = AutoModelForCausalLM.from_pretrained('HuggingFaceM4/Florence-2-DocVQA',
 processor = AutoProcessor.from_pretrained('HuggingFaceM4/Florence-2-DocVQA', trust_remote_code=True)
 
 
-TITLE = "# [Descripta Demo](https://huggingface.co/HuggingFaceM4/Florence-2-DocVQA)"
-DESCRIPTION = "The demo for Florence-2 fine-tuned on DocVQA dataset. You can find the notebook [here](https://colab.research.google.com/drive/1hKDrJ5AH_o7I95PtZ9__VlCTNAo1Gjpf?usp=sharing). Read more about Florence-2 fine-tuning [here](finetune-florence2)."
+TITLE = "#Descripta"
+DESCRIPTION = "Generate high-quality product descriptions from images and metadata"
 
 
 colormap = ['blue','orange','green','purple','brown','pink','gray','olive','cyan','red',
@@ -74,9 +74,9 @@ with gr.Blocks(css=css) as demo:
     gr.Examples(
         examples=[
             ["hunt.jpg", 'What is this image?'],
-            ["idefics2_architecture.png", 'How many tokens per image does it use?'],
-            ["idefics2_architecture.png", "What type of encoder does the model use?"],
-            ["image.jpg", "What's the share of Industry Switchers Gained?"]
+            # ["idefics2_architecture.png", 'How many tokens per image does it use?'],
+            # ["idefics2_architecture.png", "What type of encoder does the model use?"],
+            # ["image.jpg", "What's the share of Industry Switchers Gained?"]
         ],
         inputs=[input_img, text_input],
        outputs=[output_text],
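
For context, a minimal sketch of how the two renamed constants are presumably rendered in app.py; the `gr.Markdown` calls are an assumption, since this diff does not include the lines that consume `TITLE` and `DESCRIPTION`:

```python
import gradio as gr

# Constants as committed in this change. Note that "#Descripta" has no
# space after '#', so CommonMark-style renderers (including Gradio's
# Markdown component) will likely show it as literal text, not a heading.
TITLE = "#Descripta"
DESCRIPTION = "Generate high-quality product descriptions from images and metadata"

with gr.Blocks() as demo:
    # Assumed rendering calls: gr.Markdown is the usual way Gradio demos
    # display a title and description at the top of the page.
    gr.Markdown(TITLE)
    gr.Markdown(DESCRIPTION)

if __name__ == "__main__":
    demo.launch()
```

If a rendered heading is intended, "# Descripta" (with a space after the #) would be needed.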