iamrobotbear committed
Commit 3bc78d3 · 1 Parent(s): 1e2ba49

Update app.py

Files changed (1)
  1. app.py +7 -6
app.py CHANGED
@@ -3,12 +3,13 @@ from transformers import AutoProcessor, Blip2ForConditionalGeneration
 import torch
 from PIL import Image
 
-# Set device to GPU if available
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
 # Load the BLIP-2 model and processor
 processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
-model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b").to(device)
+model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")
+
+# Set device to GPU if available
+device = "cuda" if torch.cuda.is_available() else "cpu"
+model.to(device)
 
 def blip2_interface(image, prompted_caption_text, vqa_question, chat_context):
     # Prepare image input
@@ -40,8 +41,8 @@ def blip2_interface(image, prompted_caption_text, vqa_question, chat_context):
 
 # Define Gradio input and output components
 image_input = gr.inputs.Image(type="numpy")
-text_input = gr.components.TextInput()
-output_text = gr.components.Textbox()
+text_input = gr.inputs.Text()
+output_text = gr.outputs.Textbox()
 
 # Create Gradio interface
 iface = gr.Interface(
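
Note on the first hunk: the commit moves device selection after model construction and applies model.to(device) as a separate step, rather than chaining .to(device) onto from_pretrained. The body of blip2_interface lies outside the diff context; the sketch below shows how a BLIP-2 processor and model loaded this way are typically driven for captioning or VQA with transformers. The caption_image name, the prompt handling, and the max_new_tokens value are illustrative assumptions, not code from this commit.

import torch
from PIL import Image
from transformers import AutoProcessor, Blip2ForConditionalGeneration

# Same loading pattern as the updated app.py
processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

def caption_image(image, prompt=None):
    # With text=None this yields an unconditional caption; passing a prompt
    # such as "Question: ... Answer:" gives prompted captioning / VQA.
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(device)
    generated_ids = model.generate(**inputs, max_new_tokens=40)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()

print(caption_image(Image.open("example.jpg")))  # hypothetical local image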
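Note on the second hunk: the commit swaps the gr.components calls for the legacy gr.inputs / gr.outputs shortcut classes. Those namespaces were deprecated in Gradio 3 and removed in Gradio 4, so on a current Gradio release the same interface would be wired with top-level components. The sketch below assumes the four-argument blip2_interface signature shown in the diff; the labels are placeholders, not taken from the app.

import gradio as gr

iface = gr.Interface(
    fn=blip2_interface,  # the handler defined earlier in app.py
    inputs=[
        gr.Image(type="numpy"),
        gr.Textbox(label="Prompted caption text"),
        gr.Textbox(label="VQA question"),
        gr.Textbox(label="Chat context"),
    ],
    outputs=gr.Textbox(label="Output"),
)

if __name__ == "__main__":
    iface.launch()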