ancebuc committed on
Commit 399ea10 · verified · 1 Parent(s): a49c2ff
Files changed (1)
  1. app.py +33 -2
app.py CHANGED
@@ -1,7 +1,38 @@
+from PIL import Image
+import requests
+
+import torch
+
+import matplotlib.pyplot as plt
+import numpy as np
+
 import gradio as gr
 
-def greet(name):
-    return "Hello " + name + "!!"
+from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
+
+processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
+model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
+
+def visualize_segmentation(image, prompts, preds):
+    _, ax = plt.subplots(1, len(prompts) + 1, figsize=(3*(len(prompts) + 1), 4))
+    [a.axis('off') for a in ax.flatten()]
+    ax[0].imshow(image)
+    [ax[i+1].imshow(torch.sigmoid(preds[i][0])) for i in range(len(prompts))];
+    [ax[i+1].text(0, -15, prompt) for i, prompt in enumerate(prompts)];
+
+
+def segment(img, clases):
+    prompts = clases.split(',')
+
+    inputs = processor(text=prompts, images=[image] * len(img), padding="max_length", return_tensors="pt")
+
+    with torch.no_grad():
+        outputs = model(**inputs)
+    preds = outputs.logits.unsqueeze(1)
+
+
+
+    return "Hello " + prompts + "!!"
 
 demo = gr.Interface(fn=greet, inputs=["image","text"], outputs="text")
 demo.launch()
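
As committed, app.py will not run: segment builds its image batch from the undefined name image and from len(img) rather than len(prompts), returns a list concatenated to a string, never calls visualize_segmentation, and gr.Interface still points at the removed greet function with a "text" output. A minimal runnable sketch of the same CLIPSeg + Gradio wiring could look like the following; returning the matplotlib figure through a "plot" output is an assumption, since the commit never defines what the Space should display.

# Hedged sketch, not the committed code: the "plot" output and figure return are assumptions.
import torch
import matplotlib.pyplot as plt
import gradio as gr
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

def visualize_segmentation(image, prompts, preds):
    # One panel for the input image plus one sigmoid heatmap per prompt.
    fig, ax = plt.subplots(1, len(prompts) + 1, figsize=(3 * (len(prompts) + 1), 4))
    for a in ax.flatten():
        a.axis("off")
    ax[0].imshow(image)
    for i, prompt in enumerate(prompts):
        ax[i + 1].imshow(torch.sigmoid(preds[i][0]))
        ax[i + 1].text(0, -15, prompt)
    return fig

def segment(img, clases):
    # Comma-separated class names become one text prompt each.
    prompts = [p.strip() for p in clases.split(",")]
    # One copy of the input image per prompt (img, not the undefined image;
    # len(prompts), not len(img)).
    inputs = processor(text=prompts, images=[img] * len(prompts),
                       padding="max_length", return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    preds = outputs.logits
    if preds.dim() == 2:
        # A single prompt yields an (H, W) map; add the batch dimension back.
        preds = preds.unsqueeze(0)
    preds = preds.unsqueeze(1)
    return visualize_segmentation(img, prompts, preds)

# fn must point at segment; greet was removed in this commit.
demo = gr.Interface(fn=segment, inputs=["image", "text"], outputs="plot")
demo.launch()

With that wiring, an input image plus a prompt string such as "cat,remote" should produce one heatmap per class next to the original image, which is what visualize_segmentation was evidently written to show.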