README.md
CHANGED
@@ -5,7 +5,7 @@ emoji: 📈
 colorFrom: blue
 colorTo: yellow
 sdk: gradio
-sdk_version: 4.12.0
+sdk_version: 4.12.0
 app_file: app.py
 pinned: false
 ---
app.py
CHANGED
@@ -1,121 +1,13 @@
-import gradio as gr
-
-from matplotlib import gridspec
-import matplotlib.pyplot as plt
-import numpy as np
+from transformers import SegformerFeatureExtractor, SegformerForSemanticSegmentation
 from PIL import Image
 import requests
-from transformers import AutoFeatureExtractor, SegformerForSemanticSegmentation
 
-
-model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-cityscapes-1024-1024")
+feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b0-finetuned-cityscapes-1024-1024")
+model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-cityscapes-1024-1024")
 
 url = "http://images.cocodataset.org/val2017/000000039769.jpg"
 image = Image.open(requests.get(url, stream=True).raw)
 
 inputs = feature_extractor(images=image, return_tensors="pt")
 outputs = model(**inputs)
-logits = outputs.logits  # shape (batch_size, num_labels, height/4, width/4)
-
-
-def ade_palette():
-    """ADE20K palette that maps each class to RGB values."""
-    return [
-        [255, 0, 0],
-        [255, 94, 0],
-        [255, 187, 0],
-        [255, 228, 0],
-        [171, 242, 0],
-        [29, 219, 22],
-        [0, 216, 255],
-        [0, 84, 255],
-        [1, 0, 255],
-        [95, 0, 255],
-        [255, 0, 221],
-        [255, 0, 127],
-        [152, 0, 0],
-        [153, 112, 0],
-        [107, 153, 0],
-        [0, 51, 153],
-        [63, 0, 153],
-        [153, 0, 133]
-    ]
-
-
-labels_list = []
-
-with open(r"labels.txt", "r") as fp:
-    for line in fp:
-        labels_list.append(line[:-1])
-
-colormap = np.asarray(ade_palette())
-
-
-def label_to_color_image(label):
-    if label.ndim != 2:
-        raise ValueError("Expect 2-D input label")
-
-    if np.max(label) >= len(colormap):
-        raise ValueError("label value too large.")
-    return colormap[label]
-
-
-def draw_plot(pred_img, seg):
-    fig = plt.figure(figsize=(20, 15))
-
-    grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])
-
-    plt.subplot(grid_spec[0])
-    plt.imshow(pred_img)
-    plt.axis("off")
-    LABEL_NAMES = np.asarray(labels_list)
-    FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
-    FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)
-
-    unique_labels = np.unique(seg.numpy().astype("uint8"))
-    ax = plt.subplot(grid_spec[1])
-    plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation="nearest")
-    ax.yaxis.tick_right()
-    plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
-    plt.xticks([], [])
-    ax.tick_params(width=0.0, labelsize=25)
-    return fig
-
-
-def sepia(input_img):
-    input_img = Image.fromarray(input_img)
-
-    inputs = feature_extractor(images=input_img, return_tensors="tf")
-    outputs = model(**inputs)
-    logits = outputs.logits
-
-    logits = tf.transpose(logits, [0, 2, 3, 1])
-    logits = tf.image.resize(
-        logits, input_img.size[::-1]
-    )  # We reverse the shape of `image` because `image.size` returns width and height.
-    seg = tf.math.argmax(logits, axis=-1)[0]
-
-    color_seg = np.zeros(
-        (seg.shape[0], seg.shape[1], 3), dtype=np.uint8
-    )  # height, width, 3
-    for label, color in enumerate(colormap):
-        color_seg[seg.numpy() == label, :] = color
-
-    # Show image + mask
-    pred_img = np.array(input_img) * 0.5 + color_seg * 0.5
-    pred_img = pred_img.astype(np.uint8)
-
-    fig = draw_plot(pred_img, seg)
-    return fig
-
-
-demo = gr.Interface(
-    fn=sepia,
-    inputs=gr.Image(shape=(400, 600)),
-    outputs=["plot"],
-    examples=[
-        "image1.jpg"],
-    allow_flagging="never",
-)
-
-
-demo.launch()
+logits = outputs.logits  # shape (batch_size, num_labels, height/4, width/4)
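
The rewritten app.py stops at the raw logits, which SegFormer emits at a quarter of the input resolution. To get a per-pixel class map you would still upsample and take an argmax; a minimal sketch of that continuation (the `upsampled` and `seg` names are illustrative, not part of the committed script):

    import torch

    # logits: (batch_size, num_labels, height/4, width/4); upsample to the
    # original image size before taking the per-pixel argmax.
    upsampled = torch.nn.functional.interpolate(
        logits,
        size=image.size[::-1],  # PIL .size is (width, height); interpolate wants (height, width)
        mode="bilinear",
        align_corners=False,
    )
    seg = upsampled.argmax(dim=1)[0]  # (height, width) map of Cityscapes class ids

Recent transformers releases also expose feature_extractor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]]), which performs the same resize-and-argmax in one call; whether it is available depends on the transformers version installed in the Space.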