import cv2
from fastai.vision.all import *
import numpy as np
import gradio as gr
from scipy import ndimage

fnames = get_image_files("./albumentations/original")
def label_func(fn): return f"./albumentations/labelled/{fn.stem}.png"
codes = np.loadtxt('labels.txt', dtype=str)
w, h = 768, 1152
img_size = (w, h)  # size used for the fastai Resize item transform
im_size = (h, w)   # size tuple passed to cv2.resize at inference time

dls = SegmentationDataLoaders.from_label_func(
    ".", bs=3, fnames = fnames, label_func = label_func, codes = codes,
    item_tfms=Resize(img_size)
)

learn = unet_learner(dls, resnet34)
learn.load('learn')  # load the trained weights (fastai looks for ./models/learn.pth)

def segmentImage(img_path):
    # Load the predicted mask as grayscale and binarise it (any non-zero pixel -> 1)
    img = cv2.imread(img_path, 0)
    img = (img > 0).astype(np.uint8)
    # Morphological opening (erode then dilate) to remove small speckles
    kernel = np.ones((3, 3), np.uint8)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=1)
    # Fill holes inside grains, then label each connected grain
    img = ndimage.binary_fill_holes(img).astype(int)
    labels, nlabels = ndimage.label(img)

    # Grain areas in pixels (index 0 is the background component)
    sizes = ndimage.sum(img, labels, range(nlabels + 1))
    scale_factor = 3072 / 1152   # ratio of the original resolution (3072 px) to the mask resolution (1152 px)
    c = 0.4228320313             # calibration constant (micrometers per pixel)
    # Convert pixel areas to square micrometers: area * scale_factor^2 * c^2
    new_sizes = [size * scale_factor * scale_factor * c * c for size in sizes]
    # Round the grain sizes to 2 decimal places
    new_sizes = [round(size, 2) for size in new_sizes]
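    # Worked example of the conversion: a grain covering 100 pixels in the
    # 1152-px-wide mask corresponds to roughly
    #   100 * (3072 / 1152)^2 * 0.4228320313^2 ≈ 127.1 square micrometers.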
    # Map each grain to a colour based on its size band (thresholds are in square micrometers)
    gradient_img = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
    colors = []
    for i in range(len(new_sizes)):
        if new_sizes[i] < 250 * c * c:
            colors.append((255, 255, 255))
        elif new_sizes[i] < 7500 * c * c:
            colors.append((2, 106, 248))
        elif new_sizes[i] < 20000 * c * c:
            colors.append((0, 255, 107))
        elif new_sizes[i] < 45000 * c * c:
            colors.append((255, 201, 60))
        else:
            colors.append((255, 0, 0))
    # Paint every labelled pixel with its grain's size-band colour
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            if labels[i][j] != 0:
                gradient_img[i][j] = colors[labels[i][j]]
    # Second visualisation: a random colour per grain, black background
    colors = np.random.randint(0, 255, (nlabels + 1, 3))
    colors[0] = 0
    img_color = colors[labels]
    return img_color, gradient_img
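
# A minimal sketch of calling segmentImage on its own, assuming 'mask.png' is a
# grayscale mask saved by the model (the filename is only illustrative):
#   colored, graded = segmentImage('mask.png')
#   cv2.imwrite('colored.png', colored.astype(np.uint8))
#   cv2.imwrite('graded.png', graded)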

def predict_segmentation(img):
    # Convert the input to grayscale and resize it to the model's input size
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    resized_img = cv2.resize(gray_img, im_size)
    # Run the U-Net and scale the predicted mask to 0-255 for display
    pred = learn.predict(resized_img)
    scaled_pred = (pred[0].numpy() * 255).astype(np.uint8)
    output_image = PILImage.create(scaled_pred)
    # Save the mask to a temporary file so segmentImage can post-process it
    temp_file = 'temp.png'
    output_image.save(temp_file)
    # Label the grains and build the two visualisations
    segmented_image, gradient_image = segmentImage(temp_file)
    return output_image, segmented_image, gradient_image
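
# A minimal sketch of calling predict_segmentation outside Gradio, assuming
# 'micrograph.jpg' is a sample micrograph on disk (the path is illustrative):
#   sample = cv2.imread('micrograph.jpg')
#   mask, colored, graded = predict_segmentation(sample)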

input_image = gr.inputs.Image()
output_image1 = gr.outputs.Image(type='pil')
output_image2 = gr.outputs.Image(type='pil')
output_image3 = gr.outputs.Image(type='pil')  # predict_segmentation returns three images
app = gr.Interface(fn=predict_segmentation, inputs=input_image,
                   outputs=[output_image1, output_image2, output_image3],
                   title='Microstructure Segmentation',
                   description='Segment the input image into grain and background.')
app.launch()