File size: 5,061 Bytes
92d14a2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122

import subprocess
import os

# On Hugging Face Spaces the SYSTEM env var is set to 'spaces'; install the
# runtime dependencies that are not baked into the base image.
if os.getenv('SYSTEM') == 'spaces':
    subprocess.call('pip install tensorflow==2.9'.split())
    subprocess.call('pip install keras==2.9'.split())
    # BUG FIX: the command must be split into an argument list. The original
    # passed the raw string, which subprocess (shell=False) would try to
    # execute as one program named "pip install git+...", failing outright.
    subprocess.call('pip install git+https://github.com/facebookresearch/segment-anything.git'.split())
    subprocess.call('pip install opencv-python-headless==4.5.5.64'.split())
    subprocess.call('pip install git+https://github.com/cocodataset/panopticapi.git'.split())

import gradio as gr
from huggingface_hub import snapshot_download
import cv2 
import dotenv 
# Load variables from a local .env file (e.g. READ_TOKEN) into the environment.
dotenv.load_dotenv()
import numpy as np
import gradio as gr
import glob
from inference_sam import segmentation_sam

import pathlib

# Download the example-image dataset from the Hugging Face Hub on first run.
# READ_TOKEN must grant read access to the dataset repo; the snapshot is
# materialised into the local 'images' directory used by the Examples widgets.
if not os.path.exists('images'):
    REPO_ID='Serrelab/image_examples_gradio'
    snapshot_download(repo_id=REPO_ID, token=os.environ.get('READ_TOKEN'),repo_type='dataset',local_dir='images')


def segment_image(input_image):
    """Segment *input_image* with SAM and return the segmented image."""
    return segmentation_sam(input_image)

def classify_image(input_image, model_name):
    """Classify *input_image* with the model selected in the dropdown.

    Parameters
    ----------
    input_image : image value forwarded from the Gradio Image component.
    model_name : str
        One of "Rock 170", "Mummified 170" or "Fossils 19".

    Returns
    -------
    The backend's prediction object, or ``None`` when *model_name* is not
    one of the known models.
    """
    # Imports are deliberately deferred so each heavy model backend is only
    # loaded when its branch is actually selected.
    if model_name == 'Rock 170':
        from inference_resnet import inference_resnet_finer
        return inference_resnet_finer(input_image, model_name, n_classes=171)
    elif model_name == 'Mummified 170':
        from inference_resnet import inference_resnet_finer
        return inference_resnet_finer(input_image, model_name, n_classes=170)
    elif model_name == 'Fossils 19':
        from inference_beit import inference_dino
        return inference_dino(input_image, model_name)
    return None

def find_closest(input_image):
    """Placeholder for nearest-image search; always returns ``None``."""
    return None


# ---------------------------------------------------------------------------
# Gradio UI: SAM segmentation, classification, a diffusion panel, model
# explanations and nearest-image search over the example dataset.
# ---------------------------------------------------------------------------
with gr.Blocks(theme='sudeepshouche/minimalist') as demo:

    with gr.Tab(" 19 Classes Support"):

        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Input")
                classify_image_button = gr.Button("Classify Image")

            with gr.Column():
                # BUG FIX: gr.outputs.Image is the deprecated pre-Blocks API
                # (removed in current Gradio); use gr.Image directly, matching
                # every other component in this file.
                segmented_image = gr.Image(label="SAM output", type='numpy')
                segment_button = gr.Button("Segment Image")
                #classify_segmented_button = gr.Button("Classify Segmented Image")

            with gr.Column():
                drop_2 = gr.Dropdown(
                    ["Mummified 170", "Rock 170", "Fossils 19"],
                    multiselect=False,
                    # BUG FIX: with multiselect=False the value must be a plain
                    # string. The original list value made classify_image
                    # receive a list, so no model branch ever matched.
                    value="Rock 170",
                    label="Model",
                    interactive=True,
                )
                class_predicted = gr.Label(label='Class Predicted', num_top_classes=10)

        with gr.Row():

            # Example galleries built from the dataset snapshot downloaded at
            # startup (see the snapshot_download call above).
            paths = sorted(pathlib.Path('images/').rglob('*.jpg'))
            samples = [[path.as_posix()] for path in paths if 'fossils' in str(path)][:19]
            examples_fossils = gr.Examples(samples, inputs=input_image, examples_per_page=10, label='Fossils Examples from the dataset')
            samples = [[path.as_posix()] for path in paths if 'leaves' in str(path)][:19]
            examples_leaves = gr.Examples(samples, inputs=input_image, examples_per_page=5, label='Leaves Examples from the dataset')

        with gr.Accordion("Using Diffuser"):
            with gr.Column():
                prompt = gr.Textbox(lines=1, label="Prompt")
                output_image = gr.Image(label="Output")
                generate_button = gr.Button("Generate Leave")
            with gr.Column():
                class_predicted2 = gr.Label(label='Class Predicted from diffuser')
                classify_button = gr.Button("Classify Image")

        with gr.Accordion("Explanations "):
            gr.Markdown("Computing Explanations from the model")
            with gr.Row():
                original_input = gr.Image(label="Original Frame")
                saliency = gr.Image(label="saliency")
                gradcam = gr.Image(label='gradcam')
                guided_gradcam = gr.Image(label='guided gradcam')
                guided_backprop = gr.Image(label='guided backprop')
            generate_explanations = gr.Button("Generate Explanations")

        with gr.Accordion('Closest Images'):
            gr.Markdown("Finding the closest images in the dataset")
            with gr.Row():
                closest_image_0 = gr.Image(label='Closest Image')
                closest_image_1 = gr.Image(label='Second Closest Image')
                closest_image_2 = gr.Image(label='Third Closest Image')
                closest_image_3 = gr.Image(label='Forth Closest Image')
                closest_image_4 = gr.Image(label='Fifth Closest Image')
            find_closest_btn = gr.Button("Find Closest Images")

        # Wire buttons to their handlers. Several buttons (generate, explain,
        # find-closest) are intentionally not connected yet.
        segment_button.click(segment_image, inputs=input_image, outputs=segmented_image)
        classify_image_button.click(classify_image, inputs=[input_image, drop_2], outputs=class_predicted)
        #classify_segmented_button.click(classify_image, inputs=[segmented_image,drop_2], outputs=class_predicted)


demo.launch(debug=True)