kkhushisaid committed on
Commit
dbd9f86
verified
1 Parent(s): d1dee8b

Upload 5 files

Files changed (5)
  1. README.md +9 -5
  2. app.py +148 -0
  3. gitattributes +36 -0
  4. pneumonia_cnn_model.pt +3 -0
  5. requirements.txt +3 -0
README.md CHANGED
@@ -1,12 +1,16 @@
  ---
- title: Pneumonia Detection Using X-ray Images
- emoji: 💻
+ title: Pneumonia Detection
+ emoji: 🫁
  colorFrom: yellow
  colorTo: yellow
  sdk: gradio
- sdk_version: 5.23.3
+ sdk_version: 5.5.0
  app_file: app.py
- pinned: false
+ pinned: true
+ license: apache-2.0
+ short_description: Pneumonia Detection
+ thumbnail: >-
+   https://cdn-uploads.huggingface.co/production/uploads/6628ccaee267e83de839e358/I72mJrksa-dGsPyfE503F.png
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,148 @@
+ import torch
+ import torch.nn as nn
+ from torchvision import transforms as T
+ import os
+ import gradio as gr
+
+ #################################
+ # Define problem parameters
+ #################################
+
+ class config:
+
+     img_size = 224
+
+     pn_mean = [0.4752, 0.4752, 0.4752]  # Pneumonia dataset mean
+     pn_std = [0.2234, 0.2234, 0.2234]   # Pneumonia dataset std
+
+     class_names = ["Normal", "Pneumonia"]
+
+
+ device = torch.device('cpu')
+
+ print(f"device: {device}")
+
+ #######################################
+ # Define image transformation pipeline
+ #######################################
+
+ class Gray2RGB:
+     def __call__(self, image):
+         if image.shape[0] == 3:
+             return image
+         else:
+             return image.repeat(3, 1, 1)  # Repeat the single channel across 3 channels to convert to RGB
+
+ test_transform_custom = T.Compose([
+     T.Resize(size=(config.img_size, config.img_size)),
+     T.ToTensor(),
+     Gray2RGB(),
+     T.Normalize(config.pn_mean, config.pn_std),
+ ])
+
+
+ #################################
+ # Define model architecture
+ #################################
+
+ class ConvolutionalNetwork(nn.Module):
+     def __init__(self):
+         super().__init__()
+
+         self.conv1 = nn.Sequential(
+             nn.Conv2d(3, 8, 3, stride=1, padding=1),
+             nn.ReLU(inplace=True),
+             nn.BatchNorm2d(8),
+             nn.MaxPool2d(2, 2))
+
+         self.conv2 = nn.Sequential(
+             nn.Conv2d(8, 16, 3, stride=1, padding=1),
+             nn.ReLU(inplace=True),
+             nn.BatchNorm2d(16),
+             nn.MaxPool2d(2, 2))
+
+         self.conv3 = nn.Sequential(
+             nn.Conv2d(16, 32, 3, stride=1, padding=1),
+             nn.ReLU(inplace=True),
+             nn.BatchNorm2d(32),
+             nn.MaxPool2d(2, 2))
+
+         self.conv4 = nn.Sequential(
+             nn.Conv2d(32, 64, 3, stride=1, padding=1),
+             nn.ReLU(inplace=True),
+             nn.BatchNorm2d(64),
+             nn.MaxPool2d(2, 2))
+
+         self.conv5 = nn.Sequential(
+             nn.Conv2d(64, 128, 3, stride=1, padding=1),
+             nn.ReLU(inplace=True),
+             nn.BatchNorm2d(128),
+             nn.MaxPool2d(2, 2))
+
+         self.fc = nn.Sequential(
+             nn.Linear(128*7*7, 512),
+             nn.ReLU(inplace=True),
+             nn.BatchNorm1d(512),
+             nn.Dropout(0.5),
+             nn.Linear(512, 2))
+
+     def forward(self, x):
+         x = self.conv1(x)
+         x = self.conv2(x)
+         x = self.conv3(x)
+         x = self.conv4(x)
+         x = self.conv5(x)
+         x = x.view(x.shape[0], -1)
+         x = self.fc(x)
+         return x
+
+
+ cnn_model = ConvolutionalNetwork()
+
+ cnn_model.to(device)
+
+ status = cnn_model.load_state_dict(torch.load('pneumonia_cnn_model.pt', map_location=device, weights_only=True))
+ print(f"Status: {status}")
+
+ #################################
+ # Define the prediction function
+ #################################
+
+ def predict(image):
+     """Transforms and performs a prediction on an image and returns the prediction dictionary."""
+
+     image = test_transform_custom(image).unsqueeze(0)
+
+     cnn_model.eval()
+     with torch.no_grad():
+         pred_probs = torch.softmax(cnn_model(image), dim=1)
+
+     # Create a prediction probability dictionary for each prediction class
+     pred_dict = {config.class_names[i]: float(pred_probs[0][i]) for i in range(len(config.class_names))}
+
+     # Return the prediction dictionary
+     return pred_dict
+
+ ##########################
+ # Create the Gradio demo
+ ##########################
+
+ title = "Pneumonia Detection"
+
+ description = """This is a pneumonia detection model that uses a custom convolutional neural network to predict whether an image contains pneumonia or not. \
+ The GitHub project can be accessed [here](https://github.com/mma666/Pneumonia-Detection-Computer-Vision).
+ """
+
+ # Create examples list from "examples/" directory
+ example_list = [["examples/" + example] for example in os.listdir("examples")]
+
+ # Create the Gradio demo
+ demo = gr.Interface(fn=predict,
+                     inputs=[gr.Image(label="Upload image", type="pil", height=320, width=320)],
+                     outputs=[gr.Label(num_top_classes=2, label="Predictions")],
+                     examples=example_list,
+                     title=title,
+                     description=description,
+                     cache_examples=False)
+
+ demo.launch()
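
A quick local sanity check of the uploaded app.py: once the module-level weights and transforms are loaded, predict() can be called directly on a PIL image. This is a minimal sketch, not part of the Space itself; the image path is a hypothetical placeholder for any chest X-ray file.

# Hypothetical smoke test; assumes the definitions from app.py (predict,
# cnn_model, test_transform_custom) are available in the current session
# and that pneumonia_cnn_model.pt has been downloaded.
from PIL import Image

img = Image.open("examples/sample_xray.jpeg")  # placeholder path, any chest X-ray works
pred = predict(img)                            # e.g. {"Normal": 0.12, "Pneumonia": 0.88}
print(max(pred, key=pred.get), pred)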
gitattributes ADDED
@@ -0,0 +1,36 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ pneumonia_cnn_model.pt filter=lfs diff=lfs merge=lfs -text
pneumonia_cnn_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b06a8bac2e4b32c3fe8aa7762d8e5ed6bcc8958bd0706587948b258765e3019
+ size 13271698
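
Because *.pt files are tracked with Git LFS (see gitattributes above), this entry is only a pointer; the ~13 MB weights binary is fetched separately. One hedged way to confirm a local copy matches the pointer is to compare SHA-256 digests:

# Integrity-check sketch; assumes `git lfs pull` (or an equivalent download)
# has replaced the pointer with the real pneumonia_cnn_model.pt binary.
import hashlib

with open("pneumonia_cnn_model.pt", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

expected = "6b06a8bac2e4b32c3fe8aa7762d8e5ed6bcc8958bd0706587948b258765e3019"
print("checksum ok:", digest == expected)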
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ torch==2.4.1
+ torchvision==0.19.1
+ gradio==5.5.0