Mariam-Elz committed · verified
Commit 27d82ef · 1 Parent(s): 3bb59c3

Update app.py

Files changed (1): app.py (+117 -46)

app.py CHANGED
@@ -79,62 +79,62 @@
 # demo.launch()
 ########################3rd-MAIN######################3

-import torch
-import gradio as gr
-import requests
-import os
+# import torch
+# import gradio as gr
+# import requests
+# import os

-# Download model weights from Hugging Face model repo (if not already present)
-model_repo = "Mariam-Elz/CRM"  # Your Hugging Face model repo
+# # Download model weights from Hugging Face model repo (if not already present)
+# model_repo = "Mariam-Elz/CRM"  # Your Hugging Face model repo

-model_files = {
-    "ccm-diffusion.pth": "ccm-diffusion.pth",
-    "pixel-diffusion.pth": "pixel-diffusion.pth",
-    "CRM.pth": "CRM.pth",
-}
+# model_files = {
+#     "ccm-diffusion.pth": "ccm-diffusion.pth",
+#     "pixel-diffusion.pth": "pixel-diffusion.pth",
+#     "CRM.pth": "CRM.pth",
+# }

-os.makedirs("models", exist_ok=True)
+# os.makedirs("models", exist_ok=True)

-for filename, output_path in model_files.items():
-    file_path = f"models/{output_path}"
-    if not os.path.exists(file_path):
-        url = f"https://huggingface.co/{model_repo}/resolve/main/{filename}"
-        print(f"Downloading {filename}...")
-        response = requests.get(url)
-        with open(file_path, "wb") as f:
-            f.write(response.content)
+# for filename, output_path in model_files.items():
+#     file_path = f"models/{output_path}"
+#     if not os.path.exists(file_path):
+#         url = f"https://huggingface.co/{model_repo}/resolve/main/{filename}"
+#         print(f"Downloading {filename}...")
+#         response = requests.get(url)
+#         with open(file_path, "wb") as f:
+#             f.write(response.content)

-# Load model (This part depends on how the model is defined)
-device = "cuda" if torch.cuda.is_available() else "cpu"
+# # Load model (This part depends on how the model is defined)
+# device = "cuda" if torch.cuda.is_available() else "cpu"

-def load_model():
-    model_path = "models/CRM.pth"
-    model = torch.load(model_path, map_location=device)
-    model.eval()
-    return model
+# def load_model():
+#     model_path = "models/CRM.pth"
+#     model = torch.load(model_path, map_location=device)
+#     model.eval()
+#     return model

-model = load_model()
+# model = load_model()

-# Define inference function
-def infer(image):
-    """Process input image and return a reconstructed image."""
-    with torch.no_grad():
-        # Assuming model expects a tensor input
-        image_tensor = torch.tensor(image).to(device)
-        output = model(image_tensor)
-        return output.cpu().numpy()
+# # Define inference function
+# def infer(image):
+#     """Process input image and return a reconstructed image."""
+#     with torch.no_grad():
+#         # Assuming model expects a tensor input
+#         image_tensor = torch.tensor(image).to(device)
+#         output = model(image_tensor)
+#         return output.cpu().numpy()

-# Create Gradio UI
-demo = gr.Interface(
-    fn=infer,
-    inputs=gr.Image(type="numpy"),
-    outputs=gr.Image(type="numpy"),
-    title="Convolutional Reconstruction Model",
-    description="Upload an image to get the reconstructed output."
-)
+# # Create Gradio UI
+# demo = gr.Interface(
+#     fn=infer,
+#     inputs=gr.Image(type="numpy"),
+#     outputs=gr.Image(type="numpy"),
+#     title="Convolutional Reconstruction Model",
+#     description="Upload an image to get the reconstructed output."
+# )

-if __name__ == "__main__":
-    demo.launch()
+# if __name__ == "__main__":
+#     demo.launch()


 #################4th##################
@@ -264,3 +264,74 @@ if __name__ == "__main__":

 # if __name__ == "__main__":
 # demo.launch()
+
+
+#############6th##################
+import torch
+import gradio as gr
+import requests
+import os
+import numpy as np
+
+# Hugging Face Model Repository
+model_repo = "Mariam-Elz/CRM"
+
+# Download Model Weights (Only CRM.pth to Save Memory)
+model_path = "models/CRM.pth"
+os.makedirs("models", exist_ok=True)
+
+if not os.path.exists(model_path):
+    url = f"https://huggingface.co/{model_repo}/resolve/main/CRM.pth"
+    print(f"Downloading CRM.pth...")
+    response = requests.get(url)
+    with open(model_path, "wb") as f:
+        f.write(response.content)
+
+# Set Device (Use CPU to Reduce RAM Usage)
+device = "cpu"
+
+# Load Model Efficiently
+def load_model():
+    model = torch.load(model_path, map_location=device)
+    if isinstance(model, torch.nn.Module):
+        model.eval()  # Ensure model is in inference mode
+    return model
+
+# Load model only when needed (saves memory)
+model = load_model()
+
+# Define Inference Function with Memory Optimizations
+def infer(image):
+    """Process input image and return a reconstructed image."""
+    with torch.no_grad():
+        # Convert image to torch tensor & normalize (float16 to save RAM)
+        image_tensor = torch.tensor(image, dtype=torch.float16).unsqueeze(0).permute(0, 3, 1, 2) / 255.0
+        image_tensor = image_tensor.to(device)
+
+        # Model Inference
+        output = model(image_tensor)
+
+        # Convert back to numpy image format
+        output_image = output.squeeze(0).permute(1, 2, 0).cpu().numpy() * 255.0
+        output_image = np.clip(output_image, 0, 255).astype(np.uint8)
+
+        # Free Memory
+        del image_tensor, output
+        torch.cuda.empty_cache()
+
+        return output_image
+
+# Create Gradio UI
+demo = gr.Interface(
+    fn=infer,
+    inputs=gr.Image(type="numpy"),
+    outputs=gr.Image(type="numpy"),
+    title="Optimized Convolutional Reconstruction Model",
+    description="Upload an image to get the reconstructed output with reduced memory usage."
+)
+
+if __name__ == "__main__":
+    demo.launch()
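A note on the download step in the new block: requests.get(url) without stream=True buffers the entire checkpoint in memory before it is written to disk, which works against the memory budget this commit is aiming for. A minimal alternative sketch, assuming the huggingface_hub package is available (it ships as a Gradio dependency): hf_hub_download streams the file into the local cache and reuses it on later runs instead of re-downloading.

import os
from huggingface_hub import hf_hub_download

# Stream CRM.pth into the local Hugging Face cache; repeated calls
# return the cached path instead of downloading again.
model_path = hf_hub_download(repo_id="Mariam-Elz/CRM", filename="CRM.pth")
print(f"Checkpoint at {model_path}, {os.path.getsize(model_path)} bytes")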
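load_model() above only calls eval() when torch.load returns an nn.Module, yet returns the object either way; if CRM.pth stores a plain state_dict (a dict of tensors), the later model(image_tensor) call fails because a dict is not callable. A sketch that fails loudly in that case instead, assuming the actual CRM network class is not importable here (the commit does not define one):

import torch

def load_model(model_path: str = "models/CRM.pth", device: str = "cpu"):
    checkpoint = torch.load(model_path, map_location=device)
    if not isinstance(checkpoint, torch.nn.Module):
        # A bare state_dict must be loaded into the network class via
        # load_state_dict(); that class is not shown in this commit.
        raise TypeError(
            f"Expected an nn.Module, got {type(checkpoint).__name__}; "
            "instantiate the CRM network and call load_state_dict() instead."
        )
    checkpoint.eval()  # disable dropout / batch-norm updates for inference
    return checkpoint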
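The float16 conversion in infer() is the likeliest runtime failure: on CPU, PyTorch convolutions and linear layers require input and weight dtypes to match, so a half-precision input against float32 weights raises a RuntimeError (and torch.cuda.empty_cache() is a no-op while device = "cpu"). A sketch of the same NCHW, [0, 1]-scaled pre- and post-processing kept in float32; halving precision only pays off once the model itself is also converted with .half().

import numpy as np
import torch

def preprocess(image: np.ndarray, device: str = "cpu") -> torch.Tensor:
    # HWC uint8 image -> NCHW float32 tensor scaled to [0, 1].
    tensor = torch.from_numpy(image).to(torch.float32) / 255.0
    return tensor.permute(2, 0, 1).unsqueeze(0).to(device)

def postprocess(output: torch.Tensor) -> np.ndarray:
    # NCHW float tensor in [0, 1] -> HWC uint8 image for Gradio.
    array = output.squeeze(0).permute(1, 2, 0).cpu().numpy() * 255.0
    return np.clip(array, 0, 255).astype(np.uint8)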
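Finally, on a memory-constrained Space the simplest way to bound peak RAM is to process one request at a time. A sketch assuming Gradio 4.x (older releases spelled this queue(concurrency_count=1)); the lambda is a stand-in so the snippet runs on its own, with infer to be swapped in.

import gradio as gr

# Identity placeholder so the snippet is self-contained; replace with infer.
demo = gr.Interface(fn=lambda x: x,
                    inputs=gr.Image(type="numpy"),
                    outputs=gr.Image(type="numpy"))

if __name__ == "__main__":
    # At most one inference in flight; further requests wait in the queue.
    demo.queue(max_size=4, default_concurrency_limit=1)
    demo.launch()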