Miquel Farré committed on
Commit 3a79668 · 1 Parent(s): b0d1f70
Files changed (2)
  1. app.py +283 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,283 @@
+ import cv2
+ import numpy as np
+ import matplotlib.pyplot as plt
+ from pathlib import Path
+ import gradio as gr
+ import tempfile
+ import os
+ import shutil
+
+ def edge_directed_antialiasing(img, power=2.0):
+     """
+     Apply edge-directed anti-aliasing with adjustable power
+
+     Parameters:
+     - img: Input image (numpy array)
+     - power: Anti-aliasing strength (1.0 is standard, higher values increase the effect)
+
+     Returns:
+     - Output image with anti-aliasing applied
+     """
+     # If image has alpha channel, separate it
+     has_alpha = img.shape[2] == 4 if len(img.shape) > 2 else False
+     if has_alpha:
+         bgr = img[:, :, :3]
+         alpha = img[:, :, 3]
+     else:
+         bgr = img
+         # Create binary mask from grayscale image if no alpha
+         gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
+         _, alpha = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
+
+     # Convert to grayscale for edge detection
+     gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
+
+     # Step 1: Detect edges using Canny
+     # Lower thresholds to catch more edges when power is high
+     canny_threshold1 = int(100 / power)  # Lower threshold when power is high
+     canny_threshold2 = int(200 / power)  # Lower threshold when power is high
+     edges = cv2.Canny(gray, canny_threshold1, canny_threshold2)
+
+     # Dilate edges more when power is high
+     kernel_size = int(3 * power)  # Increase kernel size with power
+     kernel_size = max(3, kernel_size if kernel_size % 2 == 1 else kernel_size + 1)  # Ensure odd kernel size
+     kernel = np.ones((kernel_size, kernel_size), np.uint8)
+
+     # More iterations for higher power
+     dilation_iterations = max(1, int(power))
+     dilated_edges = cv2.dilate(edges, kernel, iterations=dilation_iterations)
+
+     # Step 2: Calculate gradient direction using Sobel
+     # Increase kernel size for higher power
+     sobel_ksize = 3
+     if power > 2.0:
+         sobel_ksize = 5
+     if power > 3.0:
+         sobel_ksize = 7
+
+     sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_ksize)
+     sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_ksize)
+
+     # Calculate gradient magnitude and direction
+     magnitude = np.sqrt(sobelx**2 + sobely**2)
+     direction = np.arctan2(sobely, sobelx) * 180 / np.pi
+
+     # Create output image, starting with the original
+     output = bgr.copy()
+     h, w = output.shape[:2]
+
+     # Step 3: Apply targeted smoothing along edge directions
+     # Sample farther away for higher power
+     radius = max(1, int(power))
+
+     edge_pixels = np.where(dilated_edges > 0)
+     for y, x in zip(edge_pixels[0], edge_pixels[1]):
+         # Skip border pixels
+         if x < radius or y < radius or x >= w-radius or y >= h-radius:
+             continue
+
+         # Get local direction (perpendicular to gradient)
+         local_dir = direction[y, x] + 90
+         if local_dir > 180:
+             local_dir -= 360
+
+         # Normalize direction to 0-180 degrees
+         local_dir = ((local_dir + 180) % 180)
+
+         # Determine interpolation direction based on edge angle
+         if 22.5 <= local_dir < 67.5:  # ~45 degree diagonal
+             # Diagonal top-left to bottom-right
+             neighbors = [(y-radius, x-radius), (y+radius, x+radius)]
+             weights = [0.5, 0.5]
+         elif 67.5 <= local_dir < 112.5:  # Vertical
+             # Top to bottom
+             neighbors = [(y-radius, x), (y+radius, x)]
+             weights = [0.5, 0.5]
+         elif 112.5 <= local_dir < 157.5:  # ~135 degree diagonal
+             # Diagonal top-right to bottom-left
+             neighbors = [(y-radius, x+radius), (y+radius, x-radius)]
+             weights = [0.5, 0.5]
+         else:  # Horizontal
+             # Left to right
+             neighbors = [(y, x-radius), (y, x+radius)]
+             weights = [0.5, 0.5]
+
+         # Only interpolate if we're between different colors (at the border)
+         center_value = gray[y, x]
+         neighbor_values = [int(gray[ny, nx]) for ny, nx in neighbors]  # int() avoids uint8 wrap-around in the difference below
+
+         # Lower contrast threshold when power is high
+         contrast_threshold = int(50 / power)
+
+         # Check if this is an edge between very different values
+         if abs(neighbor_values[0] - neighbor_values[1]) > contrast_threshold:
+             # Apply interpolation based on local contrast
+             for c in range(3):  # RGB channels
+                 weighted_sum = sum(weights[i] * bgr[ny, nx, c] for i, (ny, nx) in enumerate(neighbors))
+                 # More interpolation weight when power is high
+                 blend_factor = min(0.9, 0.3 * power)
+                 # Blend toward the interpolated value, keeping some of the original detail
+                 output[y, x, c] = int(blend_factor * weighted_sum + (1 - blend_factor) * bgr[y, x, c])
+
+     # Update alpha channel with the same smoothing for edges
+     if has_alpha:
+         new_alpha = alpha.copy()
+
+         # Apply a specific smoothing to the alpha channel's edges
+         alpha_edges = cv2.Canny(alpha, int(100/power), int(200/power))
+
+         # More dilation iterations for stronger effect
+         alpha_dilation_iter = max(2, int(power * 2))
+         dilated_alpha_edges = cv2.dilate(alpha_edges, kernel, iterations=alpha_dilation_iter)
+
+         # Radius for sampling neighborhood
+         alpha_radius = max(2, int(power * 2))
+
+         # For each edge pixel in alpha
+         alpha_edge_pixels = np.where(dilated_alpha_edges > 0)
+         for y, x in zip(alpha_edge_pixels[0], alpha_edge_pixels[1]):
+             if x < alpha_radius or y < alpha_radius or x >= w-alpha_radius or y >= h-alpha_radius:
+                 continue
+
+             # Use a larger neighborhood for better smoothing of alpha edges
+             # Size increases with power
+             window_radius = alpha_radius
+             neighborhood = alpha[y-window_radius:y+window_radius+1, x-window_radius:x+window_radius+1].astype(np.float32)
+
+             # Generate gaussian-like weights based on distance from center
+             kernel_size = 2 * window_radius + 1
+             weight_matrix = np.zeros((kernel_size, kernel_size), dtype=np.float32)
+
+             # Create distance-based weights
+             center = window_radius
+             for wy in range(kernel_size):
+                 for wx in range(kernel_size):
+                     # Calculate distance from center
+                     dist = np.sqrt((wy - center)**2 + (wx - center)**2)
+                     # Adjust falloff based on power
+                     falloff = 1.0 / power
+                     # Gaussian-like weight
+                     weight_matrix[wy, wx] = np.exp(-(dist**2) / (2 * (window_radius * falloff)**2))
+
+             # Normalize weights
+             weight_matrix = weight_matrix / weight_matrix.sum()
+
+             # Apply weighted average
+             new_alpha[y, x] = int(np.sum(neighborhood * weight_matrix))
+
+         # Merge BGR with new alpha
+         output = np.dstack([output, new_alpha])
+
+     return output
+
+ def save_as_jpg(img, file_path):
+     """
+     Save image as JPG with high quality
+     """
+     # If image has alpha channel, blend with white background
+     if len(img.shape) > 2 and img.shape[2] == 4:
+         bgr = img[:, :, :3]
+         alpha = img[:, :, 3].astype(float) / 255
+
+         # Create white background
+         bg = np.ones_like(bgr) * 255
+
+         # Blend with background
+         alpha = np.expand_dims(alpha, axis=2)
+         alpha = np.repeat(alpha, 3, axis=2)
+         result = (bgr * alpha + bg * (1 - alpha)).astype(np.uint8)
+     else:
+         result = img
+
+     # Save as JPG
+     cv2.imwrite(file_path, result, [cv2.IMWRITE_JPEG_QUALITY, 95])
+     return file_path
+
+ def create_output_dirs():
+     """Create necessary output directories"""
+     output_dir = os.path.join(tempfile.gettempdir(), "antialiasing_output")
+     os.makedirs(output_dir, exist_ok=True)
+     return output_dir
+
+ def process_image(input_image):
+     """
+     Process image function for Gradio interface
+     """
+     # Create output directory for our files
+     output_dir = create_output_dirs()
+
+     # Convert from RGB (Gradio) to BGR (OpenCV)
+     img_bgr = cv2.cvtColor(input_image, cv2.COLOR_RGB2BGR)
+
+     # Apply edge directed anti-aliasing with power=2.0
+     processed_bgr = edge_directed_antialiasing(img_bgr, power=2.0)
+
+     # Save the processed image explicitly as JPG
+     jpg_path = os.path.join(output_dir, "antialiased_image.jpg")
+     save_as_jpg(processed_bgr, jpg_path)
+
+     # Convert back to RGB for display in Gradio
+     if processed_bgr.shape[2] == 4:  # Has alpha channel
+         # Blend with white background
+         bg = np.ones_like(processed_bgr[:,:,:3]) * 255
+         alpha = processed_bgr[:,:,3]
+         alpha_norm = alpha.astype(float) / 255
+         alpha_norm = np.expand_dims(alpha_norm, axis=2)
+         alpha_norm = np.repeat(alpha_norm, 3, axis=2)
+
+         processed_rgb = processed_bgr[:,:,:3] * alpha_norm + bg * (1 - alpha_norm)
+         processed_rgb = cv2.cvtColor(processed_rgb.astype(np.uint8), cv2.COLOR_BGR2RGB)  # composite is still BGR; convert for display
+     else:
+         processed_rgb = cv2.cvtColor(processed_bgr, cv2.COLOR_BGR2RGB)
+
+     # Create comparison visualization
+     h, w = input_image.shape[:2]
+     dpi = 100
+     plt.figure(figsize=(w*2/dpi, h/dpi), dpi=dpi)
+
+     plt.subplot(1, 2, 1)
+     plt.imshow(input_image)
+     plt.title("Original")
+     plt.axis('off')
+
+     plt.subplot(1, 2, 2)
+     plt.imshow(processed_rgb)
+     plt.title("Anti-aliased (Power = 2.0)")
+     plt.axis('off')
+
+     plt.tight_layout()
+
+     # Save the comparison
+     comparison_file = os.path.join(output_dir, "comparison.jpg")
+     plt.savefig(comparison_file, dpi=dpi, bbox_inches='tight')
+     plt.close()
+
+     return processed_rgb, jpg_path, comparison_file
+
+ # Create Gradio interface
+ with gr.Blocks(title="Edge-Directed Anti-Aliasing") as app:
+     gr.Markdown("# Edge-Directed Anti-Aliasing Tool")
+     gr.Markdown("Upload an image and apply edge-directed anti-aliasing to smooth jagged edges.")
+
+     with gr.Row():
+         input_image = gr.Image(label="Upload Image", type="numpy")
+         output_image = gr.Image(label="Anti-Aliased Result", type="numpy")
+
+     with gr.Row():
+         process_button = gr.Button("Apply Anti-Aliasing (Power = 2.0)")
+
+     with gr.Row():
+         download_jpg = gr.File(label="Download Anti-Aliased JPG", type="filepath")
+         comparison_view = gr.Image(label="Comparison", type="filepath")
+
+     # Process button functionality
+     process_button.click(
+         fn=process_image,
+         inputs=[input_image],
+         outputs=[output_image, download_jpg, comparison_view]
+     )
+
+
+ # Launch the app
+ if __name__ == "__main__":
+     app.launch(share=True)
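
For a quick check of the new functions outside the Gradio UI, a minimal sketch along these lines could be used; the file names input.png and smoothed.jpg are illustrative placeholders, not part of this commit. Importing app builds the Blocks interface but does not launch it, because app.launch() sits behind the __main__ guard.

import cv2

from app import edge_directed_antialiasing, save_as_jpg

# Placeholder input path (assumption); IMREAD_UNCHANGED preserves an alpha channel if the file has one
img = cv2.imread("input.png", cv2.IMREAD_UNCHANGED)
if img is None:
    raise FileNotFoundError("input.png not found")

# edge_directed_antialiasing expects a 3- or 4-channel image, so promote grayscale input
if img.ndim == 2:
    img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

# Run the committed algorithm and save the result the same way the app does
smoothed = edge_directed_antialiasing(img, power=2.0)
save_as_jpg(smoothed, "smoothed.jpg")

Because the smoothing visits each edge pixel in plain Python loops, small test images keep this fast enough for experimentation.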
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ gradio
+ opencv-python
+ numpy
+ matplotlib