import tensorflow as tf
import gradio as gr
import numpy as np
from gradio.components import Image

from model import get_model

def autocontrast(tensor, cutoff=0):
    # Rescale pixel intensities so they span the full 0-255 range.
    tensor = tf.cast(tensor, dtype=tf.float32)
    min_val = tf.reduce_min(tensor)
    max_val = tf.reduce_max(tensor)
    range_val = max_val - min_val
    adjusted_tensor = tf.clip_by_value(tf.cast(tf.round((tensor - min_val - cutoff) * (255 / (range_val - 2 * cutoff))), tf.uint8), 0, 255)
    return adjusted_tensor

def read_image(image):
    # Normalize contrast, fix the channel dimension, and scale pixels to [0, 1].
    image = autocontrast(image)
    image.set_shape([None, None, 3])
    image = tf.cast(image, dtype=tf.float32) / 255
    return image

# Build the enhancement model and load the pretrained weights.
model = get_model()
model.load_weights("./model.h5")

def enhance_image(input_image):
    # Preprocess the input image and add a batch dimension for the model.
    image = read_image(input_image)
    image = np.expand_dims(image, axis=0)
    # Run inference and convert the result back to a PIL image.
    output_image = model.predict(image)
    generated_image = np.squeeze(output_image, axis=0)
    generated_image = tf.keras.preprocessing.image.array_to_img(generated_image)
    return generated_image

# Expose the enhancement function as a Gradio interface with image input and output.
inputs = Image()
outputs = Image()
app = gr.Interface(enhance_image, inputs, outputs)
app.launch()