import tensorflow as tf
from tensorflow import keras
from keras_cv_attention_models import convnext
import gradio as gr
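# Instantiate ConvNeXt-Base; keras_cv_attention_models loads pretrained ImageNet weights by default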
mm = convnext.ConvNeXtBase()
def inference(img):
    # Preprocess with torch-style normalization, resize to the model's input size, add a batch dim
    imm = keras.applications.imagenet_utils.preprocess_input(img, mode='torch')
    image_input = tf.expand_dims(tf.image.resize(imm, mm.input_shape[1:3]), 0)

    # Run the model and decode the top ImageNet class predictions
    pred = mm(image_input)
    pred_names = keras.applications.imagenet_utils.decode_predictions(pred.numpy())[0]

    # Map class name -> confidence score for the top-5 predictions
    result = {}
    for i in range(5):
        result[pred_names[i][1]] = float(pred_names[i][2])
    return result
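# Gradio I/O: a numpy image input and a label output showing the top-5 class confidences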
inputs = gr.inputs.Image(type='numpy')
outputs = gr.outputs.Label(type="confidences", num_top_classes=5)
title = "MOBILENET V2"
description = "Gradio demo for MOBILENET V2, Efficient networks optimized for speed and memory, with residual blocks. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1801.04381'>MobileNetV2: Inverted Residuals and Linear Bottlenecks</a> | <a href='https://github.com/pytorch/vision/blob/master/torchvision/models/mobilenet.py'>Github Repo</a></p>"
gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, analytics_enabled=False).launch()