badguyy committed on
Commit 44c7908 · verified · 1 Parent(s): 5cc7f89

Create app.py

Files changed (1)
  1. app.py +44 -0
app.py ADDED
@@ -0,0 +1,44 @@
+ import io
+ import os
+
+ import gradio as gr
+ from huggingface_hub import InferenceClient
+
+ # Get the API token from the environment (e.g. a Space secret)
+ api_token = os.getenv("HUGGINGFACE_API_TOKEN")
+
+ # Initialize the Inference Client for the segmentation model; this assumes
+ # the model is served through the hosted image-segmentation task
+ client = InferenceClient(
+     model="SerdarHelli/Segmentation-of-Teeth-in-Panoramic-X-ray-Image-Using-U-Net",
+     token=api_token
+ )
+
+ def predict(image):
+     """
+     Process the uploaded image and return the segmentation result.
+
+     Args:
+         image: PIL Image object from the Gradio input
+
+     Returns:
+         The first segmentation mask as a PIL Image
+     """
+     # Serialize the PIL image to PNG bytes, which the client accepts directly
+     buffer = io.BytesIO()
+     image.save(buffer, format="PNG")
+     try:
+         # Each returned segment carries a decoded PIL mask, a label and a score
+         segments = client.image_segmentation(buffer.getvalue())
+     except Exception as e:
+         # Raise a Gradio error instead of returning a string to an Image output
+         raise gr.Error(f"Segmentation request failed: {e}")
+     if not segments:
+         raise gr.Error("The model returned no segments.")
+     # Return the first mask; overlaying all masks on the original image
+     # is a possible follow-up improvement
+     return segments[0].mask
+
+ # Create the Gradio interface
+ iface = gr.Interface(
+     fn=predict,
+     inputs=gr.Image(type="pil", label="Upload Panoramic X-ray Image"),
+     outputs=gr.Image(type="pil", label="Segmentation Result"),
+     title="Teeth Segmentation in Panoramic X-rays",
+     description="Upload an X-ray image to see the segmented teeth."
+ )
+
+ # Launch the interface
+ iface.launch()