Commit 08ec402 · Parent(s): d3f8823
fix issues
main.py CHANGED
@@ -14,7 +14,7 @@ import torch
 import logging
 from typing import List
 import httpx
-
+from concurrent.futures import ProcessPoolExecutor
 
 app = FastAPI()
 
@@ -61,6 +61,23 @@ model_pipelines = {}
 base_model = ResNet("resnet152", num_output_neurons=2).to(device)
 
 
+def process_single_image(image_url, model):
+    try:
+        response = requests.get(image_url)
+        image = Image.open(BytesIO(response.content))
+        processed_image = process_image(image, size=image_size)
+        image_tensor = transforms.ToTensor()(processed_image).unsqueeze(0)
+
+        with torch.no_grad():
+            outputs = model(image_tensor)
+            probabilities = torch.nn.functional.softmax(outputs, dim=1)
+            predicted_probabilities = probabilities.numpy().tolist()
+            confidence = round(predicted_probabilities[0][1], 2)
+        return {"imageUrl": image_url, "confidence": confidence}
+    except Exception as e:
+        return {"imageUrl": image_url, "error": str(e)}
+
+
 @app.on_event("startup")
 async def load_models():
     # Load the models at startup
@@ -165,25 +182,6 @@ class BatchPredictRequest(BaseModel):
 #     # Return the results as JSON
 #     return JSONResponse(content={"results": results})
 
-from concurrent.futures import ProcessPoolExecutor
-
-
-def process_single_image(image_url, model):
-    try:
-        response = requests.get(image_url)
-        image = Image.open(BytesIO(response.content))
-        processed_image = process_image(image, size=image_size)
-        image_tensor = transforms.ToTensor()(processed_image).unsqueeze(0)
-
-        with torch.no_grad():
-            outputs = model(image_tensor)
-            probabilities = torch.nn.functional.softmax(outputs, dim=1)
-            predicted_probabilities = probabilities.numpy().tolist()
-            confidence = round(predicted_probabilities[0][1], 2)
-        return {"imageUrl": image_url, "confidence": confidence}
-    except Exception as e:
-        return {"imageUrl": image_url, "error": str(e)}
-
 
 @app.post("/batch_predict")
 async def batch_predict(request: BatchPredictRequest):
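
Note on the change: the commit moves process_single_image from below the commented-out code up to module level and hoists the ProcessPoolExecutor import to the top of the file. Module-level functions are picklable, which is what a ProcessPoolExecutor needs from the callables it runs in worker processes. The body of batch_predict is not part of this diff, so the following is only a minimal sketch of how the two pieces could be wired together; the executor, its worker count, the imageUrls field name, and the stub body of process_single_image are assumptions, not code from this Space.

# Hypothetical wiring sketch; not part of the commit.
import asyncio
from concurrent.futures import ProcessPoolExecutor
from typing import List

from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()
executor = ProcessPoolExecutor(max_workers=2)  # assumed pool size


class BatchPredictRequest(BaseModel):
    imageUrls: List[str]  # assumed field name; the real schema is not shown in this diff


def process_single_image(image_url: str, model=None):
    # Stand-in for the real module-level function in main.py, which downloads the
    # image and runs it through the ResNet model. Being defined at module level is
    # what makes it picklable and therefore usable from worker processes.
    return {"imageUrl": image_url, "confidence": 0.0}


@app.post("/batch_predict")
async def batch_predict(request: BatchPredictRequest):
    loop = asyncio.get_running_loop()
    # Fan the URLs out to the process pool without blocking the event loop.
    futures = [
        loop.run_in_executor(executor, process_single_image, url, None)
        for url in request.imageUrls
    ]
    results = await asyncio.gather(*futures)
    return {"results": results}

One caveat worth noting: handing a loaded torch model to each worker call (as the second argument of process_single_image) requires the model to be picklable and copied per process, so a real deployment might instead load the model once per worker via an initializer; that detail is outside what this diff shows.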