nicolasRolebot committed on
Commit 091117d · 1 Parent(s): 1d235a8

webhooks and background task

Files changed (4)
  1. app.py +39 -3
  2. config.py +7 -0
  3. tasks.py +26 -0
  4. vitpose.py +101 -0
app.py CHANGED
@@ -1,6 +1,19 @@
- from fastapi import FastAPI
-
+ from fastapi import FastAPI, UploadFile, File, Response, Header, BackgroundTasks, Body
+ from fastapi.staticfiles import StaticFiles
+ from vitpose import VitPose
+ import os
+ from dotenv import load_dotenv
+ from tasks import process_video
+ from fastapi.responses import JSONResponse
+ from config import API_KEY
  app = FastAPI()
+ vitpose = VitPose()
+ # vitpose.pipeline.warmup()
+
+ load_dotenv()
+
+ app.mount("/static", StaticFiles(directory="static"))  # assumes a static/ directory exists

  @app.get("/")
  def read_root():
@@ -8,4 +21,27 @@ def read_root():

  @app.get("/test")
  def test():
-     return {"message": "from test"}
+     return {"message": "from test"}
+
+ @app.post("/upload")
+ async def upload(background_tasks: BackgroundTasks,
+                  file: UploadFile = File(...),
+                  token: str = Header(...),
+                  user_id: str = Body(...)):
+
+     if token != API_KEY:
+         return JSONResponse(content={"message": "Unauthorized", "status": 401})
+
+     contents = await file.read()
+     # Save the upload to the local working directory
+     with open(file.filename, "wb") as f:
+         f.write(contents)
+
+     # Process the saved video in the background; the request returns immediately
+     background_tasks.add_task(process_video, file.filename, vitpose, user_id)
+
+     return JSONResponse(content={"message": "Video uploaded successfully", "status": 200})
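For reference, a minimal client sketch for exercising the new endpoint (the host, token value, and file name are placeholders; `user_id` is sent as a form field, which is how FastAPI is generally expected to bind a `Body(...)` parameter when it appears alongside `File(...)`):

    import requests

    # Hypothetical local server and credentials -- adjust to your deployment.
    url = "http://localhost:8000/upload"
    headers = {"token": "change-me"}  # must match API_KEY from the environment

    with open("clip.mp4", "rb") as f:
        response = requests.post(
            url,
            headers=headers,
            files={"file": ("clip.mp4", f, "video/mp4")},
            data={"user_id": "user-123"},
        )
    print(response.json())  # {"message": "Video uploaded successfully", "status": 200}

Note that the unauthorized branch returns 401 inside the JSON body while the HTTP status stays 200; passing `status_code=401` to `JSONResponse` would surface it at the protocol level as well.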
config.py ADDED
@@ -0,0 +1,7 @@
+ import os
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+ API_URL = os.getenv("API_URL")
+ API_KEY = os.getenv("API_KEY")
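Both values are read from the environment at import time; for local development they would typically come from a `.env` file next to the code. A hypothetical example with placeholder values:

    API_URL=https://backend.example.com
    API_KEY=change-me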
tasks.py ADDED
@@ -0,0 +1,26 @@
+ import os
+ import requests
+ from vitpose import VitPose
+ from config import API_URL
+
+ def process_video(video_path: str, vitpose: VitPose, user_id: str):
+     # e.g. "clip.mp4" -> "static/clip_edited.mp4"
+     name, ext = os.path.splitext(video_path)
+     new_file_name = os.path.join("static", f"{name}_edited{ext}")
+
+     vitpose.output_video_path = new_file_name
+     annotated_frames = vitpose.run(video_path)
+     annotated_video_path = vitpose.frames_to_video(annotated_frames, rotate=True)
+
+     with open(annotated_video_path, "rb") as f:
+         contents = f.read()
+
+     url = API_URL + "/excercises/webhooks/video-processed"
+
+     # Notify the backend webhook, attaching the annotated video
+     files = {"file": (annotated_video_path, contents, "video/mp4")}
+     response = requests.post(url, files=files,
+                              data={"user_id": user_id,
+                                    "typeMessage": "video_processed",
+                                    "file_name": annotated_video_path})
+     print(response.json())
+
+     # Clean up both the raw upload and the annotated output
+     os.remove(video_path)
+     os.remove(annotated_video_path)
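The webhook contract is implicit here: a multipart POST carrying the annotated video plus `user_id`, `typeMessage`, and `file_name` form fields. The receiving route is not part of this commit; a purely hypothetical FastAPI-style receiver, just to illustrate the payload shape this task assumes (the route spelling "excercises" matches the code above):

    from fastapi import FastAPI, File, Form, UploadFile

    app = FastAPI()

    # Hypothetical receiver -- illustrates the payload shape only.
    @app.post("/excercises/webhooks/video-processed")
    async def video_processed(file: UploadFile = File(...),
                              user_id: str = Form(...),
                              typeMessage: str = Form(...),
                              file_name: str = Form(...)):
        contents = await file.read()
        # ... persist the annotated video and notify the user ...
        return {"received": file_name, "user_id": user_id, "bytes": len(contents)}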
vitpose.py ADDED
@@ -0,0 +1,101 @@
+ import torch
+ import cv2
+ import numpy as np
+ import supervision as sv
+ from rt_pose import PoseEstimationPipeline, PoseEstimationOutput
+
+
+ class VitPose:
+     def __init__(self):
+         self.pipeline = PoseEstimationPipeline(
+             object_detection_checkpoint="PekingU/rtdetr_r50vd_coco_o365",
+             pose_estimation_checkpoint="usyd-community/vitpose-plus-small",
+             device="cuda" if torch.cuda.is_available() else "cpu",
+             dtype=torch.bfloat16,
+             compile=True,  # compile the model for extra speedup
+         )
+         self.output_video_path = None
+         self.video_metadata = {}
+
+     def video_to_frames(self, video):
+         frames = []
+         cap = cv2.VideoCapture(video)
+         self.video_metadata = {
+             "fps": cap.get(cv2.CAP_PROP_FPS),
+             "width": int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
+             "height": int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
+         }
+
+         while cap.isOpened():
+             ret, frame = cap.read()
+             if not ret:
+                 break
+             frames.append(frame)
+         cap.release()
+         # NOTE: only the first 10 frames are processed for now
+         return frames[:10]
+
+     def run(self, video):
+         frames = self.video_to_frames(video)
+         annotated_frames = []
+         for frame in frames:
+             output = self.pipeline(frame)
+             annotated_frame = self.visualize_output(frame, output)
+             annotated_frames.append(annotated_frame)
+         return annotated_frames
+
+     def visualize_output(self, image: np.ndarray, output: PoseEstimationOutput, confidence: float = 0.3) -> np.ndarray:
+         """
+         Visualize pose estimation output.
+         """
+         keypoints_xy = output.keypoints_xy.float().cpu().numpy()
+         scores = output.scores.float().cpu().numpy()
+
+         # Supervision will not draw vertices with `0` score
+         # and coordinates with `(0, 0)` value
+         invisible_keypoints = scores < confidence
+         scores[invisible_keypoints] = 0
+         keypoints_xy[invisible_keypoints] = 0
+
+         keypoints = sv.KeyPoints(xy=keypoints_xy, confidence=scores)
+
+         # Scale marker size with the mean detected person height
+         _, y_min, _, y_max = output.person_boxes_xyxy.T
+         height = int((y_max - y_min).mean().item())
+         radius = max(height // 100, 4)
+         thickness = max(height // 200, 2)
+         edge_annotator = sv.EdgeAnnotator(color=sv.Color.YELLOW, thickness=thickness)
+         vertex_annotator = sv.VertexAnnotator(color=sv.Color.ROBOFLOW, radius=radius)
+
+         annotated_frame = image.copy()
+         annotated_frame = edge_annotator.annotate(annotated_frame, keypoints)
+         annotated_frame = vertex_annotator.annotate(annotated_frame, keypoints)
+
+         return annotated_frame
+
+     def frames_to_video(self, frames, rotate=False):
+         fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+         height = self.video_metadata["height"]
+         width = self.video_metadata["width"]
+
+         # If rotation is requested, swap dimensions for the output video
+         if rotate:
+             print(f"Original dimensions: {width}x{height}, Rotated dimensions: {height}x{width}")
+             out = cv2.VideoWriter(self.output_video_path, fourcc, self.video_metadata["fps"], (height, width))
+         else:
+             print(f"Dimensions: {width}x{height}")
+             out = cv2.VideoWriter(self.output_video_path, fourcc, self.video_metadata["fps"], (width, height))
+
+         for frame in frames:
+             if rotate:
+                 # Rotate 90 degrees counterclockwise to match the swapped dimensions
+                 rotated_frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
+                 out.write(rotated_frame)
+             else:
+                 out.write(frame)
+
+         out.release()
+         return self.output_video_path
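The class can also be driven outside the web app; a minimal sketch, assuming a local `clip.mp4` and a writable `static/` directory:

    from vitpose import VitPose

    vitpose = VitPose()
    # output_video_path must be set before frames_to_video() is called
    vitpose.output_video_path = "static/clip_edited.mp4"

    frames = vitpose.run("clip.mp4")            # detect people and draw keypoints
    out_path = vitpose.frames_to_video(frames)  # rotate=True would swap dimensions
    print(f"Annotated video written to {out_path}")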