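"""坐姿识别助手 (sitting-posture assistant).

MediaPipe Pose locates the shoulders and ears to flag a hunched / forward-head
posture, and Qwen2-VL turns that finding into a bilingual (Chinese/English)
reminder, served through a simple Gradio interface.
"""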
import gradio as gr
import mediapipe as mp
import numpy as np
from PIL import Image
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

model_id = "Qwen/Qwen2-VL-7B"
# Load the class with a generation head: plain AutoModel returns the base model
# without the language-model head that .generate() needs.
processor = AutoProcessor.from_pretrained(model_id)
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_id, torch_dtype="auto", device_map="auto"
).eval()
mp_pose = mp.solutions.pose
def analyze_posture_by_keypoints(landmarks):
    # MediaPipe Pose landmark indices: 11/12 = left/right shoulder, 7/8 = left/right ear.
    left_shoulder = landmarks.landmark[11]
    right_shoulder = landmarks.landmark[12]
    left_ear = landmarks.landmark[7]
    right_ear = landmarks.landmark[8]
    # Compare the horizontal midpoints (normalized [0, 1] image coordinates) of the ears
    # and shoulders; a large offset suggests the head is jutting forward of the shoulders.
    shoulder_x = (left_shoulder.x + right_shoulder.x) / 2
    ear_x = (left_ear.x + right_ear.x) / 2
    delta = ear_x - shoulder_x
    if abs(delta) > 0.06:
        return "该用户存在驼背或低头倾向,头部明显前倾。"  # hunched / head-forward tendency
    else:
        return "该用户坐姿较为端正,头部与肩部对齐。"  # posture upright, head aligned with shoulders
def process(image: Image.Image):
    # PIL already yields an RGB array, which is what MediaPipe Pose expects.
    np_image = np.array(image)
    with mp_pose.Pose(static_image_mode=True) as pose:
        results = pose.process(np_image)
    if not results.pose_landmarks:
        return "❗ 无法检测到人体,请上传包含上半身的清晰坐姿照片。"  # no person detected
    posture_analysis = analyze_posture_by_keypoints(results.pose_landmarks)
    # Ask the model to turn the keypoint finding into a bilingual reminder.
    prompt = f"请根据以下坐姿描述生成中英文提醒:\n{posture_analysis}"
    # Qwen2-VL needs the image placeholder inserted through its chat template.
    messages = [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": prompt}]}]
    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = processor(text=[text], images=[image], return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=512)
    # Decode only the newly generated tokens, not the echoed prompt.
    result = processor.batch_decode(outputs[:, inputs.input_ids.shape[1]:], skip_special_tokens=True)[0]
    return result
demo = gr.Interface(
    fn=process,
    inputs=gr.Image(type="pil", label="上传你的坐姿照片"),
    outputs=gr.Textbox(label="中英文坐姿分析结果"),
    title="🪑 Qwen2-VL 坐姿识别助手(修复版)",
    description="融合 Mediapipe 与 Qwen2-VL 模型,判断是否驼背并生成中英文提醒。",
    theme="soft",
    allow_flagging="never",
)
if __name__ == "__main__":
    demo.launch()