# app.py - V-ups Form Analyzer (MediaPipe Pose + OpenCV + Gradio)
import cv2
import mediapipe as mp
import numpy as np
import gradio as gr
from collections import deque
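# Assumed dependencies (versions not pinned here): opencv-python, mediapipe,
# numpy, gradio - e.g. `pip install opencv-python mediapipe numpy gradio`.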
# Initialize MediaPipe Pose once and reuse it for every frame of the video.
mp_pose = mp.solutions.pose
pose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5)
mp_drawing = mp.solutions.drawing_utils
def calculate_angle(a, b, c):
    """Return the angle at vertex b (in degrees, 0-180) formed by points a-b-c."""
    a, b, c = np.array(a), np.array(b), np.array(c)
    radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
    angle = np.abs(np.degrees(radians))
    return angle if angle <= 180 else 360 - angle
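# Sanity check (not part of the app): a right angle at the vertex yields 90,
# e.g. calculate_angle([0, 1], [0, 0], [1, 0]) == 90.0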
def check_vup_feedback(landmarks, angle_buffer):
    """Score one frame of a V-up from the shoulder-hip-knee angle."""
    def midpoint(lm_left, lm_right):
        l = landmarks[lm_left.value]
        r = landmarks[lm_right.value]
        return [(l.x + r.x) / 2, (l.y + r.y) / 2]

    mid_shoulder = midpoint(mp_pose.PoseLandmark.LEFT_SHOULDER, mp_pose.PoseLandmark.RIGHT_SHOULDER)
    mid_hip = midpoint(mp_pose.PoseLandmark.LEFT_HIP, mp_pose.PoseLandmark.RIGHT_HIP)
    mid_knee = midpoint(mp_pose.PoseLandmark.LEFT_KNEE, mp_pose.PoseLandmark.RIGHT_KNEE)

    # Smooth the torso-to-leg angle over the last few frames to reduce jitter.
    angle = calculate_angle(mid_shoulder, mid_hip, mid_knee)
    angle_buffer.append(angle)
    smooth_angle = np.mean(angle_buffer)

    # Map the smoothed angle to a 0-100 score centred on the 90° target.
    accuracy = max(0, min(100, (1 - abs(smooth_angle - 90) / 30) * 100))
    feedback = ("Correct V-up" if smooth_angle < 120
                else "Incorrect V-up - Bring your upper body and legs closer")
    return feedback, int(accuracy), smooth_angle
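# Example of the scoring above: a smoothed angle of 90° scores 100%, while 60°
# or 120° score 0%; anything further from 90° is clamped to 0.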
def draw_info(image, accuracy, feedback, smooth_angle):
    """Overlay an accuracy bar, the current angle, and a feedback message."""
    bar_x, bar_y = 50, image.shape[0] - 70
    bar_width, bar_height = 200, 20
    fill_width = int((accuracy / 100) * bar_width)
    # Colors are BGR: green for good form, yellow for borderline, red for poor.
    color = (0, 255, 0) if accuracy >= 80 else (0, 255, 255) if accuracy >= 50 else (0, 0, 255)
    cv2.rectangle(image, (bar_x, bar_y), (bar_x + bar_width, bar_y + bar_height), (200, 200, 200), 2)
    cv2.rectangle(image, (bar_x, bar_y), (bar_x + fill_width, bar_y + bar_height), color, -1)
    cv2.putText(image, f"Accuracy: {accuracy}%", (bar_x, bar_y - 10),
                cv2.FONT_HERSHEY_DUPLEX, 0.6, (255, 255, 255), 2)
    cv2.putText(image, f"Angle: {int(smooth_angle)}", (bar_x, bar_y - 40),
                cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 0), 2)
    text_color = (0, 255, 0) if "Correct" in feedback else (0, 0, 255)
    cv2.putText(image, feedback, (50, 50),
                cv2.FONT_HERSHEY_COMPLEX, 1, text_color, 3)
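# draw_info mutates `image` in place and returns None, so the caller can keep
# writing the same array to the output video.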
def analyze_vups(video_path):
    """Process an uploaded video frame by frame and return an annotated copy."""
    angle_buffer = deque(maxlen=5)  # rolling window for angle smoothing
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise gr.Error("Could not open the uploaded video.")
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS) if cap.get(cv2.CAP_PROP_FPS) > 0 else 30
    output_video = "output_vups.mp4"
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_video, fourcc, fps, (frame_width, frame_height))
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # MediaPipe expects RGB input; OpenCV delivers BGR frames.
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = pose.process(image)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        if results.pose_landmarks:
            mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
            landmarks = results.pose_landmarks.landmark
            feedback, accuracy, smooth_angle = check_vup_feedback(landmarks, angle_buffer)
            draw_info(image, accuracy, feedback, smooth_angle)
            # Draw a horizontal reference line through the mid-hip point.
            left_hip = landmarks[mp_pose.PoseLandmark.LEFT_HIP.value]
            right_hip = landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value]
            mid_hip_x = int((left_hip.x + right_hip.x) / 2 * frame_width)
            mid_hip_y = int((left_hip.y + right_hip.y) / 2 * frame_height)
            cv2.line(image, (mid_hip_x - 50, mid_hip_y), (mid_hip_x + 50, mid_hip_y), (255, 255, 255), 2)
        out.write(image)
    cap.release()
    out.release()
    return output_video
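# NOTE: the 'mp4v' codec keeps this script dependency-free, but some browsers
# cannot play it inline; if playback fails in the Gradio UI, re-encoding the
# output to H.264 (e.g. with ffmpeg) is a common workaround.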
# Launch the Gradio UI: upload a video, get back the annotated version.
gr.Interface(
    fn=analyze_vups,
    inputs=gr.Video(),
    outputs=gr.Video(),
    title="V-ups Form Analyzer",
    description="Upload a video of your V-ups and receive form feedback!",
).launch()
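# On a Hugging Face Space with the Gradio SDK this file is run automatically;
# locally, run `python app.py` and open the URL printed to the console.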