

4. Let's try approaching it with coordinates. (Experiment)

BrainKimDu 2023. 1. 16. 20:48

 

With the code below, we hold a specific pose while the landmark coordinates are recorded.

 

import cv2
import mediapipe as mp
import numpy as np
import pandas as pd
import datetime as dt


def return_today():
    # Read the clock once so the timestamp fields can't straddle a
    # second boundary between calls.
    now = dt.datetime.now()
    return "_".join(str(v) for v in
                    [now.year, now.month, now.day,
                     now.hour, now.minute, now.second])


mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_pose = mp.solutions.pose

col_list = ["shoulder_x", "shoulder_y", 
            "hip_x", "hip_y", 
            "knee_x", "knee_y",
           "ankle_x", "ankle_y",
           "labels"]

           # labels 추가 해줘야함.

df = pd.DataFrame(columns = col_list)
count = 0


cap = cv2.VideoCapture(0)
with mp_pose.Pose(
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5) as pose:
  while cap.isOpened():
    success, image = cap.read()
    if not success:
      print("Ignoring empty camera frame.")
      # If loading a video, use 'break' instead of 'continue'.
      continue

    # To improve performance, optionally mark the image as not writeable to
    # pass by reference.
    image.flags.writeable = False
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = pose.process(image)

    # Draw the pose annotation on the image.
    image.flags.writeable = True
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    mp_drawing.draw_landmarks(
        image,
        results.pose_landmarks,
        mp_pose.POSE_CONNECTIONS,
        landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())
    # The pose model returns 33 landmarks, indexed 0-32.
    try:
      landmarks = results.pose_landmarks.landmark
      shoulder = [landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value].x,landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value].y]
      hip = [landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value].x,landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value].y]
      knee = [landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value].x,landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value].y]
      ankle = [landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value].x,landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value].y]
      print("detect")

      if count > 10:
        # Roughly every 11th frame, append one sample; the class label is
        # hard-coded to 0 for this recording session.
        df.loc[len(df)] = [shoulder[0], shoulder[1], hip[0], hip[1],
                           knee[0], knee[1], ankle[0], ankle[1], 0]
        print("save")
        count = 0
      
    except Exception:
      print("landmarks not found")

    print(count)
    # Flip the image horizontally for a selfie-view display.
    cv2.imshow('MediaPipe Pose', cv2.flip(image, 1))
    count += 1


    if cv2.waitKey(5) & 0xFF == 27:
      # Esc: dump the collected rows to a timestamped CSV and quit.
      title = "./" + return_today() + '.csv'
      df.to_csv(title, encoding="UTF-8")
      break

cap.release()
cv2.destroyAllWindows()
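One catch in the collector: the label written into every row is the hard-coded 0, so each session records a single class. A minimal sketch of how it could be parameterized (LABEL, append_row, and the 0/1/2 mapping are my assumptions, not part of the original script):

LABEL = 0  # 0 = ready position, 1 = squat, 2 = incorrect (assumed mapping)

def append_row(df, shoulder, hip, knee, ankle, label=LABEL):
    # One flattened (x, y) sample plus its class label.
    df.loc[len(df)] = [shoulder[0], shoulder[1], hip[0], hip[1],
                       knee[0], knee[1], ankle[0], ankle[1], label]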

 

This produced a CSV file with the coordinate values attached.

 

Now let's run some EDA on it in Jupyter.

import pandas as pd
import numpy as np
from glob import glob
csv_list = glob('./*.csv')
col_list = ["shoulder_x", "shoulder_y", 
            "hip_x", "hip_y", 
            "knee_x", "knee_y",
           "ankle_x", "ankle_y",
           "labels"]

df = pd.DataFrame(columns = col_list)

# Stack every session's CSV into one dataframe.
for path in csv_list:
    df_temp = pd.read_csv(path)
    df = pd.concat([df, df_temp])

df

 

df = df.reset_index()
# 'index' comes from reset_index; 'Unnamed: 0' is the row index that
# to_csv wrote out during collection.
df = df.drop(['index', 'Unnamed: 0'], axis=1)
df
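The 'Unnamed: 0' column only exists because to_csv wrote the row index during collection. Saving with index=False, and concatenating with ignore_index=True, would make the reset_index/drop step unnecessary:

# At save time, in the collector:
df.to_csv(title, index=False, encoding="UTF-8")

# At load time, renumber rows across all files in one go:
df = pd.concat([pd.read_csv(p) for p in csv_list], ignore_index=True)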

from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

features = df.drop('labels', axis=1)
labels = df["labels"]
X_train, X_test, y_train, y_test = train_test_split(
    features, labels, test_size=0.2, random_state=13)
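If one pose ends up with far more recorded frames than another, a stratified split keeps the class ratio the same on both sides. A small variation (stratify is a standard scikit-learn parameter, not something the original used):

# Keep the label distribution identical in train and test.
X_train, X_test, y_train, y_test = train_test_split(
    features, labels, test_size=0.2, random_state=13, stratify=labels)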
from sklearn.model_selection import GridSearchCV

params = {'max_depth': [1, 2, 3, 4, 5, 6, 7, 8,
                        9, 10, 11, 12, 13, 14, 15, 16]}
sq = DecisionTreeClassifier(max_depth=2, random_state=13)

gridsearch = GridSearchCV(estimator=sq, param_grid=params, cv=5)
gridsearch.fit(features, labels)
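The search runs, but its result is never read back; best_params_ and best_score_ expose it. Also, fitting the search on the full features/labels lets the held-out test rows influence the chosen depth, so fitting it on the training split only is the safer habit:

# What did the 5-fold search pick?
print(gridsearch.best_params_, gridsearch.best_score_)

# Keeping the held-out rows out of model selection:
gridsearch.fit(X_train, y_train)
print(gridsearch.best_params_, gridsearch.best_score_)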

 

sq = DecisionTreeClassifier(max_depth=3, random_state=13)
sq.fit(X_train, y_train)

# Accuracy measured on the training split.
y_pred_tr = sq.predict(X_train)
accuracy_score(y_train, y_pred_tr)

The accuracy is 1??
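That 1.0 is training accuracy, so it says little by itself; a shallow tree can easily memorize a few hundred near-identical frames. The number worth trusting is on the held-out split:

y_pred_test = sq.predict(X_test)
accuracy_score(y_test, y_pred_test)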

 

import matplotlib.pyplot as plt
from sklearn import tree

fig = plt.figure(figsize=(15, 8))
_ = tree.plot_tree(sq,
                   feature_names=X_train.columns,
                   class_names=['0.0', '1.0', '2.0'],
                   rounded=True,
                   filled=True)
plt.show()
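Reading the plot works, but feature_importances_ gives the same information numerically, showing which coordinates the splits actually rely on:

# Which coordinates carry the splits?
for name, imp in zip(X_train.columns, sq.feature_importances_):
    print(f"{name}: {imp:.3f}")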

 

Let's turn the conditions the decision tree learned into plain if statements.

if shoulder_y <= 0.415:
    if hip_y <= 0.435:
        if knee_y <= 0.559:
            print("ready position")
        else:
            print("incorrect position")
    else:
        print("incorrect position")
else:
    print("squat position")

Adding this to the capture code:

 

import cv2
import mediapipe as mp
import numpy as np
import pandas as pd
import datetime as dt

mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_pose = mp.solutions.pose

count = 0


cap = cv2.VideoCapture(0)
with mp_pose.Pose(
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5) as pose:
  while cap.isOpened():
    success, image = cap.read()
    if not success:
      print("Ignoring empty camera frame.")
      # If loading a video, use 'break' instead of 'continue'.
      continue

    # To improve performance, optionally mark the image as not writeable to
    # pass by reference.
    image.flags.writeable = False
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = pose.process(image)

    # Draw the pose annotation on the image.
    image.flags.writeable = True
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    mp_drawing.draw_landmarks(
        image,
        results.pose_landmarks,
        mp_pose.POSE_CONNECTIONS,
        landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())
    # The pose model returns 33 landmarks, indexed 0-32.
    try:
      landmarks = results.pose_landmarks.landmark
      shoulder = [landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value].x,landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value].y]
      hip = [landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value].x,landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value].y]
      knee = [landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value].x,landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value].y]
      ankle = [landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value].x,landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value].y]

      if count > 10:
        # Thresholds from the fitted tree; the knee condition was dropped
        # because it hurt accuracy (see the note at the end).
        if shoulder[1] <= 0.415:
          if hip[1] <= 0.435:
            print("ready position")
          else:
            print("incorrect position")
        else:
          print("squat position")
        count = 0

    except Exception:
      pass

    # Flip the image horizontally for a selfie-view display.
    cv2.imshow('MediaPipe Pose', cv2.flip(image, 1))
    count += 1


    if cv2.waitKey(5) & 0xFF == 27:
      break

cap.release()
cv2.destroyAllWindows()

 

The accuracy was low, so I dropped one of the conditions.

It can at least tell whether you are standing or squatting.

Still, I suspect using more landmarks would work better.
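As a sketch of that direction (which landmarks to add is my assumption; all of these exist in MediaPipe's PoseLandmark enum), collecting both body sides doubles the feature set without any new code paths:

import mediapipe as mp

mp_pose = mp.solutions.pose

# Both sides of shoulder/hip/knee/ankle instead of the right side only.
LANDMARKS = [
    mp_pose.PoseLandmark.LEFT_SHOULDER, mp_pose.PoseLandmark.RIGHT_SHOULDER,
    mp_pose.PoseLandmark.LEFT_HIP,      mp_pose.PoseLandmark.RIGHT_HIP,
    mp_pose.PoseLandmark.LEFT_KNEE,     mp_pose.PoseLandmark.RIGHT_KNEE,
    mp_pose.PoseLandmark.LEFT_ANKLE,    mp_pose.PoseLandmark.RIGHT_ANKLE,
]

def extract_row(landmarks):
    # Flatten the (x, y) of every tracked landmark into one feature row.
    row = []
    for lm in LANDMARKS:
        row += [landmarks[lm.value].x, landmarks[lm.value].y]
    return row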