dlib 获取人脸姿态 6个关键点

举报
风吹稻花香 发表于 2021/06/05 00:41:18 2021/06/05
【摘要】   源代码:https://github.com/nekoze1004/HeadPoseTrackingTest/blob/cf3ee683bb2399fe018e54077200a8a5009f6de2/HeadPoseTest02.py — 使用 dlib 68 点人脸关键点模型提取 6 个姿态关键点(鼻尖、下巴、眼角、嘴角),再用 OpenCV solvePnP 求解头部姿态(pitch/yaw/roll)。

 

源代码:https://github.com/nekoze1004/HeadPoseTrackingTest/blob/cf3ee683bb2399fe018e54077200a8a5009f6de2/HeadPoseTest02.py


  
import dlib
import cv2
import numpy as np
from imutils import face_utils

# Path to dlib's pre-trained 68-point facial landmark model (must be
# downloaded separately and placed next to this script).
predictor_path = "./shape_predictor_68_face_landmarks.dat"
# Landmark predictor: maps a face rectangle to 68 (x, y) landmark points.
predictor = dlib.shape_predictor(predictor_path)
# dlib's standard frontal face detector, used to locate faces before
# running the landmark predictor.
detector = dlib.get_frontal_face_detector()
# 3D reference points of a generic head model, in model coordinates.
# These must be adjusted to match whatever 3D model is actually used.
# Order matters: it must match the landmark order collected in
# face_shape_detecter_dlib (nose tip, chin, eye corners, mouth corners).
model_points = np.array([
    (0.0, 0.0, 0.0),           # nose tip
    (0.0, -330.0, -65.0),      # chin
    (-225.0, 170.0, -135.0),   # left corner of the left eye
    (225.0, 170.0, -135.0),    # right corner of the right eye
    (-150.0, -150.0, -125.0),  # left corner of the mouth
    (150.0, -150.0, -125.0)    # right corner of the mouth
])
  17. def face_shape_detecter_dlib(img):
  18. img_rgb = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
  19. result = []
  20. points = [30, 8, 45, 36, 54, 48] # 鼻先、顎、左目左端、右目右端、口の左端、口の右端の番号
  21. colors = [(0, 0, 255), (0, 255, 0), (255, 0, 0), (127, 127, 0), (127, 0, 127), (0, 127, 127)]
  22. dets, scores, idx = detector.run(img_rgb, 0)
  23. if len(dets) > 0:
  24. for i, rect in enumerate(dets):
  25. shape = predictor(img_rgb, rect)
  26. shape = face_utils.shape_to_np(shape)
  27. clone = img.copy()
  28. # clone = np.zeros_like(img)
  29. for (x, y) in shape:
  30. cv2.circle(clone, (x, y), 2, (127, 127, 127), -1)
  31. c = 0
  32. for i in points:
  33. result.append(shape[i])
  34. cv2.putText(clone, str(i) + ": " + str(shape[i][0]) + ", " + str(shape[i][1]), (10, (c + 1) * 20),
  35. cv2.FONT_HERSHEY_SIMPLEX, 0.7, colors[c], 2)
  36. cv2.circle(clone, (shape[i][0], shape[i][1]), 3, colors[c], -1)
  37. c += 1
  38. # print(result)
  39. return result, clone
  40. else:
  41. return result, img
  42. def solvePnP(model_points, img_points, camera_matrix):
  43. dist_coeffs = np.zeros((4, 1), np.double) # レンズの歪みが無いと仮定
  44. success, rotation_vector, translation_vecter = cv2.solvePnP(model_points, img_points, camera_matrix, dist_coeffs)
  45. return rotation_vector, translation_vecter
  46. def rodrigues(rotation_vector):
  47. rotation_matrix, jacobian = cv2.Rodrigues(rotation_vector)
  48. return rotation_matrix
  49. def projectionMat(rotation_matrix):
  50. projMat = np.array(
  51. [[rotation_matrix[0][0], rotation_matrix[0][1], rotation_matrix[0][2], 0],
  52. [rotation_matrix[1][0], rotation_matrix[1][1], rotation_matrix[1][2], 0],
  53. [rotation_matrix[2][0], rotation_matrix[2][1], rotation_matrix[2][2], 0]]
  54. )
  55. return projMat
  56. def decomposeProjectionMatrix(projMat):
  57. a = cv2.decomposeProjectionMatrix(projMat)
  58. return a[6] # pitch, yaw, roll
  59. def main():
  60. cap = cv2.VideoCapture(0)
  61. H = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
  62. W = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
  63. size = (H, W)
  64. # カメラ校正を適当に行う
  65. focal_length = size[1]
  66. center = (size[1] / 2, size[0] / 2)
  67. camera_matrix = np.array(
  68. [[focal_length, 0, center[0]],
  69. [0, focal_length, center[1]],
  70. [0, 0, 1]], np.double
  71. )
  72. while True:
  73. ret, frame = cap.read()
  74. face_list, dframe = face_shape_detecter_dlib(frame)
  75. if face_list is not None:
  76. try:
  77. img_points = np.array([
  78. face_list[0], # 鼻先
  79. face_list[1], # 顎
  80. face_list[2], # 左目左端
  81. face_list[3], # 右目右端
  82. face_list[4], # 口の左端
  83. face_list[5] # 口の右端
  84. ], np.double)
  85. # print(img_points)
  86. rotation_vector, translation_vector = solvePnP(model_points, img_points, camera_matrix)
  87. # print("Rotation Vector: \n {}".format(rotation_vector))
  88. # print("Translation Vecter \n {}".format(translation_vector))
  89. rotation_matrix = rodrigues(rotation_vector)
  90. projMat = projectionMat(rotation_matrix)
  91. pitch, yaw, roll = decomposeProjectionMatrix(projMat)
  92. print("\n--------------------\npitch: {}\nyaw : {}\nroll : {}\n".format(pitch, yaw, roll))
  93. cv2.putText(dframe, "pitch : " + str(pitch), (10, 160), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (127, 127, 127),
  94. 2)
  95. cv2.putText(dframe, "yaw : " + str(yaw), (10, 180), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (127, 127, 127), 2)
  96. cv2.putText(dframe, "roll : " + str(roll), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (127, 127, 127), 2)
  97. except:
  98. print("例外") # トラッキングが外れた感じですな
  99. try:
  100. cv2.imshow("img", dframe)
  101. except TypeError:
  102. cv2.imshow("img", frame)
  103. c = cv2.waitKey(1)
  104. if c == 27: # ESCキー
  105. break
  106. cap.release()
  107. cv2.destroyAllWindows()
  108. if __name__ == '__main__':
  109. main()

 

文章来源: blog.csdn.net,作者:网奇,版权归原作者所有,如需转载,请联系作者。

原文链接:blog.csdn.net/jacke121/article/details/100716449

【版权声明】本文为华为云社区用户转载文章,如果您发现本社区中有涉嫌抄袭的内容,欢迎发送邮件进行举报,并提供相关证据,一经查实,本社区将立刻删除涉嫌侵权内容,举报邮箱: cloudbbs@huaweicloud.com
  • 点赞
  • 收藏
  • 关注作者

评论(0)

0/1000
抱歉,系统识别当前为高风险访问,暂不支持该操作

全部回复

上滑加载中

设置昵称

在此一键设置昵称,即可参与社区互动!

*长度不超过10个汉字或20个英文字符,设置后3个月内不可修改。

*长度不超过10个汉字或20个英文字符,设置后3个月内不可修改。