No description provided.
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

front_face.py 3.8KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117
  1. #from IPython.core.pylabtools import find_gui_and_backend
  2. #from oswalk import files
  3. #from google.colab.patches import cv2_imshow
  4. import cv2
  5. import mediapipe as mp
  6. import numpy as np
  7. import glob
  8. import click
  9. mp_face_mesh = mp.solutions.face_mesh
  10. face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5)
  11. @click.command()
  12. @click.argument('eventid', default='')
  13. def cap(eventid):
  14. for files in glob.glob("C:\\Users\\Administrator\\Documents\\AI\\runtimecropimages\\sepration_crop\\"+eventid+"\\*"):
  15. image = cv2.imread(files)
  16. # Flip the image horizontally for a later selfie-view display
  17. # Also convert the color space from BGR to RGB
  18. image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
  19. # To improve performance
  20. image.flags.writeable = False
  21. # Get the result
  22. results = face_mesh.process(image)
  23. # To improve performance
  24. image.flags.writeable = True
  25. # Convert the color space from RGB to BGR
  26. image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
  27. img_h, img_w, img_c = image.shape
  28. face_3d = []
  29. face_2d = []
  30. if results.multi_face_landmarks:
  31. for face_landmarks in results.multi_face_landmarks:
  32. for idx, lm in enumerate(face_landmarks.landmark):
  33. if idx == 33 or idx == 263 or idx == 1 or idx == 61 or idx == 291 or idx == 199:
  34. if idx == 1:
  35. nose_2d = (lm.x * img_w, lm.y * img_h)
  36. nose_3d = (lm.x * img_w, lm.y * img_h, lm.z * 8000)
  37. x, y = int(lm.x * img_w), int(lm.y * img_h)
  38. # Get the 2D Coordinates
  39. face_2d.append([x, y])
  40. # Get the 3D Coordinates
  41. face_3d.append([x, y, lm.z])
  42. # Convert it to the NumPy array
  43. face_2d = np.array(face_2d, dtype=np.float64)
  44. # Convert it to the NumPy array
  45. face_3d = np.array(face_3d, dtype=np.float64)
  46. # The camera matrix
  47. focal_length = 1 * img_w
  48. cam_matrix = np.array([ [focal_length, 0, img_h / 2],
  49. [0, focal_length, img_w / 2],
  50. [0, 0, 1]])
  51. # The Distance Matrix
  52. dist_matrix = np.zeros((4, 1), dtype=np.float64)
  53. # Solve PnP
  54. success, rot_vec, trans_vec = cv2.solvePnP(face_3d, face_2d, cam_matrix, dist_matrix)
  55. # Get rotational matrix
  56. rmat, jac = cv2.Rodrigues(rot_vec)
  57. # Get angles
  58. angles, mtxR, mtxQ, Qx, Qy, Qz = cv2.RQDecomp3x3(rmat)
  59. # Get the y rotation degree
  60. x = angles[0] * 360
  61. y = angles[1] * 360
  62. # print(y)
  63. # See where the user's head tilting
  64. if y < -20:
  65. text = "Left"
  66. elif y > 20:
  67. text = "Right"
  68. elif x < -20:
  69. text = "Down"
  70. else:
  71. text = "Forward"
  72. #djtillu.append(files)
  73. print(files)
  74. import os
  75. import shutil
  76. shutil.copy2(files, 'C:\\Users\\Administrator\\Documents\\AI\\runtimecropimages\\front_face\\'+eventid+"\\")
  77. # Display the nose direction
  78. nose_3d_projection, jacobian = cv2.projectPoints(nose_3d, rot_vec, trans_vec, cam_matrix, dist_matrix)
  79. p1 = (int(nose_2d[0]), int(nose_2d[1]))
  80. p2 = (int(nose_3d_projection[0][0][0]), int(nose_3d_projection[0][0][1]))
  81. cv2.line(image, p1, p2, (255, 0, 0), 2)
  82. # Add the text on the image
  83. cv2.putText(image, text, (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
  84. # cv2_imshow(image)
  85. #
  86. # if cv2.waitKey(5) & 0xFF == 27:
  87. # pass
  88. cap()