No description

multi_pcs.py 15KB

import requests
import time
import multiprocessing
from PIL import Image
from functools import partial
import queue
import pickle
import numpy as np
import face_recognition
import os
from flask import Flask, render_template, request, redirect, send_file
# import shutil
import cv2
import datetime
# Gallery = "D:/share/biz/mt/Copy_Gallery/" + str(seconds).replace("]", "").replace("[", "").replace("'", "")
# People = 'D:/share/biz/mt/People/' + str(seconds).replace("]", "").replace("[", "").replace("'", "")

app = Flask(__name__)


@app.route('/', methods=["GET", "POST"])
def home():
    return "EVENT APP RUNNING.............."
def download(eventid):
    print("process started with event id = " + str(eventid))
    Gallery = "/home/ubuntu/AI/Events/Gallery/" + eventid + "/"
    People = "/home/ubuntu/AI/Events/guestimage/" + eventid + "/"

    def saveEncodings(encs, names, fname="encodings.pickle"):
        """
        Save encodings in a pickle file to be used in future.

        Parameters
        ----------
        encs : List of np arrays
            List of face encodings.
        names : List of strings
            List of names for each face encoding.
        fname : String, optional
            Name/Location for pickle file. The default is "encodings.pickle".

        Returns
        -------
        None.
        """
        data = [{"name": nm, "encoding": enc} for (nm, enc) in zip(names, encs)]
        # dump the facial encodings data to disk
        print("[INFO] serializing encodings...")
        with open(fname, "wb") as f:
            f.write(pickle.dumps(data))

    # Function to read encodings
    def readEncodingsPickle(fname):
        """
        Read Pickle file.

        Parameters
        ----------
        fname : String
            Name of pickle file (full location).

        Returns
        -------
        encodings : list of np arrays
            list of all saved encodings
        names : List of Strings
            List of all saved names
        """
        with open(fname, "rb") as f:
            data = pickle.loads(f.read())
        encodings = [d["encoding"] for d in data]
        names = [d["name"] for d in data]
        return encodings, names

    # Function to create encodings and get face locations
    def createEncodings(image):
        """
        Create face encodings for a given image and also return face locations in the given image.

        Parameters
        ----------
        image : cv2 mat
            Image you want to detect faces from.

        Returns
        -------
        known_encodings : list of np array
            List of face encodings in a given image
        face_locations : list of tuples
            list of tuples for face locations in a given image
        """
        print("encoding..")
        # Find face locations for all faces in an image
        face_locations = face_recognition.face_locations(image)
        # Create encodings for all faces in an image
        known_encodings = face_recognition.face_encodings(image, known_face_locations=face_locations)
        return known_encodings, face_locations
    # Function to compare encodings
    def compareFaceEncodings(unknown_encoding, known_encodings, known_names):
        """
        Compares face encodings to check if 2 faces are same or not.

        Parameters
        ----------
        unknown_encoding : np array
            Face encoding of unknown people.
        known_encodings : np array
            Face encodings of known people.
        known_names : list of strings
            Names of known people

        Returns
        -------
        acceptBool : Bool
            face matched or not
        duplicateName : String
            Name of matched face
        distance : Float
            Distance between 2 faces
        """
        matches = face_recognition.compare_faces(known_encodings, unknown_encoding, tolerance=0.47)
        face_distances = face_recognition.face_distance(known_encodings, unknown_encoding)
        best_match_index = np.argmin(face_distances)
        distance = face_distances[best_match_index]
        if matches[best_match_index]:
            acceptBool = True
            duplicateName = known_names[best_match_index]
        else:
            acceptBool = False
            duplicateName = ""
        return acceptBool, duplicateName, distance
    p = []

    def f_CSVwrite():
        import pandas as pd

        q = pd.DataFrame(p)
        df = q.groupby([0], as_index=False).count()
        z = df[0].str.split('/', expand=True)
        z.to_csv('zzzzzzzzzzzzz.csv', index=False)
        df2 = pd.read_csv('zzzzzzzzzzzzz.csv')
        df2.rename({df2.columns[-1]: 'test'}, axis=1, inplace=True)
        df2.rename({df2.columns[-2]: 'Matched'}, axis=1, inplace=True)
        df2 = df2[['Matched', 'test']]
        c = []
        for root, dirs, files in os.walk(Gallery, topdown=False):
            for name in files:
                L = os.path.join(root, name)
                c.append(L)
        df = pd.DataFrame(c)
        df1 = df[0].str.split("/", expand=True)
        df1.rename({df1.columns[-1]: 'test'}, axis=1, inplace=True)
        merge = pd.merge(df2, df1, on='test', how='left')
        merge.rename({merge.columns[-1]: 'EventName'}, axis=1, inplace=True)
        # merge.to_csv('merge.csv')
        mergesplit = merge.loc[:, 'test'].str.split(".", expand=True)
        mergesplit.rename({mergesplit.columns[-2]: 'ImageName'}, axis=1, inplace=True)
        mergesplit = mergesplit.loc[:, 'ImageName']
        merge['Imagepath'] = "\\_files\\1\\Gallery\\" + merge['EventName'] + '\\' + merge['test']
        frames = [merge, mergesplit]
        r = pd.concat(frames, axis=1, join='inner')
        df2 = r.dropna(subset=['Matched'])
        column_list = ['Matched', 'Imagepath', 'ImageName', 'EventName']
        df2[column_list].to_csv('events.csv', index=False)
        df2[column_list].to_json('events.json', orient="records")
        # import requests
        # import json
        # with open('events.json', 'r') as json_file:
        #     json_load = json.load(json_file)
        # url = "https://eventxstreamnew.bizgaze.com:5443/apis/v4/bizgaze/integrations/events/createpredictedimage"
        # # url = "https://eventxstreamnew.bizgaze.com:5443/apis/v4/bizgaze/integrations/json/eventwisepredicts"
        # payload = json.dumps(json_load).replace("]", "").replace("[", "")
        # print(payload)
        # headers = {
        #     'Authorization': 'stat bcc78ad858354e759249c1770957fede',
        #     'Content-Type': 'application/json'
        # }
        # response = requests.request("POST", url, headers=headers, data=payload)
        # print("Ongoing process with event id = " + str(eventid))
        # print("##############################################################")
        # print(response.text)
        p.clear()
    # Save Image to new directory
    def saveImageToDirectory(image, name, imageName):
        """
        Saves images to directory.

        Parameters
        ----------
        image : cv2 mat
            Image you want to save.
        name : String
            Directory where you want the image to be saved.
        imageName : String
            Name of image.

        Returns
        -------
        None.
        """
        path = "./output/" + name
        if not os.path.exists(path):
            os.mkdir(path)
        cv2.imwrite(path + "/" + imageName, image)
        x = []
        x.append(path + "/" + imageName)
        p.append(x)
        f_CSVwrite()
    # Function for creating encodings for known people
    def processKnownPeopleImages(path=People, saveLocation="./known_encodings.pickle"):
        """
        Process images of known people and create face encodings to compare in future.
        Each image should have just 1 face in it.

        Parameters
        ----------
        path : STRING, optional
            Path for the known people dataset. The default is the event's guest image folder (People).
            It should be noted that each image in this dataset should contain only 1 face.
        saveLocation : STRING, optional
            Path for storing encodings for the known people dataset. The default is "./known_encodings.pickle" in the current directory.

        Returns
        -------
        None.
        """
        print(People)
        known_encodings = []
        known_names = []
        for img in os.listdir(path):
            imgPath = path + img
            # Read image
            image = cv2.imread(imgPath)
            name = img.rsplit('.')[0]
            # Resize
            image = cv2.resize(image, (0, 0), fx=0.6, fy=0.6, interpolation=cv2.INTER_LINEAR)
            # Get locations and encodings
            encs, locs = createEncodings(image)
            try:
                known_encodings.append(encs[0])
            except IndexError:
                # No face detected in this image: remove it and skip it,
                # so that names and encodings stay aligned.
                os.remove(People + img)
                continue
            known_names.append(name)
            for loc in locs:
                top, right, bottom, left = loc
                # Show Image
                # cv2.rectangle(image, (left, top), (right, bottom), color=(255, 0, 0), thickness=2)
                # cv2.imshow("Image", image)
                # cv2.waitKey(1)
                # cv2.destroyAllWindows()
        saveEncodings(known_encodings, known_names, saveLocation)
    # Function for processing dataset images
    def processDatasetImages(saveLocation="./Gallery_encodings.pickle"):
        """
        Process images in the dataset from where you want to separate images.
        It separates the images into directories of known people, groups and any unknown people images.

        Parameters
        ----------
        saveLocation : STRING, optional
            Path for storing encodings for the gallery dataset. The default is "./Gallery_encodings.pickle" in the current directory.

        Returns
        -------
        None.
        """
        # Read pickle file for known people to compare faces from
        people_encodings, names = readEncodingsPickle("./known_encodings.pickle")
        for root, dirs, files in os.walk(Gallery, topdown=False):
            for name in files:
                s = os.path.join(root, name)
                # Read image
                image = cv2.imread(s)
                orig = image.copy()
                # Resize
                image = cv2.resize(image, (0, 0), fx=0.6, fy=0.6, interpolation=cv2.INTER_LINEAR)
                # Get locations and encodings
                encs, locs = createEncodings(image)
                # Save image to a group image folder if more than one face is in image
                # if len(locs) > 1:
                #     saveImageToDirectory(orig, "Group", name)
                # Processing image for each face
                i = 0
                knownFlag = 0
                for loc in locs:
                    top, right, bottom, left = loc
                    unknown_encoding = encs[i]
                    i += 1
                    acceptBool, duplicateName, distance = compareFaceEncodings(unknown_encoding, people_encodings, names)
                    if acceptBool:
                        saveImageToDirectory(orig, duplicateName, name)
                        knownFlag = 1
                if knownFlag == 1:
                    print("Match Found")
                else:
                    saveImageToDirectory(orig, "0", name)
                # Show Image
                # cv2.rectangle(image, (left, top), (right, bottom), color=(255, 0, 0), thickness=2)
                # cv2.imshow("Image", image)
                # cv2.waitKey(1)
                # cv2.destroyAllWindows()
    def main():
        """
        Main Function.

        Returns
        -------
        None.
        """
        processKnownPeopleImages()
        processDatasetImages()
        # import pandas as pd
        # q = pd.DataFrame(p)
        # df1 = q
        # print(df1)
        # # df1.to_csv('m.csv')
        # c = []
        # for root, dirs, files in os.walk(Gallery, topdown=False):
        #     for name in files:
        #         L = os.path.join(root, name)
        #         c.append(L)
        # df2 = pd.DataFrame(c)
        # # df.to_csv('oswalk.csv')
        # # df1 = pd.read_csv('m.csv')
        # # df2 = pd.read_csv('oswalk.csv')
        # df1 = df1[0].str.split('/', expand=True)
        # df1.rename({df1.columns[-2]: 'Matched'}, axis=1, inplace=True)
        # df1.rename({df1.columns[-1]: 'test'}, axis=1, inplace=True)
        # df2 = df2[0].str.split("\\", expand=True)
        # df2.rename({df2.columns[-1]: 'test'}, axis=1, inplace=True)
        # df2.rename({df2.columns[-2]: 'Eventname'}, axis=1, inplace=True)
        # merge = pd.merge(df2, df1, on='test', how='left')
        # mergesplit = merge.loc[:, 'test'].str.split(".", expand=True)
        # mergesplit.rename({mergesplit.columns[-2]: 'ImageName'}, axis=1, inplace=True)
        # mergesplit = mergesplit.loc[:, 'ImageName']
        # merge['Imagepath'] = "/_files/1/Gallery/" + merge['Eventname'] + '/' + merge['test']
        # frames = [merge, mergesplit]
        # r = pd.concat(frames, axis=1, join='inner')
        # first_column = r.pop('Matched')
        # r.insert(0, 'Matched', first_column)
        # second_column = r.pop('Imagepath')
        # r.insert(1, 'Imagepath', second_column)
        # third_column = r.pop('ImageName')
        # r.insert(2, 'ImageName', third_column)
        # fourth_column = r.pop('Eventname')
        # r.insert(3, 'Eventname', fourth_column)
        # r = r.iloc[:, 0:4]
        # r.sort_values(by=['Matched'], inplace=True)
        # print(r)
        # r.to_csv('path.csv', index=False)
        # r.to_json(r'matched.json', orient="records")
        print("process Ended with event id = " + str(eventid))

    main()
@app.route('/eventwise', methods=["GET", "POST"])
def eventwise():
    url_list = []
    Dataset = request.args.get('Dataset')
    # id = "100013660000125"
    url_list.append(Dataset)
    # multiprocessing: run the download pipeline for the requested event id
    with multiprocessing.Pool(processes=10) as pool:
        results = pool.map(download, url_list)
    return "Done"


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8081)
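
For reference, here is a minimal sketch of how the /eventwise endpoint can be exercised once the app is running. It assumes the server was started with python multi_pcs.py on the same machine, so it listens on port 8081 as set in app.run above; the event id shown is only the placeholder value from the commented-out line in eventwise(), not a real event.

import requests

# Hypothetical client call: /eventwise reads the event id from the "Dataset"
# query parameter and hands it to download() through a multiprocessing pool.
# The "Done" response is only returned after the whole event has been processed.
resp = requests.get(
    "http://localhost:8081/eventwise",
    params={"Dataset": "100013660000125"},  # placeholder id, replace with a real event id
)
print(resp.text)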