|
@@ -1,493 +1,493 @@
|
1
|
|
-import requests
|
2
|
|
-import time
|
3
|
|
-import multiprocessing
|
4
|
|
-from PIL import Image
|
5
|
|
-from functools import partial
|
6
|
|
-import queue
|
7
|
|
-import pickle
|
8
|
|
-import time
|
9
|
|
-
|
10
|
|
-import numpy as np
|
11
|
|
-import face_recognition
|
12
|
|
-import os
|
13
|
|
-from flask import Flask, render_template, request, redirect, send_file
|
14
|
|
-# import shutil
|
15
|
|
-import cv2
|
16
|
|
-import datetime
|
17
|
|
-from flask import request
|
18
|
|
-
|
19
|
|
-# Gallery = "D:/share/biz/mt/Copy_Gallery/" + str(seconds).replace("]", "").replace("[", "").replace("'", "")
|
20
|
|
-# People = 'D:/share/biz/mt/People/' + str(seconds).replace("]", "").replace("[", "").replace("'", "") + "/"
|
21
|
|
-app = Flask(__name__)
|
22
|
|
-
|
23
|
|
-@app.route('/', methods=["GET", "POST"])
|
24
|
|
-def home():
|
25
|
|
- return "EVENT APP RUNNING.............."
|
26
|
|
-
|
27
|
|
-
|
28
|
|
-
|
29
|
|
-def download(eventid):
|
30
|
|
- print("process started with event id = "+str(eventid))
|
31
|
|
-
|
32
|
|
-
|
33
|
|
- Gallery = "/home/ubuntu/AI/Events/Gallery/" + eventid+ "/"
|
34
|
|
- People = "/home/ubuntu/AI/Events/guestimage/"+ eventid + "/"
|
35
|
|
-
|
36
|
|
-
|
37
|
|
-
|
38
|
|
-
|
39
|
|
-
|
40
|
|
- def saveEncodings(encs, names, fname="encodings.pickle"):
|
41
|
|
- """
|
42
|
|
- Save encodings in a pickle file to be used in future.
|
43
|
|
-
|
44
|
|
- Parameters
|
45
|
|
- ----------
|
46
|
|
- encs : List of np arrays
|
47
|
|
- List of face encodings.
|
48
|
|
- names : List of strings
|
49
|
|
- List of names for each face encoding.
|
50
|
|
- fname : String, optional
|
51
|
|
- Name/Location for pickle file. The default is "encodings.pickle".
|
52
|
|
-
|
53
|
|
- Returns
|
54
|
|
- -------
|
55
|
|
- None.
|
56
|
|
-
|
57
|
|
- """
|
58
|
|
-
|
59
|
|
- data = []
|
60
|
|
- d = [{"name": nm, "encoding": enc} for (nm, enc) in zip(names, encs)]
|
61
|
|
- data.extend(d)
|
62
|
|
-
|
63
|
|
- encodingsFile = fname
|
64
|
|
-
|
65
|
|
- # dump the facial encodings data to disk
|
66
|
|
- print("[INFO] serializing encodings...")
|
67
|
|
- f = open(encodingsFile, "wb")
|
68
|
|
- f.write(pickle.dumps(data))
|
69
|
|
- f.close()
|
70
|
|
-
|
71
|
|
- # Function to read encodings
|
72
|
|
-
|
73
|
|
- def readEncodingsPickle(fname):
|
74
|
|
- """
|
75
|
|
- Read Pickle file.
|
76
|
|
-
|
77
|
|
- Parameters
|
78
|
|
- ----------
|
79
|
|
- fname : String
|
80
|
|
- Name of pickle file.(Full location)
|
81
|
|
-
|
82
|
|
- Returns
|
83
|
|
- -------
|
84
|
|
- encodings : list of np arrays
|
85
|
|
- list of all saved encodings
|
86
|
|
- names : List of Strings
|
87
|
|
- List of all saved names
|
88
|
|
-
|
89
|
|
- """
|
90
|
|
-
|
91
|
|
- data = pickle.loads(open(fname, "rb").read())
|
92
|
|
- data = np.array(data)
|
93
|
|
- encodings = [d["encoding"] for d in data]
|
94
|
|
- names = [d["name"] for d in data]
|
95
|
|
- return encodings, names
|
96
|
|
-
|
97
|
|
- # Function to create encodings and get face locations
|
98
|
|
- def createEncodings(image):
|
99
|
|
- print("encoding..")
|
100
|
|
- #print('Detecting_face...........')
|
101
|
|
- """
|
102
|
|
- Create face encodings for a given image and also return face locations in the given image.
|
103
|
|
-
|
104
|
|
- Parameters
|
105
|
|
- ----------
|
106
|
|
- image : cv2 mat
|
107
|
|
- Image you want to detect faces from.
|
108
|
|
-
|
109
|
|
- Returns
|
110
|
|
- -------
|
111
|
|
- known_encodings : list of np array
|
112
|
|
- List of face encodings in a given image
|
113
|
|
- face_locations : list of tuples
|
114
|
|
- list of tuples for face locations in a given image
|
115
|
|
-
|
116
|
|
- """
|
117
|
|
-
|
118
|
|
- # Find face locations for all faces in an image
|
119
|
|
- face_locations = face_recognition.face_locations(image)
|
120
|
|
-
|
121
|
|
- # Create encodings for all faces in an image
|
122
|
|
- known_encodings = face_recognition.face_encodings(image, known_face_locations=face_locations)
|
123
|
|
- return known_encodings, face_locations
|
124
|
|
-
|
125
|
|
- # Function to compare encodings
|
126
|
|
- def compareFaceEncodings(unknown_encoding, known_encodings, known_names):
|
127
|
|
- """
|
128
|
|
- Compares face encodings to check if 2 faces are same or not.
|
129
|
|
-
|
130
|
|
- Parameters
|
131
|
|
- ----------
|
132
|
|
- unknown_encoding : np array
|
133
|
|
- Face encoding of unknown people.
|
134
|
|
- known_encodings : np array
|
135
|
|
- Face encodings of known people.
|
136
|
|
- known_names : list of strings
|
137
|
|
- Names of known people
|
138
|
|
-
|
139
|
|
- Returns
|
140
|
|
- -------
|
141
|
|
- acceptBool : Bool
|
142
|
|
- face matched or not
|
143
|
|
- duplicateName : String
|
144
|
|
- Name of matched face
|
145
|
|
- distance : Float
|
146
|
|
- Distance between 2 faces
|
147
|
|
-
|
148
|
|
- """
|
149
|
|
- duplicateName = ""
|
150
|
|
- distance = 0.0
|
151
|
|
- matches = face_recognition.compare_faces(known_encodings, unknown_encoding, tolerance=0.47)
|
152
|
|
- face_distances = face_recognition.face_distance(known_encodings, unknown_encoding)
|
153
|
|
- best_match_index = np.argmin(face_distances)
|
154
|
|
- distance = face_distances[best_match_index]
|
155
|
|
- if matches[best_match_index]:
|
156
|
|
- acceptBool = True
|
157
|
|
- duplicateName = known_names[best_match_index]
|
158
|
|
- else:
|
159
|
|
- acceptBool = False
|
160
|
|
- duplicateName = ""
|
161
|
|
- return acceptBool, duplicateName, distance
|
162
|
|
-
|
163
|
|
- p = []
|
164
|
|
-
|
165
|
|
- def f_CSVwrite():
|
166
|
|
- import pandas as pd
|
167
|
|
- q = pd.DataFrame(p)
|
168
|
|
- #print(q)
|
169
|
|
- m = q
|
170
|
|
- # print(m)
|
171
|
|
- # x.drop(x.columns[Unnam], axis=1, inplace=True)
|
172
|
|
- df = m.groupby([0], as_index=False).count()
|
173
|
|
- z = df[0].str.split('/', expand=True)
|
174
|
|
-
|
175
|
|
-
|
176
|
|
- z.to_csv('zzzzzzzzzzzzz.csv',index=False)
|
177
|
|
- import pandas as pd
|
178
|
|
- df2 = pd.read_csv('zzzzzzzzzzzzz.csv')
|
179
|
|
- df2.rename({df2.columns[-1]: 'test'}, axis=1, inplace=True)
|
180
|
|
- df2.rename({df2.columns[-2]: 'Matched'}, axis=1, inplace=True)
|
181
|
|
- df2 = df2[['Matched', 'test']]
|
182
|
|
-
|
183
|
|
-
|
184
|
|
- import pandas as pd
|
185
|
|
- import os
|
186
|
|
- c = []
|
187
|
|
- for root, dirs, files in os.walk(Gallery,
|
188
|
|
- topdown=False):
|
189
|
|
- for name in files:
|
190
|
|
- # print(name)
|
191
|
|
- L = os.path.join(root, name)
|
192
|
|
- c.append(L)
|
193
|
|
- df = pd.DataFrame(c)
|
194
|
|
-
|
195
|
|
- df1 = df[0].str.split("/", expand=True)
|
196
|
|
- #df1.rename({df1.columns[-2]: 'abc'}, axis=1, inplace=True)
|
197
|
|
- # print('this is df1')
|
198
|
|
- # print(df1)
|
199
|
|
- df1.rename({df1.columns[-1]: 'test'}, axis=1, inplace=True)
|
200
|
|
- merge = pd.merge(df2, df1, on='test', how='left')
|
201
|
|
- merge.rename({merge.columns[-1]: 'EventName'}, axis=1, inplace=True)
|
202
|
|
- # merge.to_csv('merge.csv')
|
203
|
|
- mergesplit = merge.loc[:, 'test'].str.split(".", expand=True)
|
204
|
|
- mergesplit.rename({mergesplit.columns[-2]: 'ImageName'}, axis=1, inplace=True)
|
205
|
|
- mergesplit = mergesplit.loc[:, 'ImageName']
|
206
|
|
-
|
207
|
|
- #merge.rename({merge.columns[-1]: 'Matched'}, axis=1, inplace=True)
|
208
|
|
- #merge['EventName'] = merge['abc']
|
209
|
|
- merge['Imagepath'] = "\\_files\\1\\Gallery\\" + merge['EventName'] + '\\' + + merge['test']
|
210
|
|
-
|
211
|
|
-
|
212
|
|
- frames = [merge, mergesplit]
|
213
|
|
-
|
214
|
|
- r = pd.concat(frames, axis=1, join='inner')
|
215
|
|
-
|
216
|
|
-
|
217
|
|
- df2 = r.dropna(subset=['Matched'])
|
218
|
|
-
|
219
|
|
-
|
220
|
|
- #df2['Matched'] = df2['Matched'].astype(str)
|
221
|
|
- #df2['Matched'] = df2['Matched'].astype(int)
|
222
|
|
- column_list = ['Matched', 'Imagepath', 'ImageName', 'EventName']
|
223
|
|
- df2[column_list].to_csv('events.csv', index=False)
|
224
|
|
- df2[column_list].to_json('events.json', orient="records")
|
225
|
|
-
|
226
|
|
-
|
227
|
|
-
|
228
|
|
-
|
229
|
|
-
|
230
|
|
- # import requests
|
231
|
|
- # import json
|
232
|
|
-
|
233
|
|
- # with open('events.json', 'r') as json_file:
|
234
|
|
- # json_load = json.load(json_file)
|
235
|
|
- # url = "https://eventxstreamnew.bizgaze.com:5443/apis/v4/bizgaze/integrations/events/createpredictedimage"
|
236
|
|
- # #url = "https://eventxstreamnew.bizgaze.com:5443/apis/v4/bizgaze/integrations/json/eventwisepredicts"
|
237
|
|
-
|
238
|
|
- # payload = json.dumps(json_load).replace("]", "").replace("[", "")
|
239
|
|
- # print(payload)
|
240
|
|
- # headers = {
|
241
|
|
- # 'Authorization': 'stat bcc78ad858354e759249c1770957fede',
|
242
|
|
-
|
243
|
|
-
|
244
|
|
- # 'Content-Type': 'application/json'
|
245
|
|
- # }
|
246
|
|
- # response = requests.request("POST", url, headers=headers, data=payload)
|
247
|
|
- # print("Ongoing process with event id = "+str(eventid))
|
248
|
|
- # print("##############################################################")
|
249
|
|
- # print(response.text)
|
250
|
|
-
|
251
|
|
- p.clear()
|
252
|
|
-
|
253
|
|
-
|
254
|
|
-
|
255
|
|
-
|
256
|
|
-
|
257
|
|
- # Save Image to new directory
|
258
|
|
- def saveImageToDirectory(image, name, imageName):
|
259
|
|
- """
|
260
|
|
- Saves images to directory.
|
261
|
|
-
|
262
|
|
- Parameters
|
263
|
|
- ----------
|
264
|
|
- image : cv2 mat
|
265
|
|
- Image you want to save.
|
266
|
|
- name : String
|
267
|
|
- Directory where you want the image to be saved.
|
268
|
|
- imageName : String
|
269
|
|
- Name of image.
|
270
|
|
-
|
271
|
|
- Returns
|
272
|
|
- -------
|
273
|
|
- None.
|
274
|
|
-
|
275
|
|
- """
|
276
|
|
- path = "./output/" + name
|
277
|
|
- path1 = "./output/" + name
|
278
|
|
- if os.path.exists(path):
|
279
|
|
- pass
|
280
|
|
- else:
|
281
|
|
- os.mkdir(path)
|
282
|
|
- cv2.imwrite(path + "/" + imageName, image)
|
283
|
|
- x = []
|
284
|
|
- c = (path1 + "/" + imageName)
|
285
|
|
- x.append(c)
|
286
|
|
- p.append(x)
|
287
|
|
- f_CSVwrite()
|
288
|
|
-
|
289
|
|
- # Function for creating encodings for known people
|
290
|
|
- def processKnownPeopleImages(path=People, saveLocation="./known_encodings.pickle"):
|
291
|
|
- print(People)
|
292
|
|
- """
|
293
|
|
- Process images of known people and create face encodings to compare in future.
|
294
|
|
- Eaach image should have just 1 face in it.
|
295
|
|
-
|
296
|
|
- Parameters
|
297
|
|
- ----------
|
298
|
|
- path : STRING, optional
|
299
|
|
- Path for known people dataset. The default is "C:/inetpub/vhosts/port82/wwwroot/_files/People".
|
300
|
|
- It should be noted that each image in this dataset should contain only 1 face.
|
301
|
|
- saveLocation : STRING, optional
|
302
|
|
- Path for storing encodings for known people dataset. The default is "./known_encodings.pickle in current directory".
|
303
|
|
-
|
304
|
|
- Returns
|
305
|
|
- -------
|
306
|
|
- None.
|
307
|
|
-
|
308
|
|
- """
|
309
|
|
-
|
310
|
|
- known_encodings = []
|
311
|
|
- known_names = []
|
312
|
|
- for img in os.listdir(path):
|
313
|
|
- imgPath = path + img
|
314
|
|
-
|
315
|
|
- # Read image
|
316
|
|
- image = cv2.imread(imgPath)
|
317
|
|
- name = img.rsplit('.')[0]
|
318
|
|
- # Resize
|
319
|
|
- image = cv2.resize(image, (0, 0), fx=0.6, fy=0.6, interpolation=cv2.INTER_LINEAR)
|
320
|
|
-
|
321
|
|
- # Get locations and encodings
|
322
|
|
- encs, locs = createEncodings(image)
|
323
|
|
- try:
|
324
|
|
- known_encodings.append(encs[0])
|
325
|
|
- except IndexError:
|
326
|
|
- os.remove(People+img)
|
327
|
|
- known_names.append(name)
|
328
|
|
-
|
329
|
|
- for loc in locs:
|
330
|
|
- top, right, bottom, left = loc
|
331
|
|
-
|
332
|
|
- # Show Image
|
333
|
|
- #cv2.rectangle(image, (left, top), (right, bottom), color=(255, 0, 0), thickness=2)
|
334
|
|
- # cv2.imshow("Image", image)
|
335
|
|
- # cv2.waitKey(1)
|
336
|
|
- #cv2.destroyAllWindows()
|
337
|
|
- saveEncodings(known_encodings, known_names, saveLocation)
|
338
|
|
-
|
339
|
|
- # Function for processing dataset images
|
340
|
|
- def processDatasetImages(saveLocation="./Gallery_encodings.pickle"):
|
341
|
|
- """
|
342
|
|
- Process image in dataset from where you want to separate images.
|
343
|
|
- It separates the images into directories of known people, groups and any unknown people images.
|
344
|
|
- Parameters
|
345
|
|
- ----------
|
346
|
|
- path : STRING, optional
|
347
|
|
- Path for known people dataset. The default is "D:/port1004/port1004/wwwroot/_files/People".
|
348
|
|
- It should be noted that each image in this dataset should contain only 1 face.
|
349
|
|
- saveLocation : STRING, optional
|
350
|
|
- Path for storing encodings for known people dataset. The default is "./known_encodings.pickle in current directory".
|
351
|
|
-
|
352
|
|
- Returns
|
353
|
|
- -------
|
354
|
|
- None.
|
355
|
|
-
|
356
|
|
- """
|
357
|
|
- # Read pickle file for known people to compare faces from
|
358
|
|
- people_encodings, names = readEncodingsPickle("./known_encodings.pickle")
|
359
|
|
-
|
360
|
|
-
|
361
|
|
- for root, dirs, files in os.walk(Gallery, topdown=False):
|
362
|
|
-
|
363
|
|
- for name in files:
|
364
|
|
- s = os.path.join(root, name)
|
365
|
|
- #print(p)
|
366
|
|
- # imgPath = path + img
|
367
|
|
-
|
368
|
|
- # Read image
|
369
|
|
- image = cv2.imread(s)
|
370
|
|
- orig = image.copy()
|
371
|
|
-
|
372
|
|
- # Resize
|
373
|
|
- image = cv2.resize(image, (0, 0), fx=0.6, fy=0.6, interpolation=cv2.INTER_LINEAR)
|
374
|
|
-
|
375
|
|
- # Get locations and encodings
|
376
|
|
- encs, locs = createEncodings(image)
|
377
|
|
-
|
378
|
|
- # Save image to a group image folder if more than one face is in image
|
379
|
|
- # if len(locs) > 1:
|
380
|
|
- # saveImageToDirectory(orig, "Group", img)
|
381
|
|
-
|
382
|
|
- # Processing image for each face
|
383
|
|
- i = 0
|
384
|
|
- knownFlag = 0
|
385
|
|
- for loc in locs:
|
386
|
|
- top, right, bottom, left = loc
|
387
|
|
- unknown_encoding = encs[i]
|
388
|
|
- i += 1
|
389
|
|
- acceptBool, duplicateName, distance = compareFaceEncodings(unknown_encoding, people_encodings, names)
|
390
|
|
- if acceptBool:
|
391
|
|
- saveImageToDirectory(orig, duplicateName,name)
|
392
|
|
- knownFlag = 1
|
393
|
|
- if knownFlag == 1:
|
394
|
|
- print("Match Found")
|
395
|
|
- else:
|
396
|
|
- saveImageToDirectory(orig, "0",name)
|
397
|
|
-
|
398
|
|
-
|
399
|
|
- # Show Image
|
400
|
|
- # cv2.rectangle(image, (left, top), (right, bottom), color=(255, 0, 0), thickness=2)
|
401
|
|
- # # cv2.imshow("Image", image)
|
402
|
|
- # cv2.waitKey(1)
|
403
|
|
- # cv2.destroyAllWindows()
|
404
|
|
-
|
405
|
|
-
|
406
|
|
- def main():
|
407
|
|
- """
|
408
|
|
- Main Function.
|
409
|
|
-
|
410
|
|
- Returns
|
411
|
|
- -------
|
412
|
|
- None.
|
413
|
|
-
|
414
|
|
- """
|
415
|
|
-
|
416
|
|
- processKnownPeopleImages()
|
417
|
|
- processDatasetImages()
|
418
|
|
-
|
419
|
|
- # import pandas as pd
|
420
|
|
- # q = pd.DataFrame(p)
|
421
|
|
- # df1 = q
|
422
|
|
- # print(df1)
|
423
|
|
- # # df1.to_csv('m.csv')
|
424
|
|
-
|
425
|
|
- # import pandas as pd
|
426
|
|
- # import os
|
427
|
|
- # c = []
|
428
|
|
- # for root, dirs, files in os.walk(Gallery, topdown=False):
|
429
|
|
- # for name in files:
|
430
|
|
- # L = os.path.join(root, name)
|
431
|
|
- # c.append(L)
|
432
|
|
- # df2 = pd.DataFrame(c)
|
433
|
|
- # # df.to_csv('oswalk.csv')
|
434
|
|
- # import pandas as pd
|
435
|
|
- # # df1 = pd.read_csv('m.csv')
|
436
|
|
- # # df2 = pd.read_csv('oswalk.csv')
|
437
|
|
- # df1 = df1[0].str.split('/', expand=True)
|
438
|
|
- # df1.rename({df1.columns[-2]: 'Matched'}, axis=1, inplace=True)
|
439
|
|
- # df1.rename({df1.columns[-1]: 'test'}, axis=1, inplace=True)
|
440
|
|
- # df2 = df2[0].str.split("\\", expand=True)
|
441
|
|
- # df2.rename({df2.columns[-1]: 'test'}, axis=1, inplace=True)
|
442
|
|
- # df2.rename({df2.columns[-2]: 'Eventname'}, axis=1, inplace=True)
|
443
|
|
- # merge = pd.merge(df2, df1, on='test', how='left')
|
444
|
|
- # mergesplit = merge.loc[:, 'test'].str.split(".", expand=True)
|
445
|
|
- # mergesplit.rename({mergesplit.columns[-2]: 'ImageName'}, axis=1, inplace=True)
|
446
|
|
- # mergesplit = mergesplit.loc[:, 'ImageName']
|
447
|
|
- # merge['Imagepath'] = "/_files/1/Gallery/" + merge['Eventname'] + '/' + merge['test']
|
448
|
|
- # frames = [merge, mergesplit]
|
449
|
|
- # r = pd.concat(frames, axis=1, join='inner')
|
450
|
|
- # first_column = r.pop('Matched')
|
451
|
|
- # r.insert(0, 'Matched', first_column)
|
452
|
|
- # second_column = r.pop('Imagepath')
|
453
|
|
- # r.insert(1, 'Imagepath', second_column)
|
454
|
|
- # third_column = r.pop('ImageName')
|
455
|
|
- # r.insert(2, 'ImageName', third_column)
|
456
|
|
- # fourth_column = r.pop('Eventname')
|
457
|
|
- # r.insert(3, 'Eventname', fourth_column)
|
458
|
|
- # r = r.iloc[:, 0:4]
|
459
|
|
- # r.sort_values(by=['Matched'], inplace=True)
|
460
|
|
- # print(r)
|
461
|
|
- # r.to_csv('path.csv', index=False)
|
462
|
|
- # r.to_json(r'matched.json', orient="records")
|
463
|
|
- print("process Ended with event id = "+str(eventid))
|
464
|
|
-
|
465
|
|
- main()
|
466
|
|
-
|
467
|
|
-
|
468
|
|
-
|
469
|
|
-
|
470
|
|
-
|
471
|
|
-
|
472
|
|
-
|
473
|
|
-
|
474
|
|
-
|
475
|
|
-
|
476
|
|
-
|
477
|
|
-
|
478
|
|
-@app.route('/eventwise', methods=["GET", "POST"])
|
479
|
|
-def eventwise():
|
480
|
|
- if __name__ == "__main__":
|
481
|
|
-
|
482
|
|
- url_list=[]
|
483
|
|
- Dataset= request.args.get('Dataset')
|
484
|
|
- # id = "100013660000125"
|
485
|
|
- url_list.append(Dataset)
|
486
|
|
- # multiprocessing
|
487
|
|
- with multiprocessing.Pool(processes=10) as pool:
|
488
|
|
- results = pool.map(download,url_list)
|
489
|
|
- pool.close()
|
490
|
|
- return "none"
|
491
|
|
-
|
492
|
|
-if __name__ == "__main__":
|
|
1
|
+import requests
|
|
2
|
+import time
|
|
3
|
+import multiprocessing
|
|
4
|
+from PIL import Image
|
|
5
|
+from functools import partial
|
|
6
|
+import queue
|
|
7
|
+import pickle
|
|
8
|
+import time
|
|
9
|
+
|
|
10
|
+import numpy as np
|
|
11
|
+import face_recognition
|
|
12
|
+import os
|
|
13
|
+from flask import Flask, render_template, request, redirect, send_file
|
|
14
|
+# import shutil
|
|
15
|
+import cv2
|
|
16
|
+import datetime
|
|
17
|
+from flask import request
|
|
18
|
+
|
|
19
|
+# Gallery = "D:/share/biz/mt/Copy_Gallery/" + str(seconds).replace("]", "").replace("[", "").replace("'", "")
|
|
20
|
+# People = 'D:/share/biz/mt/People/' + str(seconds).replace("]", "").replace("[", "").replace("'", "") + "/"
|
|
21
|
+app = Flask(__name__)
|
|
22
|
+
|
|
23
|
@app.route('/', methods=["GET", "POST"])
def home():
    """Health-check endpoint: confirms the event service is running."""
    return "EVENT APP RUNNING.............."
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
|
|
29
|
def download(eventid):
    """Run the full face-matching pipeline for one event.

    Encodes one face per guest image, scans the event gallery for matching
    faces, copies matched images into ``./output/<guest>/`` and writes the
    aggregated results to ``events.csv`` and ``events.json``.

    Parameters
    ----------
    eventid : str
        Event identifier; selects the gallery and guest-image folders
        under /home/ubuntu/AI/Events/.

    Returns
    -------
    None.
    """
    print("process started with event id = " + str(eventid))

    Gallery = "/home/ubuntu/AI/Events/Gallery/" + eventid + "/"
    People = "/home/ubuntu/AI/Events/guestimage/" + eventid + "/"

    def saveEncodings(encs, names, fname="encodings.pickle"):
        """Serialize (name, encoding) pairs to a pickle file.

        Parameters
        ----------
        encs : list of np arrays
            Face encodings.
        names : list of strings
            One name per encoding.
        fname : str, optional
            Destination pickle path. Default "encodings.pickle".
        """
        data = [{"name": nm, "encoding": enc} for nm, enc in zip(names, encs)]
        print("[INFO] serializing encodings...")
        # FIX: use a context manager so the handle is closed even on error
        # (original left the file object open on exceptions).
        with open(fname, "wb") as f:
            f.write(pickle.dumps(data))

    def readEncodingsPickle(fname):
        """Load encodings and names previously written by saveEncodings.

        Returns
        -------
        encodings : list of np arrays
        names : list of str
        """
        with open(fname, "rb") as f:
            data = pickle.loads(f.read())
        # NOTE: original wrapped `data` in np.array() for no effect; the
        # list of dicts is iterated directly here.
        encodings = [d["encoding"] for d in data]
        names = [d["name"] for d in data]
        return encodings, names

    def createEncodings(image):
        """Return (encodings, face_locations) for every face in *image*."""
        print("encoding..")
        face_locations = face_recognition.face_locations(image)
        known_encodings = face_recognition.face_encodings(
            image, known_face_locations=face_locations)
        return known_encodings, face_locations

    def compareFaceEncodings(unknown_encoding, known_encodings, known_names):
        """Compare one unknown encoding against all known encodings.

        Returns
        -------
        acceptBool : bool
            True if the best match is within tolerance (0.47).
        duplicateName : str
            Name of the matched person, or "" if no match.
        distance : float
            Distance to the best match.
        """
        matches = face_recognition.compare_faces(
            known_encodings, unknown_encoding, tolerance=0.47)
        face_distances = face_recognition.face_distance(
            known_encodings, unknown_encoding)
        best_match_index = np.argmin(face_distances)
        distance = face_distances[best_match_index]
        if matches[best_match_index]:
            return True, known_names[best_match_index], distance
        return False, "", distance

    # Accumulates saved-image paths between CSV dumps; cleared by f_CSVwrite.
    p = []

    def f_CSVwrite():
        """Aggregate matched-image paths and write events.csv / events.json."""
        import pandas as pd

        q = pd.DataFrame(p)
        df = q.groupby([0], as_index=False).count()
        z = df[0].str.split('/', expand=True)
        z.to_csv('zzzzzzzzzzzzz.csv', index=False)
        df2 = pd.read_csv('zzzzzzzzzzzzz.csv')
        df2.rename({df2.columns[-1]: 'test'}, axis=1, inplace=True)
        df2.rename({df2.columns[-2]: 'Matched'}, axis=1, inplace=True)
        df2 = df2[['Matched', 'test']]

        # Index every file in the event gallery to recover each image's
        # event folder for the merge below.
        c = []
        for root, dirs, files in os.walk(Gallery, topdown=False):
            for name in files:
                c.append(os.path.join(root, name))
        df = pd.DataFrame(c)
        df1 = df[0].str.split("/", expand=True)
        df1.rename({df1.columns[-1]: 'test'}, axis=1, inplace=True)

        merge = pd.merge(df2, df1, on='test', how='left')
        merge.rename({merge.columns[-1]: 'EventName'}, axis=1, inplace=True)
        mergesplit = merge.loc[:, 'test'].str.split(".", expand=True)
        mergesplit.rename({mergesplit.columns[-2]: 'ImageName'},
                          axis=1, inplace=True)
        mergesplit = mergesplit.loc[:, 'ImageName']

        # BUG FIX: original wrote `'\\' + + merge['test']` — the stray unary
        # plus is applied to a string Series and raises TypeError at runtime.
        merge['Imagepath'] = ("\\_files\\1\\Gallery\\" + merge['EventName']
                              + '\\' + merge['test'])

        r = pd.concat([merge, mergesplit], axis=1, join='inner')
        df2 = r.dropna(subset=['Matched'])
        column_list = ['Matched', 'Imagepath', 'ImageName', 'EventName']
        df2[column_list].to_csv('events.csv', index=False)
        df2[column_list].to_json('events.json', orient="records")
        p.clear()

    def saveImageToDirectory(image, name, imageName):
        """Save *image* under ./output/<name>/ and record its path.

        Parameters
        ----------
        image : cv2 mat
            Image to save.
        name : str
            Sub-directory (matched guest name, or "0" for unmatched).
        imageName : str
            File name for the saved image.
        """
        path = "./output/" + name
        if not os.path.exists(path):
            os.mkdir(path)
        cv2.imwrite(path + "/" + imageName, image)
        p.append([path + "/" + imageName])
        f_CSVwrite()

    def processKnownPeopleImages(path=People,
                                 saveLocation="./known_encodings.pickle"):
        """Encode exactly one face per guest image and pickle the results.

        Images in which no face is detected are deleted and skipped.
        """
        print(People)
        known_encodings = []
        known_names = []
        for img in os.listdir(path):
            imgPath = path + img
            image = cv2.imread(imgPath)
            name = img.rsplit('.')[0]
            image = cv2.resize(image, (0, 0), fx=0.6, fy=0.6,
                               interpolation=cv2.INTER_LINEAR)
            encs, locs = createEncodings(image)
            try:
                known_encodings.append(encs[0])
            except IndexError:
                # No face found: delete the unusable image and skip it.
                # BUG FIX: original still appended `name` on this path,
                # desynchronizing known_names from known_encodings so every
                # later image matched the wrong person.
                os.remove(People + img)
                continue
            known_names.append(name)
        saveEncodings(known_encodings, known_names, saveLocation)

    def processDatasetImages(saveLocation="./Gallery_encodings.pickle"):
        """Match every gallery image's faces against the known encodings.

        Matched faces are saved under the guest's output folder; images
        with no match at all are saved under "0".
        """
        people_encodings, names = readEncodingsPickle("./known_encodings.pickle")
        for root, dirs, files in os.walk(Gallery, topdown=False):
            for name in files:
                s = os.path.join(root, name)
                image = cv2.imread(s)
                orig = image.copy()
                image = cv2.resize(image, (0, 0), fx=0.6, fy=0.6,
                                   interpolation=cv2.INTER_LINEAR)
                encs, locs = createEncodings(image)
                i = 0
                knownFlag = 0
                for loc in locs:
                    unknown_encoding = encs[i]
                    i += 1
                    acceptBool, duplicateName, distance = compareFaceEncodings(
                        unknown_encoding, people_encodings, names)
                    if acceptBool:
                        # Save the un-resized original so output keeps full
                        # resolution.
                        saveImageToDirectory(orig, duplicateName, name)
                        knownFlag = 1
                if knownFlag == 1:
                    print("Match Found")
                else:
                    saveImageToDirectory(orig, "0", name)

    def main():
        """Run both pipeline stages for this event."""
        processKnownPeopleImages()
        processDatasetImages()
        print("process Ended with event id = " + str(eventid))

    main()
|
|
466
|
+
|
|
467
|
+
|
|
468
|
+
|
|
469
|
+
|
|
470
|
+
|
|
471
|
+
|
|
472
|
+
|
|
473
|
+
|
|
474
|
+
|
|
475
|
+
|
|
476
|
+
|
|
477
|
+
|
|
478
|
@app.route('/eventwise', methods=["GET", "POST"])
def eventwise():
    """Kick off face matching for the event id given as ``?Dataset=...``.

    BUG FIX: the original wrapped the entire body in
    ``if __name__ == "__main__":``. Under any WSGI server (gunicorn, uwsgi)
    ``__name__`` is the module name, so the view did nothing and returned
    None — a 500 in Flask. The guard is removed.

    Returns
    -------
    str
        "Done" once the worker pool has finished.
    """
    url_list = []
    Dataset = request.args.get('Dataset')
    url_list.append(Dataset)
    # Run the (long) matching job in worker processes so the request
    # thread is not blocked by CPU-bound face encoding.
    with multiprocessing.Pool(processes=10) as pool:
        results = pool.map(download, url_list)
        pool.close()
    return "Done"
|
|
491
|
+
|
|
492
|
if __name__ == "__main__":
    # Development entry point: bind on all interfaces, port 8081.
    # NOTE(review): Flask's built-in server is not for production — run
    # under a WSGI server instead.
    app.run(host="0.0.0.0", port=8081)
|