Browse source

Upload files to 'Events/src'

SadhulaSaiKumar 1 year ago
parent
commit
4cdbec6f68
2 changed files with 563 additions and 546 deletions
  1. Events/src/multi_pcs.py  (+492, -492)
  2. Events/src/myproject.py  (+71, -54)

Events/src/multi_pcs.py  (+492, -492)  View file

@@ -1,493 +1,493 @@
1
-import requests
2
-import time
3
-import multiprocessing
4
-from PIL import Image
5
-from functools import partial
6
-import queue
7
-import pickle
8
-import time
9
-
10
-import numpy as np
11
-import face_recognition
12
-import os
13
-from flask import Flask, render_template, request, redirect, send_file
14
-# import shutil
15
-import cv2
16
-import datetime
17
-from flask import request
18
-
19
-# Gallery = "D:/share/biz/mt/Copy_Gallery/" + str(seconds).replace("]", "").replace("[", "").replace("'", "")
20
-# People = 'D:/share/biz/mt/People/' + str(seconds).replace("]", "").replace("[", "").replace("'", "") + "/"
21
-app = Flask(__name__)
22
-
23
-@app.route('/', methods=["GET", "POST"])
24
-def home():
25
-    return "EVENT APP RUNNING.............."
26
-
27
-
28
-
29
-def download(eventid):
30
-    print("process started with event id = "+str(eventid))
31
-
32
-
33
-    Gallery = "/home/ubuntu/AI/Events/Gallery/" + eventid+ "/" 
34
-    People = "/home/ubuntu/AI/Events/guestimage/"+ eventid + "/"
35
-
36
-
37
-
38
-
39
-
40
-    def saveEncodings(encs, names, fname="encodings.pickle"):
41
-        """
42
-        Save encodings in a pickle file to be used in future.
43
-
44
-        Parameters
45
-        ----------
46
-        encs : List of np arrays
47
-            List of face encodings.
48
-        names : List of strings
49
-            List of names for each face encoding.
50
-        fname : String, optional
51
-            Name/Location for pickle file. The default is "encodings.pickle".
52
-
53
-        Returns
54
-        -------
55
-        None.
56
-
57
-        """
58
-
59
-        data = []
60
-        d = [{"name": nm, "encoding": enc} for (nm, enc) in zip(names, encs)]
61
-        data.extend(d)
62
-
63
-        encodingsFile = fname
64
-
65
-        # dump the facial encodings data to disk
66
-        print("[INFO] serializing encodings...")
67
-        f = open(encodingsFile, "wb")
68
-        f.write(pickle.dumps(data))
69
-        f.close()
70
-
71
-        # Function to read encodings
72
-
73
-    def readEncodingsPickle(fname):
74
-        """
75
-        Read Pickle file.
76
-
77
-        Parameters
78
-        ----------
79
-        fname : String
80
-            Name of pickle file.(Full location)
81
-
82
-        Returns
83
-        -------
84
-        encodings : list of np arrays
85
-            list of all saved encodings
86
-        names : List of Strings
87
-            List of all saved names
88
-
89
-        """
90
-
91
-        data = pickle.loads(open(fname, "rb").read())
92
-        data = np.array(data)
93
-        encodings = [d["encoding"] for d in data]
94
-        names = [d["name"] for d in data]
95
-        return encodings, names
96
-
97
-    # Function to create encodings and get face locations
98
-    def createEncodings(image):
99
-        print("encoding..")
100
-        #print('Detecting_face...........')
101
-        """
102
-        Create face encodings for a given image and also return face locations in the given image.
103
-
104
-        Parameters
105
-        ----------
106
-        image : cv2 mat
107
-            Image you want to detect faces from.
108
-
109
-        Returns
110
-        -------
111
-        known_encodings : list of np array
112
-            List of face encodings in a given image
113
-        face_locations : list of tuples
114
-            list of tuples for face locations in a given image
115
-
116
-        """
117
-
118
-        # Find face locations for all faces in an image
119
-        face_locations = face_recognition.face_locations(image)
120
-
121
-        # Create encodings for all faces in an image
122
-        known_encodings = face_recognition.face_encodings(image, known_face_locations=face_locations)
123
-        return known_encodings, face_locations
124
-
125
-    # Function to compare encodings
126
-    def compareFaceEncodings(unknown_encoding, known_encodings, known_names):
127
-        """
128
-        Compares face encodings to check if 2 faces are same or not.
129
-
130
-        Parameters
131
-        ----------
132
-        unknown_encoding : np array
133
-            Face encoding of unknown people.
134
-        known_encodings : np array
135
-            Face encodings of known people.
136
-        known_names : list of strings
137
-            Names of known people
138
-
139
-        Returns
140
-        -------
141
-        acceptBool : Bool
142
-            face matched or not
143
-        duplicateName : String
144
-            Name of matched face
145
-        distance : Float
146
-            Distance between 2 faces
147
-
148
-        """
149
-        duplicateName = ""
150
-        distance = 0.0
151
-        matches = face_recognition.compare_faces(known_encodings, unknown_encoding, tolerance=0.47)
152
-        face_distances = face_recognition.face_distance(known_encodings, unknown_encoding)
153
-        best_match_index = np.argmin(face_distances)
154
-        distance = face_distances[best_match_index]
155
-        if matches[best_match_index]:
156
-            acceptBool = True
157
-            duplicateName = known_names[best_match_index]
158
-        else:
159
-            acceptBool = False
160
-            duplicateName = ""
161
-        return acceptBool, duplicateName, distance
162
-
163
-    p = []
164
-
165
-    def f_CSVwrite():
166
-        import pandas as pd
167
-        q = pd.DataFrame(p)
168
-        #print(q)
169
-        m = q
170
-        # print(m)
171
-        #   x.drop(x.columns[Unnam], axis=1, inplace=True)
172
-        df = m.groupby([0], as_index=False).count()
173
-        z = df[0].str.split('/', expand=True)
174
-
175
-
176
-        z.to_csv('zzzzzzzzzzzzz.csv',index=False)
177
-        import pandas as pd
178
-        df2 = pd.read_csv('zzzzzzzzzzzzz.csv')
179
-        df2.rename({df2.columns[-1]: 'test'}, axis=1, inplace=True)
180
-        df2.rename({df2.columns[-2]: 'Matched'}, axis=1, inplace=True)
181
-        df2 = df2[['Matched', 'test']]
182
-
183
-
184
-        import pandas as pd
185
-        import os
186
-        c = []
187
-        for root, dirs, files in os.walk(Gallery,
188
-                                         topdown=False):
189
-            for name in files:
190
-                # print(name)
191
-                L = os.path.join(root, name)
192
-                c.append(L)
193
-        df = pd.DataFrame(c)
194
-
195
-        df1 = df[0].str.split("/", expand=True)
196
-        #df1.rename({df1.columns[-2]: 'abc'}, axis=1, inplace=True)
197
-        # print('this is df1')
198
-        # print(df1)
199
-        df1.rename({df1.columns[-1]: 'test'}, axis=1, inplace=True)
200
-        merge = pd.merge(df2, df1, on='test', how='left')
201
-        merge.rename({merge.columns[-1]: 'EventName'}, axis=1, inplace=True)
202
-        # merge.to_csv('merge.csv')
203
-        mergesplit = merge.loc[:, 'test'].str.split(".", expand=True)
204
-        mergesplit.rename({mergesplit.columns[-2]: 'ImageName'}, axis=1, inplace=True)
205
-        mergesplit = mergesplit.loc[:, 'ImageName']
206
-
207
-        #merge.rename({merge.columns[-1]: 'Matched'}, axis=1, inplace=True)
208
-        #merge['EventName'] = merge['abc']
209
-        merge['Imagepath'] = "\\_files\\1\\Gallery\\" + merge['EventName'] + '\\' + + merge['test']
210
-
211
-
212
-        frames = [merge, mergesplit]
213
-
214
-        r = pd.concat(frames, axis=1, join='inner')
215
-
216
-
217
-        df2 = r.dropna(subset=['Matched'])
218
-
219
-
220
-        #df2['Matched'] = df2['Matched'].astype(str)
221
-        #df2['Matched'] = df2['Matched'].astype(int)
222
-        column_list = ['Matched', 'Imagepath', 'ImageName', 'EventName']
223
-        df2[column_list].to_csv('events.csv', index=False)
224
-        df2[column_list].to_json('events.json', orient="records")
225
-
226
-
227
-
228
-
229
-
230
-        # import requests
231
-        # import json
232
-
233
-        # with open('events.json', 'r') as json_file:
234
-        #     json_load = json.load(json_file)
235
-        #     url = "https://eventxstreamnew.bizgaze.com:5443/apis/v4/bizgaze/integrations/events/createpredictedimage"
236
-        #     #url = "https://eventxstreamnew.bizgaze.com:5443/apis/v4/bizgaze/integrations/json/eventwisepredicts"
237
-
238
-        # payload = json.dumps(json_load).replace("]", "").replace("[", "")
239
-        # print(payload)
240
-        # headers = {
241
-        #     'Authorization': 'stat bcc78ad858354e759249c1770957fede',
242
-
243
-            
244
-        #     'Content-Type': 'application/json'
245
-        #     }
246
-        # response = requests.request("POST", url, headers=headers, data=payload)
247
-        # print("Ongoing process with event id = "+str(eventid))
248
-        # print("##############################################################")
249
-        # print(response.text)
250
-
251
-        p.clear()
252
-
253
-
254
-
255
-
256
-
257
-    # Save Image to new directory
258
-    def saveImageToDirectory(image, name, imageName):
259
-        """
260
-        Saves images to directory.
261
-
262
-        Parameters
263
-        ----------
264
-        image : cv2 mat
265
-            Image you want to save.
266
-        name : String
267
-            Directory where you want the image to be saved.
268
-        imageName : String
269
-            Name of image.
270
-
271
-        Returns
272
-        -------
273
-        None.
274
-
275
-        """
276
-        path = "./output/" + name
277
-        path1 = "./output/" + name
278
-        if os.path.exists(path):
279
-            pass
280
-        else:
281
-            os.mkdir(path)
282
-        cv2.imwrite(path + "/" + imageName, image)
283
-        x = []
284
-        c = (path1 + "/" + imageName)
285
-        x.append(c)
286
-        p.append(x)
287
-        f_CSVwrite()
288
-
289
-    # Function for creating encodings for known people
290
-    def processKnownPeopleImages(path=People, saveLocation="./known_encodings.pickle"):
291
-        print(People)
292
-        """
293
-        Process images of known people and create face encodings to compare in future.
294
-        Eaach image should have just 1 face in it.
295
-
296
-        Parameters
297
-        ----------
298
-        path : STRING, optional
299
-            Path for known people dataset. The default is "C:/inetpub/vhosts/port82/wwwroot/_files/People".
300
-            It should be noted that each image in this dataset should contain only 1 face.
301
-        saveLocation : STRING, optional
302
-            Path for storing encodings for known people dataset. The default is "./known_encodings.pickle in current directory".
303
-
304
-        Returns
305
-        -------
306
-        None.
307
-
308
-        """
309
-
310
-        known_encodings = []
311
-        known_names = []
312
-        for img in os.listdir(path):
313
-            imgPath = path + img
314
-
315
-            # Read image
316
-            image = cv2.imread(imgPath)
317
-            name = img.rsplit('.')[0]
318
-            # Resize
319
-            image = cv2.resize(image, (0, 0), fx=0.6, fy=0.6, interpolation=cv2.INTER_LINEAR)
320
-
321
-            # Get locations and encodings
322
-            encs, locs = createEncodings(image)
323
-            try:
324
-                known_encodings.append(encs[0])
325
-            except IndexError:
326
-                os.remove(People+img)
327
-            known_names.append(name)
328
-
329
-            for loc in locs:
330
-                top, right, bottom, left = loc
331
-
332
-            # Show Image
333
-            #cv2.rectangle(image, (left, top), (right, bottom), color=(255, 0, 0), thickness=2)
334
-            # cv2.imshow("Image", image)
335
-           # cv2.waitKey(1)
336
-            #cv2.destroyAllWindows()
337
-        saveEncodings(known_encodings, known_names, saveLocation)
338
-
339
-    # Function for processing dataset images
340
-    def processDatasetImages(saveLocation="./Gallery_encodings.pickle"):
341
-        """
342
-        Process image in dataset from where you want to separate images.
343
-        It separates the images into directories of known people, groups and any unknown people images.
344
-        Parameters
345
-        ----------
346
-        path : STRING, optional
347
-            Path for known people dataset. The default is "D:/port1004/port1004/wwwroot/_files/People".
348
-            It should be noted that each image in this dataset should contain only 1 face.
349
-        saveLocation : STRING, optional
350
-            Path for storing encodings for known people dataset. The default is "./known_encodings.pickle in current directory".
351
-
352
-        Returns
353
-        -------
354
-        None.
355
-
356
-        """
357
-        # Read pickle file for known people to compare faces from
358
-        people_encodings, names = readEncodingsPickle("./known_encodings.pickle")
359
-
360
-
361
-        for root, dirs, files in os.walk(Gallery, topdown=False):
362
-
363
-            for name in files:
364
-                s = os.path.join(root, name)
365
-                #print(p)
366
-          #  imgPath = path + img
367
-
368
-            # Read image
369
-                image = cv2.imread(s)
370
-                orig = image.copy()
371
-
372
-            # Resize
373
-                image = cv2.resize(image, (0, 0), fx=0.6, fy=0.6, interpolation=cv2.INTER_LINEAR)
374
-
375
-            # Get locations and encodings
376
-                encs, locs = createEncodings(image)
377
-
378
-            # Save image to a group image folder if more than one face is in image
379
-            # if len(locs) > 1:
380
-            #     saveImageToDirectory(orig, "Group", img)
381
-
382
-            # Processing image for each face
383
-                i = 0
384
-                knownFlag = 0
385
-                for loc in locs:
386
-                    top, right, bottom, left = loc
387
-                    unknown_encoding = encs[i]
388
-                    i += 1
389
-                    acceptBool, duplicateName, distance = compareFaceEncodings(unknown_encoding, people_encodings, names)
390
-                    if acceptBool:
391
-                        saveImageToDirectory(orig, duplicateName,name)
392
-                        knownFlag = 1
393
-                if knownFlag == 1:
394
-                    print("Match Found")
395
-                else:
396
-                    saveImageToDirectory(orig, "0",name)
397
-
398
-
399
-            # Show Image
400
-            # cv2.rectangle(image, (left, top), (right, bottom), color=(255, 0, 0), thickness=2)
401
-            # # cv2.imshow("Image", image)
402
-            # cv2.waitKey(1)
403
-            # cv2.destroyAllWindows()
404
-
405
-
406
-    def main():
407
-        """
408
-        Main Function.
409
-
410
-        Returns
411
-        -------
412
-        None.
413
-
414
-        """
415
-
416
-        processKnownPeopleImages()
417
-        processDatasetImages()
418
-
419
-        # import pandas as pd
420
-        # q = pd.DataFrame(p)
421
-        # df1 = q
422
-        # print(df1)
423
-        # # df1.to_csv('m.csv')
424
-
425
-        # import pandas as pd
426
-        # import os
427
-        # c = []
428
-        # for root, dirs, files in os.walk(Gallery, topdown=False):
429
-        #     for name in files:
430
-        #         L = os.path.join(root, name)
431
-        #         c.append(L)
432
-        # df2 = pd.DataFrame(c)
433
-        # # df.to_csv('oswalk.csv')
434
-        # import pandas as pd
435
-        # # df1 = pd.read_csv('m.csv')
436
-        # # df2 = pd.read_csv('oswalk.csv')
437
-        # df1 = df1[0].str.split('/', expand=True)
438
-        # df1.rename({df1.columns[-2]: 'Matched'}, axis=1, inplace=True)
439
-        # df1.rename({df1.columns[-1]: 'test'}, axis=1, inplace=True)
440
-        # df2 = df2[0].str.split("\\", expand=True)
441
-        # df2.rename({df2.columns[-1]: 'test'}, axis=1, inplace=True)
442
-        # df2.rename({df2.columns[-2]: 'Eventname'}, axis=1, inplace=True)
443
-        # merge = pd.merge(df2, df1, on='test', how='left')
444
-        # mergesplit = merge.loc[:, 'test'].str.split(".", expand=True)
445
-        # mergesplit.rename({mergesplit.columns[-2]: 'ImageName'}, axis=1, inplace=True)
446
-        # mergesplit = mergesplit.loc[:, 'ImageName']
447
-        # merge['Imagepath'] = "/_files/1/Gallery/" + merge['Eventname'] + '/' + merge['test']
448
-        # frames = [merge, mergesplit]
449
-        # r = pd.concat(frames, axis=1, join='inner')
450
-        # first_column = r.pop('Matched')
451
-        # r.insert(0, 'Matched', first_column)
452
-        # second_column = r.pop('Imagepath')
453
-        # r.insert(1, 'Imagepath', second_column)
454
-        # third_column = r.pop('ImageName')
455
-        # r.insert(2, 'ImageName', third_column)
456
-        # fourth_column = r.pop('Eventname')
457
-        # r.insert(3, 'Eventname', fourth_column)
458
-        # r = r.iloc[:, 0:4]
459
-        # r.sort_values(by=['Matched'], inplace=True)
460
-        # print(r)
461
-        # r.to_csv('path.csv', index=False)
462
-        # r.to_json(r'matched.json', orient="records")
463
-        print("process Ended with event id = "+str(eventid))
464
-
465
-    main()
466
-
467
-
468
-
469
-
470
-
471
-
472
-
473
-
474
-
475
-
476
-
477
-
478
-@app.route('/eventwise', methods=["GET", "POST"])
479
-def eventwise():
480
-    if __name__ == "__main__":
481
-
482
-        url_list=[]
483
-        Dataset= request.args.get('Dataset')
484
-        # id = "100013660000125"
485
-        url_list.append(Dataset)
486
-        # multiprocessing
487
-        with multiprocessing.Pool(processes=10) as pool:
488
-            results = pool.map(download,url_list)
489
-        pool.close()
490
-        return "none"
491
-
492
-if __name__ == "__main__":
1
+import requests
2
+import time
3
+import multiprocessing
4
+from PIL import Image
5
+from functools import partial
6
+import queue
7
+import pickle
8
+import time
9
+
10
+import numpy as np
11
+import face_recognition
12
+import os
13
+from flask import Flask, render_template, request, redirect, send_file
14
+# import shutil
15
+import cv2
16
+import datetime
17
+from flask import request
18
+
19
+# Gallery = "D:/share/biz/mt/Copy_Gallery/" + str(seconds).replace("]", "").replace("[", "").replace("'", "")
20
+# People = 'D:/share/biz/mt/People/' + str(seconds).replace("]", "").replace("[", "").replace("'", "") + "/"
21
+app = Flask(__name__)
22
+
23
+@app.route('/', methods=["GET", "POST"])
24
+def home():
25
+    return "EVENT APP RUNNING.............."
26
+
27
+
28
+
29
+def download(eventid):
30
+    print("process started with event id = "+str(eventid))
31
+
32
+
33
+    Gallery = "/home/ubuntu/AI/Events/Gallery/" + eventid+ "/" 
34
+    People = "/home/ubuntu/AI/Events/guestimage/"+ eventid + "/"
35
+
36
+
37
+
38
+
39
+
40
+    def saveEncodings(encs, names, fname="encodings.pickle"):
41
+        """
42
+        Save encodings in a pickle file to be used in future.
43
+
44
+        Parameters
45
+        ----------
46
+        encs : List of np arrays
47
+            List of face encodings.
48
+        names : List of strings
49
+            List of names for each face encoding.
50
+        fname : String, optional
51
+            Name/Location for pickle file. The default is "encodings.pickle".
52
+
53
+        Returns
54
+        -------
55
+        None.
56
+
57
+        """
58
+
59
+        data = []
60
+        d = [{"name": nm, "encoding": enc} for (nm, enc) in zip(names, encs)]
61
+        data.extend(d)
62
+
63
+        encodingsFile = fname
64
+
65
+        # dump the facial encodings data to disk
66
+        print("[INFO] serializing encodings...")
67
+        f = open(encodingsFile, "wb")
68
+        f.write(pickle.dumps(data))
69
+        f.close()
70
+
71
+        # Function to read encodings
72
+
73
+    def readEncodingsPickle(fname):
74
+        """
75
+        Read Pickle file.
76
+
77
+        Parameters
78
+        ----------
79
+        fname : String
80
+            Name of pickle file.(Full location)
81
+
82
+        Returns
83
+        -------
84
+        encodings : list of np arrays
85
+            list of all saved encodings
86
+        names : List of Strings
87
+            List of all saved names
88
+
89
+        """
90
+
91
+        data = pickle.loads(open(fname, "rb").read())
92
+        data = np.array(data)
93
+        encodings = [d["encoding"] for d in data]
94
+        names = [d["name"] for d in data]
95
+        return encodings, names
96
+
97
+    # Function to create encodings and get face locations
98
+    def createEncodings(image):
99
+        print("encoding..")
100
+        #print('Detecting_face...........')
101
+        """
102
+        Create face encodings for a given image and also return face locations in the given image.
103
+
104
+        Parameters
105
+        ----------
106
+        image : cv2 mat
107
+            Image you want to detect faces from.
108
+
109
+        Returns
110
+        -------
111
+        known_encodings : list of np array
112
+            List of face encodings in a given image
113
+        face_locations : list of tuples
114
+            list of tuples for face locations in a given image
115
+
116
+        """
117
+
118
+        # Find face locations for all faces in an image
119
+        face_locations = face_recognition.face_locations(image)
120
+
121
+        # Create encodings for all faces in an image
122
+        known_encodings = face_recognition.face_encodings(image, known_face_locations=face_locations)
123
+        return known_encodings, face_locations
124
+
125
+    # Function to compare encodings
126
+    def compareFaceEncodings(unknown_encoding, known_encodings, known_names):
127
+        """
128
+        Compares face encodings to check if 2 faces are same or not.
129
+
130
+        Parameters
131
+        ----------
132
+        unknown_encoding : np array
133
+            Face encoding of unknown people.
134
+        known_encodings : np array
135
+            Face encodings of known people.
136
+        known_names : list of strings
137
+            Names of known people
138
+
139
+        Returns
140
+        -------
141
+        acceptBool : Bool
142
+            face matched or not
143
+        duplicateName : String
144
+            Name of matched face
145
+        distance : Float
146
+            Distance between 2 faces
147
+
148
+        """
149
+        duplicateName = ""
150
+        distance = 0.0
151
+        matches = face_recognition.compare_faces(known_encodings, unknown_encoding, tolerance=0.47)
152
+        face_distances = face_recognition.face_distance(known_encodings, unknown_encoding)
153
+        best_match_index = np.argmin(face_distances)
154
+        distance = face_distances[best_match_index]
155
+        if matches[best_match_index]:
156
+            acceptBool = True
157
+            duplicateName = known_names[best_match_index]
158
+        else:
159
+            acceptBool = False
160
+            duplicateName = ""
161
+        return acceptBool, duplicateName, distance
162
+
163
+    p = []
164
+
165
+    def f_CSVwrite():
166
+        import pandas as pd
167
+        q = pd.DataFrame(p)
168
+        #print(q)
169
+        m = q
170
+        # print(m)
171
+        #   x.drop(x.columns[Unnam], axis=1, inplace=True)
172
+        df = m.groupby([0], as_index=False).count()
173
+        z = df[0].str.split('/', expand=True)
174
+
175
+
176
+        z.to_csv('zzzzzzzzzzzzz.csv',index=False)
177
+        import pandas as pd
178
+        df2 = pd.read_csv('zzzzzzzzzzzzz.csv')
179
+        df2.rename({df2.columns[-1]: 'test'}, axis=1, inplace=True)
180
+        df2.rename({df2.columns[-2]: 'Matched'}, axis=1, inplace=True)
181
+        df2 = df2[['Matched', 'test']]
182
+
183
+
184
+        import pandas as pd
185
+        import os
186
+        c = []
187
+        for root, dirs, files in os.walk(Gallery,
188
+                                         topdown=False):
189
+            for name in files:
190
+                # print(name)
191
+                L = os.path.join(root, name)
192
+                c.append(L)
193
+        df = pd.DataFrame(c)
194
+
195
+        df1 = df[0].str.split("/", expand=True)
196
+        #df1.rename({df1.columns[-2]: 'abc'}, axis=1, inplace=True)
197
+        # print('this is df1')
198
+        # print(df1)
199
+        df1.rename({df1.columns[-1]: 'test'}, axis=1, inplace=True)
200
+        merge = pd.merge(df2, df1, on='test', how='left')
201
+        merge.rename({merge.columns[-1]: 'EventName'}, axis=1, inplace=True)
202
+        # merge.to_csv('merge.csv')
203
+        mergesplit = merge.loc[:, 'test'].str.split(".", expand=True)
204
+        mergesplit.rename({mergesplit.columns[-2]: 'ImageName'}, axis=1, inplace=True)
205
+        mergesplit = mergesplit.loc[:, 'ImageName']
206
+
207
+        #merge.rename({merge.columns[-1]: 'Matched'}, axis=1, inplace=True)
208
+        #merge['EventName'] = merge['abc']
209
+        merge['Imagepath'] = "\\_files\\1\\Gallery\\" + merge['EventName'] + '\\' + + merge['test']
210
+
211
+
212
+        frames = [merge, mergesplit]
213
+
214
+        r = pd.concat(frames, axis=1, join='inner')
215
+
216
+
217
+        df2 = r.dropna(subset=['Matched'])
218
+
219
+
220
+        #df2['Matched'] = df2['Matched'].astype(str)
221
+        #df2['Matched'] = df2['Matched'].astype(int)
222
+        column_list = ['Matched', 'Imagepath', 'ImageName', 'EventName']
223
+        df2[column_list].to_csv('events.csv', index=False)
224
+        df2[column_list].to_json('events.json', orient="records")
225
+
226
+
227
+
228
+
229
+
230
+        # import requests
231
+        # import json
232
+
233
+        # with open('events.json', 'r') as json_file:
234
+        #     json_load = json.load(json_file)
235
+        #     url = "https://eventxstreamnew.bizgaze.com:5443/apis/v4/bizgaze/integrations/events/createpredictedimage"
236
+        #     #url = "https://eventxstreamnew.bizgaze.com:5443/apis/v4/bizgaze/integrations/json/eventwisepredicts"
237
+
238
+        # payload = json.dumps(json_load).replace("]", "").replace("[", "")
239
+        # print(payload)
240
+        # headers = {
241
+        #     'Authorization': 'stat bcc78ad858354e759249c1770957fede',
242
+
243
+            
244
+        #     'Content-Type': 'application/json'
245
+        #     }
246
+        # response = requests.request("POST", url, headers=headers, data=payload)
247
+        # print("Ongoing process with event id = "+str(eventid))
248
+        # print("##############################################################")
249
+        # print(response.text)
250
+
251
+        p.clear()
252
+
253
+
254
+
255
+
256
+
257
+    # Save Image to new directory
258
+    def saveImageToDirectory(image, name, imageName):
259
+        """
260
+        Saves images to directory.
261
+
262
+        Parameters
263
+        ----------
264
+        image : cv2 mat
265
+            Image you want to save.
266
+        name : String
267
+            Directory where you want the image to be saved.
268
+        imageName : String
269
+            Name of image.
270
+
271
+        Returns
272
+        -------
273
+        None.
274
+
275
+        """
276
+        path = "./output/" + name
277
+        path1 = "./output/" + name
278
+        if os.path.exists(path):
279
+            pass
280
+        else:
281
+            os.mkdir(path)
282
+        cv2.imwrite(path + "/" + imageName, image)
283
+        x = []
284
+        c = (path1 + "/" + imageName)
285
+        x.append(c)
286
+        p.append(x)
287
+        f_CSVwrite()
288
+
289
+    # Function for creating encodings for known people
290
+    def processKnownPeopleImages(path=People, saveLocation="./known_encodings.pickle"):
291
+        print(People)
292
+        """
293
+        Process images of known people and create face encodings to compare in future.
294
+        Eaach image should have just 1 face in it.
295
+
296
+        Parameters
297
+        ----------
298
+        path : STRING, optional
299
+            Path for known people dataset. The default is "C:/inetpub/vhosts/port82/wwwroot/_files/People".
300
+            It should be noted that each image in this dataset should contain only 1 face.
301
+        saveLocation : STRING, optional
302
+            Path for storing encodings for known people dataset. The default is "./known_encodings.pickle in current directory".
303
+
304
+        Returns
305
+        -------
306
+        None.
307
+
308
+        """
309
+
310
+        known_encodings = []
311
+        known_names = []
312
+        for img in os.listdir(path):
313
+            imgPath = path + img
314
+
315
+            # Read image
316
+            image = cv2.imread(imgPath)
317
+            name = img.rsplit('.')[0]
318
+            # Resize
319
+            image = cv2.resize(image, (0, 0), fx=0.6, fy=0.6, interpolation=cv2.INTER_LINEAR)
320
+
321
+            # Get locations and encodings
322
+            encs, locs = createEncodings(image)
323
+            try:
324
+                known_encodings.append(encs[0])
325
+            except IndexError:
326
+                os.remove(People+img)
327
+            known_names.append(name)
328
+
329
+            for loc in locs:
330
+                top, right, bottom, left = loc
331
+
332
+            # Show Image
333
+            #cv2.rectangle(image, (left, top), (right, bottom), color=(255, 0, 0), thickness=2)
334
+            # cv2.imshow("Image", image)
335
+           # cv2.waitKey(1)
336
+            #cv2.destroyAllWindows()
337
+        saveEncodings(known_encodings, known_names, saveLocation)
338
+
339
+    # Function for processing dataset images
340
+    def processDatasetImages(saveLocation="./Gallery_encodings.pickle"):
341
+        """
342
+        Process image in dataset from where you want to separate images.
343
+        It separates the images into directories of known people, groups and any unknown people images.
344
+        Parameters
345
+        ----------
346
+        path : STRING, optional
347
+            Path for known people dataset. The default is "D:/port1004/port1004/wwwroot/_files/People".
348
+            It should be noted that each image in this dataset should contain only 1 face.
349
+        saveLocation : STRING, optional
350
+            Path for storing encodings for known people dataset. The default is "./known_encodings.pickle in current directory".
351
+
352
+        Returns
353
+        -------
354
+        None.
355
+
356
+        """
357
+        # Read pickle file for known people to compare faces from
358
+        people_encodings, names = readEncodingsPickle("./known_encodings.pickle")
359
+
360
+
361
+        for root, dirs, files in os.walk(Gallery, topdown=False):
362
+
363
+            for name in files:
364
+                s = os.path.join(root, name)
365
+                #print(p)
366
+          #  imgPath = path + img
367
+
368
+            # Read image
369
+                image = cv2.imread(s)
370
+                orig = image.copy()
371
+
372
+            # Resize
373
+                image = cv2.resize(image, (0, 0), fx=0.6, fy=0.6, interpolation=cv2.INTER_LINEAR)
374
+
375
+            # Get locations and encodings
376
+                encs, locs = createEncodings(image)
377
+
378
+            # Save image to a group image folder if more than one face is in image
379
+            # if len(locs) > 1:
380
+            #     saveImageToDirectory(orig, "Group", img)
381
+
382
+            # Processing image for each face
383
+                i = 0
384
+                knownFlag = 0
385
+                for loc in locs:
386
+                    top, right, bottom, left = loc
387
+                    unknown_encoding = encs[i]
388
+                    i += 1
389
+                    acceptBool, duplicateName, distance = compareFaceEncodings(unknown_encoding, people_encodings, names)
390
+                    if acceptBool:
391
+                        saveImageToDirectory(orig, duplicateName,name)
392
+                        knownFlag = 1
393
+                if knownFlag == 1:
394
+                    print("Match Found")
395
+                else:
396
+                    saveImageToDirectory(orig, "0",name)
397
+
398
+
399
+            # Show Image
400
+            # cv2.rectangle(image, (left, top), (right, bottom), color=(255, 0, 0), thickness=2)
401
+            # # cv2.imshow("Image", image)
402
+            # cv2.waitKey(1)
403
+            # cv2.destroyAllWindows()
404
+
405
+
406
+    def main():
407
+        """
408
+        Main Function.
409
+
410
+        Returns
411
+        -------
412
+        None.
413
+
414
+        """
415
+
416
+        processKnownPeopleImages()
417
+        processDatasetImages()
418
+
419
+        # import pandas as pd
420
+        # q = pd.DataFrame(p)
421
+        # df1 = q
422
+        # print(df1)
423
+        # # df1.to_csv('m.csv')
424
+
425
+        # import pandas as pd
426
+        # import os
427
+        # c = []
428
+        # for root, dirs, files in os.walk(Gallery, topdown=False):
429
+        #     for name in files:
430
+        #         L = os.path.join(root, name)
431
+        #         c.append(L)
432
+        # df2 = pd.DataFrame(c)
433
+        # # df.to_csv('oswalk.csv')
434
+        # import pandas as pd
435
+        # # df1 = pd.read_csv('m.csv')
436
+        # # df2 = pd.read_csv('oswalk.csv')
437
+        # df1 = df1[0].str.split('/', expand=True)
438
+        # df1.rename({df1.columns[-2]: 'Matched'}, axis=1, inplace=True)
439
+        # df1.rename({df1.columns[-1]: 'test'}, axis=1, inplace=True)
440
+        # df2 = df2[0].str.split("\\", expand=True)
441
+        # df2.rename({df2.columns[-1]: 'test'}, axis=1, inplace=True)
442
+        # df2.rename({df2.columns[-2]: 'Eventname'}, axis=1, inplace=True)
443
+        # merge = pd.merge(df2, df1, on='test', how='left')
444
+        # mergesplit = merge.loc[:, 'test'].str.split(".", expand=True)
445
+        # mergesplit.rename({mergesplit.columns[-2]: 'ImageName'}, axis=1, inplace=True)
446
+        # mergesplit = mergesplit.loc[:, 'ImageName']
447
+        # merge['Imagepath'] = "/_files/1/Gallery/" + merge['Eventname'] + '/' + merge['test']
448
+        # frames = [merge, mergesplit]
449
+        # r = pd.concat(frames, axis=1, join='inner')
450
+        # first_column = r.pop('Matched')
451
+        # r.insert(0, 'Matched', first_column)
452
+        # second_column = r.pop('Imagepath')
453
+        # r.insert(1, 'Imagepath', second_column)
454
+        # third_column = r.pop('ImageName')
455
+        # r.insert(2, 'ImageName', third_column)
456
+        # fourth_column = r.pop('Eventname')
457
+        # r.insert(3, 'Eventname', fourth_column)
458
+        # r = r.iloc[:, 0:4]
459
+        # r.sort_values(by=['Matched'], inplace=True)
460
+        # print(r)
461
+        # r.to_csv('path.csv', index=False)
462
+        # r.to_json(r'matched.json', orient="records")
463
+        print("process Ended with event id = "+str(eventid))
464
+
465
+    main()
466
+
467
+
468
+
469
+
470
+
471
+
472
+
473
+
474
+
475
+
476
+
477
+
478
+@app.route('/eventwise', methods=["GET", "POST"])
479
+def eventwise():
480
+    if __name__ == "__main__":
481
+
482
+        url_list=[]
483
+        Dataset= request.args.get('Dataset')
484
+        # id = "100013660000125"
485
+        url_list.append(Dataset)
486
+        # multiprocessing
487
+        with multiprocessing.Pool(processes=10) as pool:
488
+            results = pool.map(download,url_list)
489
+        pool.close()
490
+        return "Done"
491
+
492
+if __name__ == "__main__":
493 493
     app.run(host="0.0.0.0",port=8081)
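
The only functional change in multi_pcs.py is the /eventwise response body ("none" becomes "Done"); the rest of the file is re-uploaded unchanged. That endpoint reads an event id from the Dataset query parameter and hands it to download() through a multiprocessing pool. A minimal client sketch, assuming the service is reachable locally on the port configured in app.run() above, and reusing the example event id that appears commented out in the handler:

    import requests

    resp = requests.get(
        "http://localhost:8081/eventwise",      # assumption: service running locally on port 8081
        params={"Dataset": "100013660000125"},  # example event id from the commented-out line in the handler
    )
    print(resp.status_code, resp.text)          # the handler now returns "Done"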

Events/src/myproject.py  (+71, -54)  View file

@@ -8,8 +8,8 @@ import cv2
8 8
 
9 9
 app = Flask(__name__)
10 10
 app.config["IMAGE_UPLOADS"] = "C:/Users/Bizgaze/PycharmProjects/face_recogniction/People"
11
-datasetPath = "./Gallery/"
12
-peoplePath = "./guestimage/"
11
+datasetPath = "/opt/bizgaze/events.bizgaze.app/wwwroot/_files/1/Gallery/"
12
+peoplePath = "/opt/bizgaze/events.bizgaze.app/wwwroot/_files/People/"
13 13
 @app.route('/', methods=['GET'])
14 14
 def home():
15 15
     return render_template('index.html')
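
The gallery and guest-image locations are now hard-coded absolute paths under /opt/bizgaze/events.bizgaze.app/wwwroot. A small sketch of one way to keep them overridable per deployment, using environment variables with these values as defaults (the variable names EVENTS_DATASET_PATH and EVENTS_PEOPLE_PATH are illustrative, not part of this code):

    import os

    # Fall back to the paths introduced in this commit when no override is set.
    datasetPath = os.environ.get("EVENTS_DATASET_PATH", "/opt/bizgaze/events.bizgaze.app/wwwroot/_files/1/Gallery/")
    peoplePath = os.environ.get("EVENTS_PEOPLE_PATH", "/opt/bizgaze/events.bizgaze.app/wwwroot/_files/People/")
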
@@ -44,7 +44,9 @@ def upload():
44 44
 
45 45
 @app.route('/predict', methods=["GET", "POST"])
46 46
 def predict():
47
+    print('starting')
47 48
     def saveEncodings(encs, names, fname="encodings.pickle"):
49
+        print('encoding')
48 50
         """
49 51
         Save encodings in a pickle file to be used in future.
50 52
 
@@ -153,7 +155,7 @@ def predict():
153 155
         """
154 156
         duplicateName = ""
155 157
         distance = 0.0
156
-        matches = face_recognition.compare_faces(known_encodings, unknown_encoding, tolerance=0.5)
158
+        matches = face_recognition.compare_faces(known_encodings, unknown_encoding, tolerance=0.47)
157 159
         face_distances = face_recognition.face_distance(known_encodings, unknown_encoding)
158 160
         best_match_index = np.argmin(face_distances)
159 161
         distance = face_distances[best_match_index]
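
The match threshold is tightened from 0.5 to 0.47. In face_recognition, compare_faces reports a match exactly when the distance returned by face_distance is at or below the tolerance, so a lower value rejects more borderline faces. A small illustration with stand-in encodings rather than real faces:

    import numpy as np
    import face_recognition

    known = [np.random.rand(128)]   # stand-in for a stored 128-d face encoding
    unknown = known[0] + 0.01       # a nearly identical encoding
    distances = face_recognition.face_distance(known, unknown)
    matches = face_recognition.compare_faces(known, unknown, tolerance=0.47)
    # matches[i] is True precisely when distances[i] <= 0.47
    print(distances, matches)
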
@@ -324,9 +326,9 @@ def predict():
324 326
 
325 327
         processKnownPeopleImages()
326 328
         processDatasetImages()
327
-        shutil.make_archive('./Images', 'zip','./output')
328
-        p='./Images.zip'
329
-        return send_file(p,as_attachment=True)
329
+        # shutil.make_archive('./Images', 'zip','./output')
330
+        # p='./Images.zip'
331
+        # return send_file(p,as_attachment=True)
330 332
 
331 333
 
332 334
         # import pandas as pd
@@ -343,52 +345,65 @@ def predict():
343 345
 
344 346
 
345 347
 ##############################csv creation code ##############################
346
-        # import pandas as pd
347
-        # q = pd.DataFrame(p)
348
-        # m = q
349
-        # print(m)
350
-        # #   x.drop(x.columns[Unnam], axis=1, inplace=True)
351
-        # df = m.groupby([0], as_index=False).count()
352
-        # z = df[0].str.split('/', expand=True)
353
-
354
-        # z['ImagePath'] = z[3]
355
-
356
-        # result = z.drop([0,1,3], axis=1)
357
-        # result.rename({result.columns[-1]: 'test'}, axis=1, inplace=True)
358
-        # # print(result)
359
-        # result.to_csv('results1.csv')
360
-        # import pandas as pd
361
-        # import os
362
-        # c = []
363
-        # for root, dirs, files in os.walk("./Dataset", topdown=False):
364
-        #     for name in files:
365
-        #         # print(name)
366
-        #         L = os.path.join(root, name)
367
-        #         c.append(L)
368
-        # df = pd.DataFrame(c)
369
-
370
-        # df1 = df[0].str.split("/", expand=True)
371
-        # df1.rename({df1.columns[-2]: 'abc'}, axis=1, inplace=True)
372
-        # print('this is df1')
373
-        # print(df1)
374
-        # df1.rename({df1.columns[-1]: 'test'}, axis=1, inplace=True)
375
-        # merge = pd.merge(df1, result, on='test', how='left')
376
-        # merge.to_csv('merge.csv')
377
-        # mergesplit = merge.loc[:,'test'].str.split(".", expand=True)
378
-        # mergesplit.rename({mergesplit.columns[-2]: 'ImageName'}, axis=1, inplace=True)
379
-        # mergesplit =  mergesplit.loc[:,'ImageName' ]
380
-
381
-        # merge.rename({merge.columns[-1]: 'Matched'}, axis=1, inplace=True)
382
-        # merge['EventName'] = merge['abc']
383
-        # merge['Imagepath']="/_files/1/Gallery/"+merge['EventName']+'/'+ + merge['test']
384
-
385
-        # frames = [merge, mergesplit]
386
-
387
-        # r = pd.concat(frames, axis=1, join='inner')
388
-        # r=r.iloc[:,3:]
389
-        # print(r)
390
-        # r.to_csv('path.csv', index=False)
391
-        # r.to_json(r'./matched.json', orient="records")
348
+        import pandas as pd
349
+        q = pd.DataFrame(p)
350
+        m = q
351
+        #print(m)
352
+        #   x.drop(x.columns[Unnam], axis=1, inplace=True)
353
+        df = m.groupby([0], as_index=False).count()
354
+        first_column_name = df.columns[0]
355
+
356
+        # Rename the first column
357
+        df.rename(columns={first_column_name: 'col'}, inplace=True)
358
+        #print(df)
359
+        z = df['col'].str.split('/', expand=True)
360
+
361
+        z['ImagePath'] = z[3]
362
+
363
+        result = z.drop([0,1,3], axis=1)
364
+        result.rename({result.columns[-1]: 'test'}, axis=1, inplace=True)
365
+        # print(result)
366
+        result.to_csv('results1.csv')
367
+        import pandas as pd
368
+        import os
369
+        c = []
370
+        for root, dirs, files in os.walk(datasetPath, topdown=False):
371
+            for name in files:
372
+                # print(name)
373
+                L = os.path.join(root, name)
374
+                c.append(L)
375
+        df = pd.DataFrame(c)
376
+        #print('seconfdf')
377
+      
378
+        first_column_name = df.columns[0]
379
+
380
+        # Rename the first column
381
+        df.rename(columns={first_column_name: 'col'}, inplace=True)
382
+        print(df)
383
+        df1 = df['col'].str.split("/", expand=True)
384
+        df1.rename({df1.columns[-2]: 'abc'}, axis=1, inplace=True)
385
+        #print('this is df1')
386
+        #print(df1)
387
+        df1.rename({df1.columns[-1]: 'test'}, axis=1, inplace=True)
388
+        merge = pd.merge(df1, result, on='test', how='left')
389
+        merge.to_csv('merge.csv')
390
+        mergesplit = merge.loc[:,'test'].str.split(".", expand=True)
391
+        mergesplit.rename({mergesplit.columns[-2]: 'ImageName'}, axis=1, inplace=True)
392
+        mergesplit =  mergesplit.loc[:,'ImageName' ]
393
+
394
+        merge.rename({merge.columns[-1]: 'Matched'}, axis=1, inplace=True)
395
+        merge['EventName'] = merge['abc']
396
+        merge['Imagepath']="/_files/1/Gallery/"+merge['EventName']+'/'+ + merge['test']
397
+
398
+        frames = [merge, mergesplit]
399
+
400
+        r = pd.concat(frames, axis=1, join='inner')
401
+        r=r.iloc[:,3:]
402
+        #print(r)
403
+        r.to_csv('path.csv', index=False)
404
+        #r.to_json(r'./matched.json', orient="records")
405
+        column_list = ['Matched','Imagepath', 'ImageName', 'EventName']
406
+        r[column_list].to_json('matched.json', orient="records")
392 407
 
393 408
 #############################################################################################
394 409
 
@@ -477,8 +492,10 @@ def predict():
477 492
         main()
478 493
 
479 494
     #    return render_template('index.html')
495
+    p = './matched.json'
496
+    return send_file(p,as_attachment=True)
480 497
 
481
-    return 'ALL IMAGES MATCHED'
498
+   # return 'ALL IMAGES MATCHED'
482 499
 
483 500
 
484 501
 @app.route('/json')
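
send_file with as_attachment=True sets a Content-Disposition: attachment header, so /predict now returns matched.json as a downloadable file instead of the previous plain-text message. A hedged client sketch, assuming a local deployment; note the call blocks until the whole matching run has finished:

    import requests

    resp = requests.get("http://localhost:8081/predict")  # assumption: service running locally on port 8081
    with open("matched.json", "wb") as fh:
        fh.write(resp.content)
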
@@ -488,6 +505,6 @@ def json():
488 505
 
489 506
 
490 507
 if __name__ == "__main__":
491
-    app.run(host="0.0.0.0",port=8081,debug=True)
508
+    app.run(host="0.0.0.0",port=8081)
492 509
 
493 510
 
