Преглед на файлове

Upload files to 'Attendence'

SadhulaSaiKumar преди 1 година
родител
ревизия
d198ac8073
променени са 5 файла, в които са добавени 2091 реда и са изтрити 2 реда
  1. 559
    0
      Attendence/anwi_attendence.py
  2. 14
    2
      Attendence/attendence.py
  3. 402
    0
      Attendence/requirements.txt
  4. 557
    0
      Attendence/test_attendence.py
  5. 559
    0
      Attendence/uat_attendence.py

+ 559
- 0
Attendence/anwi_attendence.py Целия файл

@@ -0,0 +1,559 @@
1
+from flask import Flask, render_template, request, redirect, Response, send_file
2
+import multiprocessing
3
+import face_recognition
4
+#from numba import jit
5
+import numpy as np
6
+import os
7
+#from flask_cors import CORS
8
+app = Flask(__name__)
9
+#CORS(app)
10
+lst = []
11
+
12
+
13
@app.route('/', methods=['GET'])
def resume():
    """Health-check root endpoint: confirm the service is up."""
    # The HTML template is no longer served; a plain status string is enough.
    #return render_template('index.html')
    status_message = 'Attendence app running'
    return status_message
17
+
18
def createEncodings(image):
    """
    Locate every face in *image* and compute an encoding for each.

    Parameters
    ----------
    image : cv2 mat
        Image you want to detect faces from.

    Returns
    -------
    known_encodings : list of np array
        Face encodings for each detected face.
    face_locations : list of tuples
        Bounding boxes (top, right, bottom, left) for each detected face.
    """
    print("Encoding")
    # Detect faces first, then encode exactly those regions.
    locations = face_recognition.face_locations(image)
    encodings = face_recognition.face_encodings(
        image, known_face_locations=locations)
    return encodings, locations
38
#@app.route('/register', methods=["POST","GET"])
def registered(url_list):
    """
    Save a user's base64-encoded profile photo and build its face-encoding
    pickle under ppeople/<FileName>/.

    Parameters
    ----------
    url_list : dict
        Request payload with at least:
        - "FileName": user identifier (folder and file base name)
        - "FileData": base64-encoded JPEG content as a str
        NOTE(review): despite the name this is a single dict — it arrives
        one-at-a-time through multiprocessing.Pool.map in register().

    Returns
    -------
    str
        Status message.  May let IndexError propagate from the worker; the
        caller (register) catches and ignores pool errors.
    """
    import base64
    import pickle
    from pathlib import Path

    import cv2

    payload = url_list
    file_data = payload['FileData']
    user_name = payload['FileName']
    # FileType from the payload is ignored; photos are always stored as jpg.
    extension = 'jpg'

    # ppeople/<user>/ holds the raw photo and its encoding pickle.
    Path("ppeople").mkdir(exist_ok=True)
    Path("ppeople/" + user_name).mkdir(exist_ok=True)

    file_name = user_name + '.' + extension
    print(file_name)

    # FileData is expected to be a base64 str; anything without .encode()
    # (bytes, None) is treated as "nothing to save".
    try:
        img_data = file_data.encode()
    except AttributeError:
        return "Successfully saved encoding........."

    with open("ppeople/" + user_name + "/" + file_name, "wb") as fh:
        fh.write(base64.decodebytes(img_data))

    img = "ppeople/" + user_name + "/" + file_name
    saveLocation = "ppeople/" + user_name + "/" + user_name + ".pickle"

    def saveEncodings(encs, names, fname='encodings.pickle'):
        """Serialize (name, encoding) pairs to the pickle file *fname*."""
        data = [{"name": nm, "encoding": enc} for (nm, enc) in zip(names, encs)]
        print("[INFO] serializing encodings...")
        print("[INFO] Encodings Created sucessfully")
        with open(fname, "wb") as f:
            f.write(pickle.dumps(data))

    def processKnownPeopleImages(img=img, saveLocation=saveLocation):
        """Encode the (single expected) face in *img*; store at *saveLocation*."""
        known_encodings = []
        known_names = []

        image = cv2.imread(img)
        name = img.rsplit('.')[0]
        try:
            print(image.shape)
        except AttributeError:
            # cv2.imread returned None: unreadable / missing file.
            return "Successfully saved encoding........."
        image = cv2.resize(image, (0, 0), fx=0.9, fy=0.9,
                           interpolation=cv2.INTER_LINEAR)

        encs, locs = createEncodings(image)
        try:
            known_encodings.append(encs[0])
        except IndexError:
            # No face detected in the new photo: drop any stale pickle so the
            # profile does not keep pointing at an old encoding.  (The
            # original let a missing pickle raise FileNotFoundError; the
            # /register handler ignored it, so suppressing is equivalent.)
            try:
                os.remove(saveLocation)
            except FileNotFoundError:
                pass
            print('------------------------------------- save location --------------------------------')
            print(saveLocation)
            return "hello world!"

        known_names.append(name)
        saveEncodings(known_encodings, known_names, saveLocation)

    processKnownPeopleImages(img, saveLocation)
    return 'Successfully saved encoding.........'
224
+
225
+
226
# ********************************   COMPARUISION *********************************************************
#@app.route('/submit', methods=["POST","GET"])
def submit(url_list):
    """
    Mark attendance: save the submitted photo under GGallery/<date>/<user>/,
    reject group-photo profiles, then compare the submission against the
    user's registered face-encoding pickle.

    Parameters
    ----------
    url_list : dict
        Payload with "FileName" (user id) and "FileData" (base64 JPEG str).
        NOTE(review): a single dict, delivered via multiprocessing.Pool.map.

    Returns
    -------
    str
        'Matched successfully' on a match, or a diagnostic message.

    Raises
    ------
    IndexError
        When no face in the submission matches (the /detect handler maps
        this to 'failed').
    FileNotFoundError
        When the GGallery tree is missing (mapped to a registration prompt).
    """
    import base64
    import pickle
    from datetime import datetime
    from pathlib import Path

    import cv2
    import dlib
    import pytz

    # Timestamp in IST: the date keys the per-day attendance folder.
    tz_NY = pytz.timezone('Asia/Kolkata')
    datetime_NY = datetime.now(tz_NY)
    India_Date = str(datetime_NY.strftime("%Y-%m-%d"))

    payload = url_list
    file_data = payload['FileData']
    user_name = payload['FileName']
    extension = 'jpg'  # FileType from the payload is ignored

    Path("GGallery/" + India_Date).mkdir(exist_ok=True)
    Path("GGallery/" + India_Date + '/' + user_name).mkdir(exist_ok=True)

    file_name = user_name + '.' + extension
    img_data = file_data.encode()

    with open("GGallery/" + India_Date + '/' + user_name + "/" + file_name, "wb") as fh:
        fh.write(base64.decodebytes(img_data))

    path = "GGallery/" + India_Date + '/' + user_name + "/" + file_name
    pickle_location = "ppeople/" + user_name + "/" + user_name + ".pickle"

    if not Path(pickle_location).exists():
        print("pickle File not exist")
        print(file_name)
        return "Face not found in profile (please change your profile)"

    check_faces = "ppeople/" + user_name + "/" + user_name + ".jpg"
    print(check_faces)

    # ---------- reject group photos in the registered profile picture ------
    frame = cv2.imread(check_faces)
    detector = dlib.get_frontal_face_detector()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)

    face_count = 0
    for face in faces:
        # BUG FIX: the original unpacked into `x, y`, clobbering `y`
        # (the FileName) inside this loop.  Use dedicated names.
        left_x, top_y = face.left(), face.top()
        right_x, bottom_y = face.right(), face.bottom()
        cv2.rectangle(frame, (left_x, top_y), (right_x, bottom_y), (0, 255, 0), 2)
        face_count += 1
        cv2.putText(frame, 'face num' + str(face_count), (left_x - 10, top_y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

    if face_count > 1:
        print("Group Photo")
        return "Group Photo"
    elif face_count == 1:
        print("Single Photo")
    # face_count == 0 falls through, as in the original.

    def readEncodingsPickle(fname):
        """Load saved encodings and names from pickle file *fname*."""
        data = pickle.loads(open(fname, "rb").read())
        encodings = [d["encoding"] for d in data]
        names = [d["name"] for d in data]
        return encodings, names

    def compareFaceEncodings(unknown_encoding, known_encodings, known_names):
        """
        Check whether *unknown_encoding* matches any known encoding.

        Returns (acceptBool, duplicateName, distance): whether the best
        match is within tolerance, its name ('' if none), and its distance.
        """
        matches = face_recognition.compare_faces(
            known_encodings, unknown_encoding, tolerance=0.54)
        face_distances = face_recognition.face_distance(
            known_encodings, unknown_encoding)
        best_match_index = np.argmin(face_distances)
        distance = face_distances[best_match_index]
        if matches[best_match_index]:
            return True, known_names[best_match_index], distance
        return False, "", distance

    def processDatasetImages(path=path, pickle_location=pickle_location):
        """
        Compare every face in the image at *path* with the registered
        encodings.  Return 'Matched successfully' or None when no face
        matched.
        """
        people_encodings, names = readEncodingsPickle(pickle_location)

        image = cv2.imread(path)
        image = cv2.resize(image, (0, 0), fx=0.9, fy=0.9,
                           interpolation=cv2.INTER_LINEAR)
        encs, locs = createEncodings(image)

        known_flag = 0
        for idx, loc in enumerate(locs):
            acceptBool, duplicateName, distance = compareFaceEncodings(
                encs[idx], people_encodings, names)
            if acceptBool:
                known_flag = 1

        if known_flag == 1:
            print("Match Found")
            output_json = 'Matched successfully'
            print(loc)
            print(output_json)
            return output_json

        print('Not Matched')
        return None

    result = processDatasetImages(path, pickle_location)
    if result is None:
        # BUG FIX: previously this indexed the module-global `lst`, which
        # accumulated results across calls in the same process.  Keep the
        # contract (IndexError => /detect answers 'failed') without the leak.
        raise IndexError('no matching face found')
    return result
491
+
492
+
493
@app.route('/detect', methods=["POST"])
def detect():
    """
    /detect endpoint: run submit() on the posted JSON payload in a worker
    pool and return the match result.

    Returns
    -------
    str
        The submit() result, a registration prompt on FileNotFoundError,
        or 'failed' when no face matched (IndexError from submit()).
    """
    # Guard retained from the original: the body runs only when the module
    # is executed directly (python script + app.run()).  NOTE(review):
    # under a WSGI server __name__ != "__main__" and this view returns
    # None — confirm deployment always launches via app.run().
    if __name__ == "__main__":
        payloads = [request.get_json()]

        # Fan the (single) payload out to a process pool; exceptions raised
        # in the worker are re-raised here by pool.map.
        pool_size = multiprocessing.cpu_count() * 2
        with multiprocessing.Pool(pool_size) as pool:
            try:
                results = pool.map(submit, payloads)
            except FileNotFoundError:
                # User has no registered data yet.  (Typo 'plese' fixed.)
                return 'please get registered with your PhotoID'
            except IndexError:
                # submit() found no matching face.
                return 'failed'
        # The redundant pool.close() after the `with` block was removed:
        # the context manager already terminates the pool.
        return results[0]
515
+
516
+
517
+
518
@app.route('/register', methods=["POST"])
def register():
    """
    /register endpoint: store the posted profile photo and build its face
    encoding via registered() in a worker pool.

    Always reports success; encoding problems are only logged, matching the
    original behavior where pool errors were swallowed.
    """
    print("hello start..........")
    # Same script-only guard as /detect — see the note there.
    if __name__ == "__main__":
        dataset = request.get_json()
        payloads = [dataset]
        # FilePath is only logged.  Use .get() so a payload without it no
        # longer fails the whole request with a KeyError.
        print(dataset.get("FilePath"))

        pool_size = multiprocessing.cpu_count() * 2
        with multiprocessing.Pool(pool_size) as pool:
            try:
                pool.map(registered, payloads)
            except IndexError:
                print('face not found')
            except FileNotFoundError:
                # Best-effort: a missing file during encoding is ignored.
                pass
        # Redundant pool.close() after the `with` block removed; the
        # context manager already terminates the pool.
        return 'Successfully saved encoding.........'
554
+
555
+
556
+
557
+
558
if __name__ == "__main__":
    # Listen on all interfaces so the service is reachable from other hosts.
    app.run(host='0.0.0.0', port=5006, debug=False)

+ 14
- 2
Attendence/attendence.py Целия файл

@@ -59,7 +59,10 @@ def registered(url_list):
59 59
     # filename=image[-1]
60 60
 
61 61
     # print(x)
62
-    img_data = x.encode()
62
+    try:
63
+        img_data = x.encode()
64
+    except AttributeError:
65
+        return "Successfully saved encoding........."
63 66
 
64 67
     import base64
65 68
 
@@ -388,7 +391,7 @@ def submit(url_list):
388 391
         """
389 392
         duplicateName = ""
390 393
         distance = 0.0
391
-        matches = face_recognition.compare_faces(known_encodings, unknown_encoding, tolerance=0.54)
394
+        matches = face_recognition.compare_faces(known_encodings, unknown_encoding, tolerance=0.51)
392 395
         face_distances = face_recognition.face_distance(known_encodings, unknown_encoding)
393 396
 
394 397
         best_match_index = np.argmin(face_distances)
@@ -521,6 +524,15 @@ def register():
521 524
         Dataset=  request.get_json()
522 525
         # id = "100013660000125"
523 526
         url_list.append(Dataset)
527
+        UserLocation=Dataset["FilePath"]
528
+        print(UserLocation)
529
+
530
+        # if "cO2" in UserLocation or UserLocation is None:
531
+        #     pass
532
+        # else:
533
+        #     return "Please update the URL in the integration"
534
+
535
+
524 536
         # multiprocessing
525 537
         pool_size = multiprocessing.cpu_count() * 2
526 538
         with multiprocessing.Pool(pool_size) as pool:

+ 402
- 0
Attendence/requirements.txt Целия файл

@@ -0,0 +1,402 @@
1
+absl-py==1.3.0
2
+aio-pika==8.2.3
3
+aiofiles==23.1.0
4
+aiogram==2.25.1
5
+aiohttp @ file:///tmp/build/80754af9/aiohttp_1646806365504/work
6
+aiormq==6.4.2
7
+aiosignal @ file:///tmp/build/80754af9/aiosignal_1637843061372/work
8
+alabaster @ file:///home/ktietz/src/ci/alabaster_1611921544520/work
9
+anaconda-client @ file:///tmp/build/80754af9/anaconda-client_1635342557008/work
10
+anaconda-navigator==2.1.4
11
+anaconda-project @ file:///tmp/build/80754af9/anaconda-project_1637161053845/work
12
+anyio @ file:///tmp/build/80754af9/anyio_1644463572971/work/dist
13
+appdirs==1.4.4
14
+APScheduler==3.9.1.post1
15
+argon2-cffi @ file:///opt/conda/conda-bld/argon2-cffi_1645000214183/work
16
+argon2-cffi-bindings @ file:///tmp/build/80754af9/argon2-cffi-bindings_1644569679365/work
17
+arrow @ file:///opt/conda/conda-bld/arrow_1649166651673/work
18
+astroid @ file:///tmp/build/80754af9/astroid_1628063140030/work
19
+astropy @ file:///opt/conda/conda-bld/astropy_1650891077797/work
20
+asttokens @ file:///opt/conda/conda-bld/asttokens_1646925590279/work
21
+astunparse==1.6.3
22
+async-timeout==4.0.2
23
+atomicwrites==1.4.0
24
+attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
25
+Automat @ file:///tmp/build/80754af9/automat_1600298431173/work
26
+autopep8 @ file:///opt/conda/conda-bld/autopep8_1639166893812/work
27
+Babel @ file:///tmp/build/80754af9/babel_1620871417480/work
28
+backcall @ file:///home/ktietz/src/ci/backcall_1611930011877/work
29
+backports.functools-lru-cache @ file:///tmp/build/80754af9/backports.functools_lru_cache_1618170165463/work
30
+backports.tempfile @ file:///home/linux1/recipes/ci/backports.tempfile_1610991236607/work
31
+backports.weakref==1.0.post1
32
+bcrypt @ file:///tmp/build/80754af9/bcrypt_1607022650461/work
33
+beautifulsoup4 @ file:///opt/conda/conda-bld/beautifulsoup4_1650462163268/work
34
+bidict==0.22.1
35
+binaryornot @ file:///tmp/build/80754af9/binaryornot_1617751525010/work
36
+bitarray @ file:///tmp/build/80754af9/bitarray_1648739490228/work
37
+bkcharts==0.2
38
+black==19.10b0
39
+bleach @ file:///opt/conda/conda-bld/bleach_1641577558959/work
40
+bokeh @ file:///tmp/build/80754af9/bokeh_1638362822154/work
41
+boto3 @ file:///opt/conda/conda-bld/boto3_1649078879353/work
42
+botocore @ file:///opt/conda/conda-bld/botocore_1649076662316/work
43
+Bottleneck @ file:///tmp/build/80754af9/bottleneck_1648028898966/work
44
+brotlipy==0.7.0
45
+CacheControl==0.12.11
46
+cachetools @ file:///tmp/build/80754af9/cachetools_1619597386817/work
47
+certifi==2021.10.8
48
+cffi @ file:///opt/conda/conda-bld/cffi_1642701102775/work
49
+chardet @ file:///tmp/build/80754af9/chardet_1607706775000/work
50
+charset-normalizer @ file:///tmp/build/80754af9/charset-normalizer_1630003229654/work
51
+click @ file:///tmp/build/80754af9/click_1646056590078/work
52
+cloudpickle @ file:///tmp/build/80754af9/cloudpickle_1632508026186/work
53
+clyent==1.2.2
54
+colorama @ file:///tmp/build/80754af9/colorama_1607707115595/work
55
+colorcet @ file:///tmp/build/80754af9/colorcet_1611168489822/work
56
+colorclass==2.2.2
57
+coloredlogs==15.0.1
58
+colorhash==1.2.1
59
+conda==4.12.0
60
+conda-build==3.21.8
61
+conda-content-trust @ file:///tmp/build/80754af9/conda-content-trust_1617045594566/work
62
+conda-pack @ file:///tmp/build/80754af9/conda-pack_1611163042455/work
63
+conda-package-handling @ file:///tmp/build/80754af9/conda-package-handling_1649105784853/work
64
+conda-repo-cli @ file:///tmp/build/80754af9/conda-repo-cli_1620168426516/work
65
+conda-token @ file:///tmp/build/80754af9/conda-token_1620076980546/work
66
+conda-verify==3.4.2
67
+confluent-kafka==1.9.2
68
+constantly==15.1.0
69
+cookiecutter @ file:///opt/conda/conda-bld/cookiecutter_1649151442564/work
70
+cryptography @ file:///tmp/build/80754af9/cryptography_1633520369886/work
71
+cssselect==1.1.0
72
+cycler @ file:///tmp/build/80754af9/cycler_1637851556182/work
73
+Cython @ file:///tmp/build/80754af9/cython_1647850345254/work
74
+cytoolz==0.11.0
75
+daal4py==2021.5.0
76
+dask==2022.10.2
77
+datashader @ file:///tmp/build/80754af9/datashader_1623782308369/work
78
+datashape==0.5.4
79
+debugpy @ file:///tmp/build/80754af9/debugpy_1637091799509/work
80
+decorator @ file:///opt/conda/conda-bld/decorator_1643638310831/work
81
+defusedxml @ file:///tmp/build/80754af9/defusedxml_1615228127516/work
82
+diff-match-patch @ file:///Users/ktietz/demo/mc3/conda-bld/diff-match-patch_1630511840874/work
83
+distributed @ file:///opt/conda/conda-bld/distributed_1647271944416/work
84
+dlib==19.24.0
85
+dnspython==1.16.0
86
+docopt==0.6.2
87
+docutils @ file:///tmp/build/80754af9/docutils_1620827980776/work
88
+entrypoints @ file:///tmp/build/80754af9/entrypoints_1649926439650/work
89
+et-xmlfile==1.1.0
90
+executing @ file:///opt/conda/conda-bld/executing_1646925071911/work
91
+face-recognition==1.3.0
92
+face-recognition-models==0.3.0
93
+fastjsonschema @ file:///tmp/build/80754af9/python-fastjsonschema_1620414857593/work/dist
94
+fbmessenger==6.0.0
95
+filelock @ file:///opt/conda/conda-bld/filelock_1647002191454/work
96
+fire==0.5.0
97
+flake8 @ file:///tmp/build/80754af9/flake8_1620776156532/work
98
+Flask @ file:///home/ktietz/src/ci/flask_1611932660458/work
99
+flatbuffers==23.3.3
100
+fonttools==4.25.0
101
+frozenlist @ file:///tmp/build/80754af9/frozenlist_1637767113340/work
102
+fsspec @ file:///opt/conda/conda-bld/fsspec_1647268051896/work
103
+future @ file:///tmp/build/80754af9/future_1607571303524/work
104
+gast==0.4.0
105
+gensim @ file:///tmp/build/80754af9/gensim_1646806807927/work
106
+glob2 @ file:///home/linux1/recipes/ci/glob2_1610991677669/work
107
+gmpy2 @ file:///tmp/build/80754af9/gmpy2_1645438755360/work
108
+google-api-core @ file:///C:/ci/google-api-core-split_1613980333946/work
109
+google-auth @ file:///tmp/build/80754af9/google-auth_1626320605116/work
110
+google-auth-oauthlib==0.4.6
111
+google-cloud-core @ file:///tmp/build/80754af9/google-cloud-core_1625077425256/work
112
+google-cloud-storage @ file:///tmp/build/80754af9/google-cloud-storage_1601307969662/work
113
+google-crc32c @ file:///tmp/build/80754af9/google-crc32c_1612242928148/work
114
+google-pasta==0.2.0
115
+google-resumable-media @ file:///tmp/build/80754af9/google-resumable-media_1624367812531/work
116
+googleapis-common-protos @ file:///tmp/build/80754af9/googleapis-common-protos-feedstock_1617957652138/work
117
+greenlet @ file:///tmp/build/80754af9/greenlet_1628888132713/work
118
+grpcio @ file:///tmp/build/80754af9/grpcio_1637590821884/work
119
+h5py @ file:///tmp/build/80754af9/h5py_1637138488546/work
120
+HeapDict @ file:///Users/ktietz/demo/mc3/conda-bld/heapdict_1630598515714/work
121
+holoviews @ file:///opt/conda/conda-bld/holoviews_1645454331194/work
122
+httptools==0.5.0
123
+humanfriendly==10.0
124
+hvplot @ file:///tmp/build/80754af9/hvplot_1627305124151/work
125
+hyperlink @ file:///tmp/build/80754af9/hyperlink_1610130746837/work
126
+idna @ file:///tmp/build/80754af9/idna_1637925883363/work
127
+imagecodecs @ file:///tmp/build/80754af9/imagecodecs_1635529108216/work
128
+imageio @ file:///tmp/build/80754af9/imageio_1617700267927/work
129
+imagesize @ file:///tmp/build/80754af9/imagesize_1637939814114/work
130
+importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648544546694/work
131
+incremental @ file:///tmp/build/80754af9/incremental_1636629750599/work
132
+inflection==0.5.1
133
+iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
134
+intake @ file:///opt/conda/conda-bld/intake_1647436631684/work
135
+intervaltree @ file:///Users/ktietz/demo/mc3/conda-bld/intervaltree_1630511889664/work
136
+ipykernel @ file:///tmp/build/80754af9/ipykernel_1647000773790/work/dist/ipykernel-6.9.1-py3-none-any.whl
137
+ipython @ file:///tmp/build/80754af9/ipython_1648817057602/work
138
+ipython-genutils @ file:///tmp/build/80754af9/ipython_genutils_1606773439826/work
139
+ipywidgets @ file:///tmp/build/80754af9/ipywidgets_1634143127070/work
140
+isort @ file:///tmp/build/80754af9/isort_1628603791788/work
141
+itemadapter @ file:///tmp/build/80754af9/itemadapter_1626442940632/work
142
+itemloaders @ file:///opt/conda/conda-bld/itemloaders_1646805235997/work
143
+itsdangerous @ file:///tmp/build/80754af9/itsdangerous_1621432558163/work
144
+jdcal @ file:///Users/ktietz/demo/mc3/conda-bld/jdcal_1630584345063/work
145
+jedi @ file:///tmp/build/80754af9/jedi_1644297102865/work
146
+jeepney @ file:///tmp/build/80754af9/jeepney_1627537048313/work
147
+Jinja2 @ file:///tmp/build/80754af9/jinja2_1612213139570/work
148
+jinja2-time @ file:///opt/conda/conda-bld/jinja2-time_1649251842261/work
149
+jmespath @ file:///Users/ktietz/demo/mc3/conda-bld/jmespath_1630583964805/work
150
+joblib @ file:///tmp/build/80754af9/joblib_1635411271373/work
151
+json5 @ file:///tmp/build/80754af9/json5_1624432770122/work
152
+jsonpickle==3.0.1
153
+jsonschema @ file:///tmp/build/80754af9/jsonschema_1650025953207/work
154
+jupyter @ file:///tmp/build/80754af9/jupyter_1607700846274/work
155
+jupyter-client @ file:///tmp/build/80754af9/jupyter_client_1616770841739/work
156
+jupyter-console @ file:///tmp/build/80754af9/jupyter_console_1616615302928/work
157
+jupyter-core @ file:///tmp/build/80754af9/jupyter_core_1646976314572/work
158
+jupyter-server @ file:///opt/conda/conda-bld/jupyter_server_1644494914632/work
159
+jupyterlab @ file:///opt/conda/conda-bld/jupyterlab_1647445413472/work
160
+jupyterlab-pygments @ file:///tmp/build/80754af9/jupyterlab_pygments_1601490720602/work
161
+jupyterlab-server @ file:///opt/conda/conda-bld/jupyterlab_server_1644500396812/work
162
+jupyterlab-widgets @ file:///tmp/build/80754af9/jupyterlab_widgets_1609884341231/work
163
+keras==2.11.0
164
+keyring @ file:///tmp/build/80754af9/keyring_1638531355686/work
165
+kiwisolver @ file:///opt/conda/conda-bld/kiwisolver_1638569886207/work
166
+lazy-object-proxy @ file:///tmp/build/80754af9/lazy-object-proxy_1616529027849/work
167
+libarchive-c @ file:///tmp/build/80754af9/python-libarchive-c_1617780486945/work
168
+libclang==16.0.0
169
+llvmlite==0.38.0
170
+locket @ file:///tmp/build/80754af9/locket_1647006009810/work
171
+lxml @ file:///tmp/build/80754af9/lxml_1646624513062/work
172
+magic-filter==1.0.9
173
+Markdown @ file:///tmp/build/80754af9/markdown_1614363852612/work
174
+MarkupSafe @ file:///tmp/build/80754af9/markupsafe_1621523467000/work
175
+matplotlib @ file:///tmp/build/80754af9/matplotlib-suite_1647441664166/work
176
+matplotlib-inline @ file:///tmp/build/80754af9/matplotlib-inline_1628242447089/work
177
+mattermostwrapper==2.2
178
+mccabe==0.6.1
179
+mistune @ file:///tmp/build/80754af9/mistune_1607364877025/work
180
+mkl-fft==1.3.1
181
+mkl-random @ file:///tmp/build/80754af9/mkl_random_1626186066731/work
182
+mkl-service==2.4.0
183
+mock @ file:///tmp/build/80754af9/mock_1607622725907/work
184
+mpmath==1.2.1
185
+msgpack @ file:///tmp/build/80754af9/msgpack-python_1612287166301/work
186
+multidict @ file:///opt/conda/conda-bld/multidict_1640703752579/work
187
+multipledispatch @ file:///tmp/build/80754af9/multipledispatch_1607574243360/work
188
+munkres==1.1.4
189
+mypy-extensions==0.4.3
190
+navigator-updater==0.2.1
191
+nbclassic @ file:///opt/conda/conda-bld/nbclassic_1644943264176/work
192
+nbclient @ file:///tmp/build/80754af9/nbclient_1650290509967/work
193
+nbconvert @ file:///opt/conda/conda-bld/nbconvert_1649751911790/work
194
+nbformat @ file:///tmp/build/80754af9/nbformat_1649826788557/work
195
+nest-asyncio @ file:///tmp/build/80754af9/nest-asyncio_1649847906199/work
196
+networkx==2.6.3
197
+nltk @ file:///opt/conda/conda-bld/nltk_1645628263994/work
198
+nose @ file:///opt/conda/conda-bld/nose_1642704612149/work
199
+notebook @ file:///tmp/build/80754af9/notebook_1645002532094/work
200
+numba @ file:///opt/conda/conda-bld/numba_1648040517072/work
201
+numexpr @ file:///tmp/build/80754af9/numexpr_1640689833592/work
202
+numpy @ file:///tmp/build/80754af9/numpy_and_numpy_base_1649764630438/work
203
+numpydoc @ file:///opt/conda/conda-bld/numpydoc_1643788541039/work
204
+oauthlib==3.2.2
205
+olefile @ file:///Users/ktietz/demo/mc3/conda-bld/olefile_1629805411829/work
206
+opencv-python==4.7.0.72
207
+openpyxl @ file:///tmp/build/80754af9/openpyxl_1632777717936/work
208
+opt-einsum==3.3.0
209
+packaging==20.9
210
+pamqp==3.2.1
211
+pandas==1.4.2
212
+pandocfilters @ file:///opt/conda/conda-bld/pandocfilters_1643405455980/work
213
+panel @ file:///opt/conda/conda-bld/panel_1650637168846/work
214
+param @ file:///tmp/build/80754af9/param_1636647414893/work
215
+parsel @ file:///tmp/build/80754af9/parsel_1646722533460/work
216
+parso @ file:///opt/conda/conda-bld/parso_1641458642106/work
217
+partd @ file:///opt/conda/conda-bld/partd_1647245470509/work
218
+pathspec==0.7.0
219
+patsy==0.5.2
220
+pep8==1.7.1
221
+pexpect @ file:///tmp/build/80754af9/pexpect_1605563209008/work
222
+pickleshare @ file:///tmp/build/80754af9/pickleshare_1606932040724/work
223
+Pillow==9.0.1
224
+pkginfo @ file:///tmp/build/80754af9/pkginfo_1643162084911/work
225
+plotly @ file:///opt/conda/conda-bld/plotly_1646671701182/work
226
+pluggy @ file:///tmp/build/80754af9/pluggy_1648024445381/work
227
+poyo @ file:///tmp/build/80754af9/poyo_1617751526755/work
228
+prometheus-client @ file:///opt/conda/conda-bld/prometheus_client_1643788673601/work
229
+prompt-toolkit @ file:///tmp/build/80754af9/prompt-toolkit_1633440160888/work
230
+Protego @ file:///tmp/build/80754af9/protego_1598657180827/work
231
+protobuf==3.19.1
232
+psutil @ file:///tmp/build/80754af9/psutil_1612297992929/work
233
+psycopg2-binary==2.9.6
234
+ptyprocess @ file:///tmp/build/80754af9/ptyprocess_1609355006118/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl
235
+pure-eval @ file:///opt/conda/conda-bld/pure_eval_1646925070566/work
236
+py @ file:///opt/conda/conda-bld/py_1644396412707/work
237
+pyasn1 @ file:///Users/ktietz/demo/mc3/conda-bld/pyasn1_1629708007385/work
238
+pyasn1-modules==0.2.8
239
+pycodestyle @ file:///tmp/build/80754af9/pycodestyle_1615748559966/work
240
+pycosat==0.6.3
241
+pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work
242
+pyct @ file:///tmp/build/80754af9/pyct_1613411549454/work
243
+pycurl==7.44.1
244
+pydantic==1.10.2
245
+PyDispatcher==2.0.5
246
+pydocstyle @ file:///tmp/build/80754af9/pydocstyle_1621600989141/work
247
+pydot==1.4.2
248
+pyerfa @ file:///tmp/build/80754af9/pyerfa_1621556109336/work
249
+pyflakes @ file:///tmp/build/80754af9/pyflakes_1617200973297/work
250
+Pygments @ file:///opt/conda/conda-bld/pygments_1644249106324/work
251
+PyHamcrest @ file:///tmp/build/80754af9/pyhamcrest_1615748656804/work
252
+PyJWT @ file:///tmp/build/80754af9/pyjwt_1619682484438/work
253
+pykwalify==1.8.0
254
+pylint @ file:///tmp/build/80754af9/pylint_1627536788603/work
255
+pyls-spyder==0.4.0
256
+pymongo==3.10.1
257
+pyodbc @ file:///tmp/build/80754af9/pyodbc_1647425888968/work
258
+pyOpenSSL @ file:///tmp/build/80754af9/pyopenssl_1635333100036/work
259
+pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
260
+pyrsistent @ file:///tmp/build/80754af9/pyrsistent_1636110951836/work
261
+PySocks @ file:///tmp/build/80754af9/pysocks_1605305812635/work
262
+pytest==7.1.1
263
+python-crfsuite==0.9.9
264
+python-dateutil @ file:///tmp/build/80754af9/python-dateutil_1626374649649/work
265
+python-engineio==4.4.0
266
+python-lsp-black @ file:///tmp/build/80754af9/python-lsp-black_1634232156041/work
267
+python-lsp-jsonrpc==1.0.0
268
+python-lsp-server==1.2.4
269
+python-slugify @ file:///tmp/build/80754af9/python-slugify_1620405669636/work
270
+python-snappy @ file:///tmp/build/80754af9/python-snappy_1610133040135/work
271
+python-socketio==5.8.0
272
+pytz==2021.3
273
+pytz-deprecation-shim==0.1.0.post0
274
+pyviz-comms @ file:///tmp/build/80754af9/pyviz_comms_1623747165329/work
275
+PyWavelets @ file:///tmp/build/80754af9/pywavelets_1648710015787/work
276
+pyxdg @ file:///tmp/build/80754af9/pyxdg_1603822279816/work
277
+PyYAML==6.0
278
+pyzmq @ file:///tmp/build/80754af9/pyzmq_1638434985866/work
279
+QDarkStyle @ file:///tmp/build/80754af9/qdarkstyle_1617386714626/work
280
+qstylizer @ file:///tmp/build/80754af9/qstylizer_1617713584600/work/dist/qstylizer-0.1.10-py2.py3-none-any.whl
281
+QtAwesome @ file:///tmp/build/80754af9/qtawesome_1637160816833/work
282
+qtconsole @ file:///opt/conda/conda-bld/qtconsole_1649078897110/work
283
+QtPy @ file:///opt/conda/conda-bld/qtpy_1649073884068/work
284
+questionary==1.10.0
285
+queuelib==1.5.0
286
+randomname==0.1.5
287
+rasa==3.5.4
288
+rasa-sdk==3.5.0
289
+redis==4.5.4
290
+regex @ file:///tmp/build/80754af9/regex_1648447707500/work
291
+requests @ file:///opt/conda/conda-bld/requests_1641824580448/work
292
+requests-file @ file:///Users/ktietz/demo/mc3/conda-bld/requests-file_1629455781986/work
293
+requests-oauthlib==1.3.1
294
+requests-toolbelt==0.10.1
295
+rocketchat-API==1.28.1
296
+rope @ file:///opt/conda/conda-bld/rope_1643788605236/work
297
+rsa @ file:///tmp/build/80754af9/rsa_1614366226499/work
298
+Rtree @ file:///tmp/build/80754af9/rtree_1618420843093/work
299
+ruamel-yaml-conda @ file:///tmp/build/80754af9/ruamel_yaml_1616016711199/work
300
+ruamel.yaml==0.17.21
301
+ruamel.yaml.clib==0.2.7
302
+s3transfer @ file:///tmp/build/80754af9/s3transfer_1626435152308/work
303
+sanic==21.12.2
304
+Sanic-Cors==2.0.1
305
+sanic-jwt==1.8.0
306
+sanic-routing==0.7.2
307
+scikit-image @ file:///tmp/build/80754af9/scikit-image_1648214171611/work
308
+scikit-learn @ file:///tmp/build/80754af9/scikit-learn_1642617106979/work
309
+scikit-learn-intelex==2021.20220215.212715
310
+scipy @ file:///tmp/build/80754af9/scipy_1641555004408/work
311
+Scrapy @ file:///tmp/build/80754af9/scrapy_1646837771788/work
312
+seaborn @ file:///tmp/build/80754af9/seaborn_1629307859561/work
313
+SecretStorage @ file:///tmp/build/80754af9/secretstorage_1614022780358/work
314
+Send2Trash @ file:///tmp/build/80754af9/send2trash_1632406701022/work
315
+sentry-sdk==1.14.0
316
+service-identity @ file:///Users/ktietz/demo/mc3/conda-bld/service_identity_1629460757137/work
317
+sip==4.19.13
318
+six @ file:///tmp/build/80754af9/six_1644875935023/work
319
+sklearn-crfsuite==0.3.6
320
+slack-sdk==3.21.0
321
+smart-open @ file:///tmp/build/80754af9/smart_open_1623928409369/work
322
+sniffio @ file:///tmp/build/80754af9/sniffio_1614030464178/work
323
+snowballstemmer @ file:///tmp/build/80754af9/snowballstemmer_1637937080595/work
324
+sortedcollections @ file:///tmp/build/80754af9/sortedcollections_1611172717284/work
325
+sortedcontainers @ file:///tmp/build/80754af9/sortedcontainers_1623949099177/work
326
+soupsieve @ file:///tmp/build/80754af9/soupsieve_1636706018808/work
327
+Sphinx @ file:///opt/conda/conda-bld/sphinx_1643644169832/work
328
+sphinxcontrib-applehelp @ file:///home/ktietz/src/ci/sphinxcontrib-applehelp_1611920841464/work
329
+sphinxcontrib-devhelp @ file:///home/ktietz/src/ci/sphinxcontrib-devhelp_1611920923094/work
330
+sphinxcontrib-htmlhelp @ file:///tmp/build/80754af9/sphinxcontrib-htmlhelp_1623945626792/work
331
+sphinxcontrib-jsmath @ file:///home/ktietz/src/ci/sphinxcontrib-jsmath_1611920942228/work
332
+sphinxcontrib-qthelp @ file:///home/ktietz/src/ci/sphinxcontrib-qthelp_1611921055322/work
333
+sphinxcontrib-serializinghtml @ file:///tmp/build/80754af9/sphinxcontrib-serializinghtml_1624451540180/work
334
+spyder @ file:///tmp/build/80754af9/spyder_1636479868270/work
335
+spyder-kernels @ file:///tmp/build/80754af9/spyder-kernels_1634236920897/work
336
+SQLAlchemy @ file:///tmp/build/80754af9/sqlalchemy_1647581680159/work
337
+stack-data @ file:///opt/conda/conda-bld/stack_data_1646927590127/work
338
+statsmodels @ file:///tmp/build/80754af9/statsmodels_1648015433305/work
339
+sympy @ file:///tmp/build/80754af9/sympy_1647853653589/work
340
+tables @ file:///tmp/build/80754af9/pytables_1607975397488/work
341
+tabulate==0.8.9
342
+tarsafe==0.0.3
343
+TBB==0.2
344
+tblib @ file:///Users/ktietz/demo/mc3/conda-bld/tblib_1629402031467/work
345
+tenacity @ file:///tmp/build/80754af9/tenacity_1626248292117/work
346
+tensorboard==2.11.2
347
+tensorboard-data-server==0.6.1
348
+tensorboard-plugin-wit==1.8.1
349
+tensorflow==2.11.0
350
+tensorflow-addons==0.19.0
351
+tensorflow-estimator==2.11.0
352
+tensorflow-hub==0.12.0
353
+tensorflow-io-gcs-filesystem==0.32.0
354
+tensorflow-text==2.11.0
355
+termcolor==2.2.0
356
+terminado @ file:///tmp/build/80754af9/terminado_1644322582718/work
357
+terminaltables==3.1.10
358
+testpath @ file:///tmp/build/80754af9/testpath_1624638946665/work
359
+text-unidecode @ file:///Users/ktietz/demo/mc3/conda-bld/text-unidecode_1629401354553/work
360
+textdistance @ file:///tmp/build/80754af9/textdistance_1612461398012/work
361
+threadpoolctl @ file:///Users/ktietz/demo/mc3/conda-bld/threadpoolctl_1629802263681/work
362
+three-merge @ file:///tmp/build/80754af9/three-merge_1607553261110/work
363
+tifffile @ file:///tmp/build/80754af9/tifffile_1627275862826/work
364
+tinycss @ file:///tmp/build/80754af9/tinycss_1617713798712/work
365
+tldextract @ file:///opt/conda/conda-bld/tldextract_1646638314385/work
366
+toml @ file:///tmp/build/80754af9/toml_1616166611790/work
367
+tomli @ file:///tmp/build/80754af9/tomli_1637314251069/work
368
+toolz @ file:///tmp/build/80754af9/toolz_1636545406491/work
369
+tornado @ file:///tmp/build/80754af9/tornado_1606942317143/work
370
+tqdm @ file:///opt/conda/conda-bld/tqdm_1650891076910/work
371
+traitlets @ file:///tmp/build/80754af9/traitlets_1636710298902/work
372
+twilio==7.14.2
373
+Twisted @ file:///tmp/build/80754af9/twisted_1646835200521/work
374
+typed-ast @ file:///tmp/build/80754af9/typed-ast_1624953673314/work
375
+typeguard==3.0.2
376
+typing-utils==0.1.0
377
+typing_extensions==4.5.0
378
+tzdata==2023.3
379
+tzlocal==4.3
380
+ujson @ file:///tmp/build/80754af9/ujson_1648025916270/work
381
+Unidecode @ file:///tmp/build/80754af9/unidecode_1614712377438/work
382
+urllib3==1.26.15
383
+uvloop==0.17.0
384
+w3lib @ file:///Users/ktietz/demo/mc3/conda-bld/w3lib_1629359764703/work
385
+watchdog @ file:///tmp/build/80754af9/watchdog_1638367282716/work
386
+wcwidth @ file:///Users/ktietz/demo/mc3/conda-bld/wcwidth_1629357192024/work
387
+webencodings==0.5.1
388
+webexteamssdk==1.6.1
389
+websocket-client @ file:///tmp/build/80754af9/websocket-client_1614803975924/work
390
+websockets==10.4
391
+Werkzeug @ file:///opt/conda/conda-bld/werkzeug_1645628268370/work
392
+widgetsnbextension @ file:///tmp/build/80754af9/widgetsnbextension_1644992802045/work
393
+wrapt @ file:///tmp/build/80754af9/wrapt_1607574498026/work
394
+wurlitzer @ file:///tmp/build/80754af9/wurlitzer_1638368168359/work
395
+xarray @ file:///opt/conda/conda-bld/xarray_1639166117697/work
396
+xlrd @ file:///tmp/build/80754af9/xlrd_1608072521494/work
397
+XlsxWriter @ file:///opt/conda/conda-bld/xlsxwriter_1649073856329/work
398
+yapf @ file:///tmp/build/80754af9/yapf_1615749224965/work
399
+yarl @ file:///tmp/build/80754af9/yarl_1606939947528/work
400
+zict==2.0.0
401
+zipp @ file:///opt/conda/conda-bld/zipp_1641824620731/work
402
+zope.interface @ file:///tmp/build/80754af9/zope.interface_1625036153595/work

+ 557
- 0
Attendence/test_attendence.py Целия файл

@@ -0,0 +1,557 @@
1
from flask import Flask, render_template, request, redirect, Response, send_file
import multiprocessing
import face_recognition
import os
import asyncio
#from flask_cors import CORS
# Flask application object for the attendance *test* instance (served on
# port 5004 by the __main__ guard at the bottom of this file).
app = Flask(__name__)
#CORS(app)
# NOTE(review): module-level accumulator that submit() appends match results
# to.  It is never cleared, so it grows across requests and submit()'s
# `return lst[0]` always yields the first-ever entry — confirm this is
# intended (see notes on submit()).
lst = []
10
+
11
+
12
+@app.route('/', methods=['GET'])
13
def resume():
    """Health-check endpoint: confirm the test service is up."""
    # The HTML landing page is intentionally disabled; a plain string is
    # enough for monitoring probes.
    status_message = 'Attendence test app running'
    return status_message
16
+
17
def createEncodings(image):
    """
    Create face encodings for a given image and also return face locations in the given image.

    Parameters
    ----------
    image : cv2 mat (BGR ndarray)
        Image you want to detect faces from.

    Returns
    -------
    known_encodings : list of np array
        List of face encodings in a given image
    face_locations : list of tuples
        list of tuples (top, right, bottom, left) for face locations in a given image
    """
    # FIX: this documentation used to sit *after* the print() call, which made
    # it a no-op string expression instead of the function's docstring.
    print("Encoding")
    # Find face locations for all faces in an image
    face_locations = face_recognition.face_locations(image)
    # Create encodings for all faces in an image
    known_encodings = face_recognition.face_encodings(image, known_face_locations=face_locations)
    return known_encodings, face_locations
37
+#@app.route('/registered', methods=["POST","GET"])
38
def registered(url_list):
    """Persist a user's profile photo and create their face-encoding pickle.

    Parameters
    ----------
    url_list : dict (despite the name)
        JSON payload with at least 'FileName' (user id, reused for folder and
        file names) and 'FileData' (base64-encoded JPEG bytes as a str).

    Side effects
    ------------
    Writes ppeople/<FileName>/<FileName>.jpg and
    ppeople/<FileName>/<FileName>.pickle.

    Returns
    -------
    str
        Status message.  NOTE(review): when no face is detected and no
        previous pickle exists, os.remove() inside the nested helper raises
        FileNotFoundError, which propagates to the /test_register route and
        is swallowed there — confirm this exception-as-signal is intended.
    """
    input=url_list  # NOTE(review): shadows the builtin input()

    from pathlib import Path
    # Ensure the per-user storage directory exists (idempotent).
    Path("ppeople").mkdir(exist_ok=True)
    Path("ppeople/" + input["FileName"]).mkdir(exist_ok=True)

    a = input
    # print(a)
    x = a['FileData']
    # print(x)
    y = a['FileName']
    #z = a['FileType']
    z='jpg'  # file type is hard-coded; the 'FileType' field is ignored
    # CreatedBy=a['CreatedBy']

    name = y+ '.'+ z
    print(name)
    # print(y)
    # image = y.split("/")
    # filename=image[-1]

    # print(x)
    # Base64 text -> bytes; decodebytes() below does the actual decoding.
    img_data = x.encode()

    import base64

    with open("ppeople/" + input["FileName"] + "/" + name, "wb") as fh:
        fh.write(base64.decodebytes(img_data))
        fh.close()  # NOTE(review): redundant — the with-block already closes fh

    # Paths for the saved photo and for the encoding pickle derived from it.
    img = "ppeople/" + y + "/" + name
    saveLocation = "ppeople/" + y + "/" + y + ".pickle"

    ############ detecting no of faceses #######################


    # import cv2
    # import numpy as np
    # import dlib


    # # Connects to your computer's default camera
    # cap = cv2.imread(img)

    # # Detect the coordinates
    # detector = dlib.get_frontal_face_detector()

    # number_of_faces=[]
    # # Capture frames continuously
    # # while True:

    # # Capture frame-by-frame
    # # ret, frame = cap
    # frame = cap

    # # RGB to grayscale
    # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # faces = detector(gray)

    # # Iterator to count faces
    # i = 0
    # for face in faces:

    #     # Get the coordinates of faces
    #     x, y = face.left(), face.top()
    #     x1, y1 = face.right(), face.bottom()
    #     cv2.rectangle(frame, (x, y), (x1, y1), (0, 255, 0), 2)
    # # Increment iterator for each face in faces
    #     i = i+1

    # # Display the box and faces
    #     cv2.putText(frame, 'face num'+str(i), (x-10, y-10),
    #                 cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

    #     # if len(i)>1:
    #     #     print(i)
    #     number_of_faces.append(i)


    # if (len(number_of_faces))>1:
    #     print("Group Photo")
    #     return "Group Photo"
    # elif (len(number_of_faces))==1:
    #     print("Single Photo")
    #     pass




    def saveEncodings(encs, names, fname='encodings.pickle'):
        """
        Save encodings in a pickle file to be used in future.

        Parameters
        ----------
        encs : List of np arrays
            List of face encodings.
        names : List of strings
            List of names for each face encoding.
        fname : String, optional
            Name/Location for pickle file. The default is "encodings.pickle".

        Returns
        -------
        None.

        """

        data = []
        d = [{"name": nm, "encoding": enc} for (nm, enc) in zip(names, encs)]
        data.extend(d)

        encodingsFile = fname

        # dump the facial encodings data to disk
        print("[INFO] serializing encodings...")
        print("[INFO] Encodings Created sucessfully")
        f = open(encodingsFile, "wb")
        f.write(pickle.dumps(data))
        f.close()

    # Function to create encodings and get face locations


    def processKnownPeopleImages(img=img, saveLocation=saveLocation):
        """
        Process images of known people and create face encodings to compare in future.
        Eaach image should have just 1 face in it.
        Parameters
        ----------
        path : STRING, optional
            Path for known people dataset. The default is "C:/inetpub/vhosts/port82/wwwroot/_files/People".
            It should be noted that each image in this dataset should contain only 1 face.
        saveLocation : STRING, optional
            Path for storing encodings for known people dataset. The default is "./known_encodings.pickle in current directory".
        Returns
        -------
        None.
        """
        known_encodings = []
        known_names = []
        # for img in os.listdir(path):

        imgPath = img
        # Read image
        image = cv2.imread(imgPath)
        # NOTE(review): rsplit('.')[0] keeps the directory prefix, so the
        # stored "name" is e.g. "ppeople/<id>/<id>", not just "<id>" — confirm
        # downstream consumers expect that.
        name = img.rsplit('.')[0]
        # Resize
        try:
            # cv2.imread returns None for an unreadable file; .shape then
            # raises AttributeError and we bail out with a success message.
            print(image.shape)
        except AttributeError:
            return "Successfully saved encoding........."
        image = cv2.resize(image, (0, 0), fx=0.9, fy=0.9, interpolation=cv2.INTER_LINEAR)
        # Get locations and encodings
        encs, locs = createEncodings(image)
        try:
            known_encodings.append(encs[0])
        except IndexError:
            # No face found: remove any stale pickle.  NOTE(review): if the
            # pickle does not exist yet, os.remove raises FileNotFoundError,
            # which escapes this function and is swallowed by the route.
            os.remove(saveLocation)
            print('------------------------------------- save location --------------------------------')
            print(saveLocation)
            return "hello world!"


        # known_encodings.append(encs[0])
        known_names.append(name)
        for loc in locs:
            top, right, bottom, left = loc
        # Show Image
        # cv2.rectangle(image, (left, top), (right, bottom), color=(255, 0, 0), thickness=2)
        # cv2.imshow("Image", image)
        # cv2.waitKey(1)
        # cv2.destroyAllWindows()
        saveEncodings(known_encodings, known_names, saveLocation)

    # These function-local imports are visible to the nested helpers above
    # via closure, because the helpers only execute after this point.
    import cv2
    #import face_recognition
    import pickle

    processKnownPeopleImages(img, saveLocation)
    return 'Successfully saved encoding.........'
221
+
222
+
223
+# ********************************   COMPARUISION *********************************************************
224
+#@app.route('/submit', methods=["POST","GET"])
225
def submit(url_list):
    """Match an uploaded attendance photo against the user's registered face.

    Parameters
    ----------
    url_list : dict (despite the name)
        JSON payload with 'FileName' (user id) and 'FileData' (base64 JPEG).

    Side effects
    ------------
    Writes GGallery/<FileName>/<FileName>.jpg; appends a match message to the
    module-level list ``lst``.

    Returns
    -------
    str
        A status message.  NOTE(review): on a match this returns ``lst[0]`` —
        the *first-ever* entry of the shared global list, not this request's
        result (all entries are the constant 'Matched successfully', so the
        staleness is currently invisible).  If no match has ever been
        recorded, ``lst[0]`` raises IndexError, which the /test_detect route
        translates to 'failed' — confirm this exception-as-signal is intended.
    """

    from datetime import datetime
    import pytz
    # NOTE(review): variable names say NY but the timezone is Asia/Kolkata.
    # India_Date / India_Time are computed but never used below.
    tz_NY = pytz.timezone('Asia/Kolkata')
    datetime_NY = datetime.now(tz_NY)
    India_Date = (datetime_NY.strftime("%Y-%m-%d"))
    India_Date = str(India_Date)
    India_Time = (datetime_NY.strftime("%I:%M:%S %p"))
    India_Time = str(India_Time)
    input=url_list  # NOTE(review): shadows the builtin input()
    import pickle
    import cv2

    from pathlib import Path
    # Per-user gallery directory for incoming attendance shots (idempotent).
    Path("GGallery").mkdir(exist_ok=True)
    Path("GGallery/" + input["FileName"]).mkdir(exist_ok=True)

    a = input
    # print(a)
    x = a['FileData']
    # print(x)
    y = a['FileName']
   # z = a['FileType']
    z='jpg'  # file type is hard-coded; the 'FileType' field is ignored
    # CreatedBy=a['CreatedBy']

    name = y + '.' + z
    # print(name)
    # print(y)
    # image = y.split("/")
    # filename=image[-1]

    # print(x)
    # Base64 text -> bytes for decodebytes() below.
    img_data = x.encode()

    import base64


    with open("GGallery/" + input["FileName"] + "/" + name, "wb") as fh:
        fh.write(base64.decodebytes(img_data))
        fh.close()  # NOTE(review): redundant — the with-block already closes fh

    # Incoming photo and the registered user's encoding pickle.
    path = "GGallery/" + y + "/" + name
    pickle_location = "ppeople/" + y + "/" + y + ".pickle"
    import pathlib
    file = pathlib.Path(pickle_location)
    if file.exists ():
        pass
    else:
        # User never registered (or registration failed to produce a pickle).
        print ("pickle File not exist")
        print(name)
        return "Face not found in profile (please change your profile)"

    # Registered profile photo, re-checked below for multiple faces.
    check_faces="ppeople/" + y + "/" + y + ".jpg"
    print(check_faces)


    ############ detecting no of faceses #######################


    import cv2
    import numpy as np
    import dlib


    # Connects to your computer's default camera
    cap = cv2.imread(check_faces)

    # Detect the coordinates
    detector = dlib.get_frontal_face_detector()

    number_of_faces=[]
    # Capture frames continuously
    # while True:

    # Capture frame-by-frame
    # ret, frame = cap
    frame = cap

    # RGB to grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)

    # Iterator to count faces
    i = 0
    for face in faces:

        # Get the coordinates of faces
        # NOTE(review): this rebinds x and y, clobbering the FileData/FileName
        # variables from above.  All uses of the originals happen before this
        # loop, so it is harmless today — but fragile.
        x, y = face.left(), face.top()
        x1, y1 = face.right(), face.bottom()
        cv2.rectangle(frame, (x, y), (x1, y1), (0, 255, 0), 2)
    # Increment iterator for each face in faces
        i = i+1

    # Display the box and faces
        cv2.putText(frame, 'face num'+str(i), (x-10, y-10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # if len(i)>1:
        #     print(i)
        number_of_faces.append(i)


    if (len(number_of_faces))>1:
        # More than one face in the registered profile photo: reject.
        print("Group Photo")
        return "Group Photo"
    elif (len(number_of_faces))==1:
        print("Single Photo")
        pass
    # NOTE(review): zero detected faces falls through and continues — confirm.




    def readEncodingsPickle(fname):
        """
        Read Pickle file.

        Parameters
        ----------
        fname : String
            Name of pickle file.(Full location)

        Returns
        -------
        encodings : list of np arrays
            list of all saved encodings
        names : List of Strings
            List of all saved names

        """


        data = pickle.loads(open(fname, "rb").read())


        data = np.array(data)
        encodings = [d["encoding"] for d in data]
        names = [d["name"] for d in data]
        return encodings, names


    def compareFaceEncodings(unknown_encoding, known_encodings, known_names):
        """
        Compares face encodings to check if 2 faces are same or not.

        Parameters
        ----------
        unknown_encoding : np array
            Face encoding of unknown people.
        known_encodings : np array
            Face encodings of known people.
        known_names : list of strings
            Names of known people

        Returns
        -------
        acceptBool : Bool
            face matched or not
        duplicateName : String
            Name of matched face
        distance : Float
            Distance between 2 faces

        """
        duplicateName = ""
        distance = 0.0
        matches = face_recognition.compare_faces(known_encodings, unknown_encoding, tolerance=0.5)

        face_distances = face_recognition.face_distance(known_encodings, unknown_encoding)

        best_match_index = np.argmin(face_distances)

        distance = face_distances[best_match_index]
        if matches[best_match_index]:
            acceptBool = True
            duplicateName = known_names[best_match_index]
        else:
            acceptBool = False
            duplicateName = ""
        return acceptBool, duplicateName, distance

        #p = []


    def processDatasetImages(path=path, pickle_location=pickle_location):
        """
        Process image in dataset from where you want to separate images.
        It separates the images into directories of known people, groups and any unknown people images.
        Parameters
        ----------
        path : STRING, optional
            Path for known people dataset. The default is "D:/port1004/port1004/wwwroot/_files/People".
            It should be noted that each image in this dataset should contain only 1 face.
        saveLocation : STRING, optional
            Path for storing encodings for known people dataset. The default is "./known_encodings.pickle in current directory".

        Returns
        -------
        None.

        """
        # Read pickle file for known people to compare faces from

        people_encodings, names = readEncodingsPickle(pickle_location)
        # print(p)
        #  imgPath = path + img

        # Read image
        # path=r"C:\Users\katku\Pictures\final\100011460000611.jpg"
        image = cv2.imread(path)
        #orig = image.copy()

        # Resize
        image = cv2.resize(image, (0, 0), fx=0.9, fy=0.9, interpolation=cv2.INTER_LINEAR)

        # Get locations and encodings
        encs, locs = createEncodings(image)

        # Save image to a group image folder if more than one face is in image
        # if len(locs) > 1:
        #     saveImageToDirectory(orig, "Group", img)

        # Processing image for each face
        i = 0
        knownFlag = 0
        for loc in locs:
            top, right, bottom, left = loc
            unknown_encoding = encs[i]
            i += 1
            acceptBool, duplicateName, distance = compareFaceEncodings(unknown_encoding, people_encodings, names)
            if acceptBool:
                # saveImageToDirectory(orig, duplicateName,name)
                knownFlag = 1
        if knownFlag == 1:
            print("Match Found")


            #print(path)
            with_extension = path.split("/")[-1]
            without_extension = with_extension.split(".")[0]

            # output_s = {"FileID": without_extension,
            #             "Date": India_Date,
            #             "Time": India_Time}
            # output_json = json.dumps(output_s)
            output_json='Matched successfully'
            # `loc` here is the last face location from the loop above.
            print(loc)
            # Record into the module-level list read by the return below.
            lst.append(output_json)

            print(output_json)
            # exit()

        else:
            print('Not Matched')
            pass
            # saveImageToDirectory(orig, "0",name)


    # Function-local imports visible to the nested helpers via closure.
    import numpy as np
    import json

    processDatasetImages(path, pickle_location)
    # NOTE(review): raises IndexError when lst is empty (no match ever
    # recorded) — handled as 'failed' by the /test_detect route.
    return lst[0]
    #return 'matched successfully'
490
+
491
+
492
+@app.route('/test_detect', methods=["POST"])
493
def detect():
    """Flask endpoint (/test_detect): match the posted photo for attendance.

    Expects JSON {"FileName": <user id>, "FileData": <base64 jpeg>} and
    delegates the matching work to submit() in a worker process.

    Returns
    -------
    str
        submit()'s status message, or an error string when the user has no
        registered profile / no face could be matched.
    """
    # BUG FIX: the body used to be wrapped in `if __name__ == "__main__":`,
    # so under a WSGI server (module imported, __name__ != "__main__") the
    # view silently returned None and Flask raised a 500.  That guard belongs
    # at module level, not inside a request handler.
    payload = request.get_json()
    url_list = [payload]

    # A worker pool isolates the native face-recognition code from the
    # request handler (mirrors the /test_register route).
    with multiprocessing.Pool(processes=2) as pool:
        try:
            results = pool.map(submit, url_list)
        except FileNotFoundError:
            # No encoding pickle exists for this user yet.
            return 'plese get registered with your PhotoID'
        except IndexError:
            # submit() recorded no match (its lst[0] lookup raised).
            #return 'unable to recognize face'
            return 'failed'
    # The context manager already terminated the pool; the original's trailing
    # pool.close() was redundant and has been dropped.
    return results[0]
514
+
515
+
516
+
517
+
518
+@app.route('/test_register', methods=["POST"])
519
def register():
    """Flask endpoint (/test_register): store a profile photo and encoding.

    Expects JSON with 'FileName', 'FileData' (base64 jpeg) and 'FilePath';
    requests whose FilePath contains "c02" are rejected as a legacy
    integration.  The heavy lifting is delegated to registered() in a
    worker process.

    Returns
    -------
    str
        A fixed success message (failures inside the worker are deliberately
        swallowed, preserving the original best-effort behaviour).
    """
    print("hello start..........")
    # BUG FIX: the body used to be wrapped in `if __name__ == "__main__":`,
    # which made the view return None whenever the app was run by a WSGI
    # server instead of `python <file>`.
    Dataset = request.get_json()
    url_list = [Dataset]

    UserLocaton = Dataset["FilePath"]
    print(UserLocaton)
    if "c02" in UserLocaton:
        # Legacy host in the integration URL — ask the caller to update it.
        return "Please update url in integration"

    # Worker pool isolates the native face-recognition code.
    with multiprocessing.Pool(processes=2) as pool:
        try:
            pool.map(registered, url_list)
        except IndexError:
            # No face found in the uploaded photo; keep best-effort behaviour.
            print('face not found')
        except FileNotFoundError:
            # registered() signals "no face, no prior pickle" this way.
            pass
    # The context manager already terminated the pool (the original's trailing
    # pool.close() was redundant).  The route reports success unconditionally,
    # exactly as before.
    return 'Successfully saved encoding.........'
552
+
553
+
554
+
555
+
556
if __name__ == "__main__":
    # Entry point for direct execution: serve the test instance on all
    # interfaces, port 5004, with the debugger disabled.
    app.run(host='0.0.0.0',port =5004,debug=False)

+ 559
- 0
Attendence/uat_attendence.py Целия файл

@@ -0,0 +1,559 @@
1
+from flask import Flask, render_template, request, redirect, Response, send_file
2
+import multiprocessing
3
+import face_recognition
4
+#from numba import jit
5
+import numpy as np
6
+import os
7
+#from flask_cors import CORS
8
app = Flask(__name__)
#CORS(app)
# Module-level accumulator that submit() appends match results to.
# NOTE(review): it is never cleared; in practice each request runs submit()
# in a freshly spawned worker process, so entries do not accumulate across
# requests — confirm before ever calling submit() in-process.
lst = []
11
+
12
+
13
@app.route('/', methods=['GET'])
def resume():
    """Health-check endpoint confirming the service is up."""
    status_message = 'Attendence app running'
    return status_message
17
+
18
def createEncodings(image):
    """
    Create face encodings for a given image and also return face locations in the given image.

    Parameters
    ----------
    image : cv2 mat
        Image you want to detect faces from.

    Returns
    -------
    known_encodings : list of np array
        List of face encodings in a given image
    face_locations : list of tuples
        list of tuples for face locations in a given image
    """
    # The docstring above previously sat *after* print("Encoding"), which
    # made it a discarded string expression instead of a real docstring.
    print("Encoding")
    # Find face locations for all faces in an image
    face_locations = face_recognition.face_locations(image)
    # Create encodings for all faces in an image
    known_encodings = face_recognition.face_encodings(image, known_face_locations=face_locations)
    return known_encodings, face_locations
38
#@app.route('/registered', methods=["POST","GET"])
def registered(url_list):
    """Decode a registration payload and store a face-encoding pickle.

    Runs inside a multiprocessing worker (see the register routes). Expects
    a dict with 'FileName' (person id) and 'FileData' (base64 image text);
    writes the image to ppeople/<FileName>/<FileName>.jpg and a pickle of the
    face encoding next to it.

    NOTE(review): despite the name, the parameter is a single dict, not a
    list — pool.map() delivers one element of url_list per call.
    """
    input=url_list

    from pathlib import Path
    # Ensure the per-person storage directory exists (idempotent).
    Path("ppeople").mkdir(exist_ok=True)
    Path("ppeople/" + input["FileName"]).mkdir(exist_ok=True)

    a = input
    x = a['FileData']  # base64-encoded image content (str expected)
    y = a['FileName']  # used both as directory name and file stem
    #z = a['FileType']
    z='jpg'            # file type is hard-coded; any 'FileType' field is ignored

    name = y+ '.'+ z
    print(name)

    # If FileData is not a str (no .encode), the payload is reported as a
    # success and nothing is written.
    # NOTE(review): this silently "succeeds" on malformed payloads — confirm
    # this is the intended contract with the caller.
    try:
        img_data = x.encode()
    except AttributeError:
        return "Successfully saved encoding........."

    import base64

    # Persist the decoded image bytes under the person's directory.
    with open("ppeople/" + input["FileName"] + "/" + name, "wb") as fh:
        fh.write(base64.decodebytes(img_data))

    img = "ppeople/" + y + "/" + name
    saveLocation = "ppeople/" + y + "/" + y + ".pickle"

    def saveEncodings(encs, names, fname='encodings.pickle'):
        """
        Save encodings in a pickle file to be used in future.

        Parameters
        ----------
        encs : List of np arrays
            List of face encodings.
        names : List of strings
            List of names for each face encoding.
        fname : String, optional
            Name/Location for pickle file. The default is "encodings.pickle".

        Returns
        -------
        None.
        """

        data = []
        d = [{"name": nm, "encoding": enc} for (nm, enc) in zip(names, encs)]
        data.extend(d)

        encodingsFile = fname

        # dump the facial encodings data to disk
        print("[INFO] serializing encodings...")
        print("[INFO] Encodings Created sucessfully")
        f = open(encodingsFile, "wb")
        f.write(pickle.dumps(data))
        f.close()

    def processKnownPeopleImages(img=img, saveLocation=saveLocation):
        """
        Create and save the face encoding for one known-person image.

        The image should contain exactly one face; only the first detected
        face encoding is stored.

        Parameters
        ----------
        img : STRING
            Path of the image file to encode.
        saveLocation : STRING
            Path where the encodings pickle is written.

        Returns
        -------
        None on success; a status string on early exit.
        """
        known_encodings = []
        known_names = []

        imgPath = img
        # Read image
        image = cv2.imread(imgPath)
        name = img.rsplit('.')[0]
        # cv2.imread returns None for unreadable files; image.shape then
        # raises AttributeError and the upload is reported as a success.
        # NOTE(review): this masks unreadable/corrupt images — confirm intended.
        try:
            print(image.shape)
        except AttributeError:
            return "Successfully saved encoding........."
        image = cv2.resize(image, (0, 0), fx=0.9, fy=0.9, interpolation=cv2.INTER_LINEAR)
        # Get locations and encodings
        encs, locs = createEncodings(image)
        # No face found: delete any previously saved pickle so a stale
        # encoding is not used for attendance matching.
        # NOTE(review): os.remove raises FileNotFoundError when no pickle
        # exists yet; the route handlers catch that and still report success.
        try:
            known_encodings.append(encs[0])
        except IndexError:
            os.remove(saveLocation)
            print('------------------------------------- save location --------------------------------')
            print(saveLocation)
            return "hello world!"

        known_names.append(name)
        for loc in locs:
            top, right, bottom, left = loc
        saveEncodings(known_encodings, known_names, saveLocation)

    # Local imports: this function runs in worker processes, so the heavy
    # modules are imported here rather than at module load time.
    import cv2
    #import face_recognition
    import pickle

    processKnownPeopleImages(img, saveLocation)
    return 'Successfully saved encoding.........'
224
+
225
+
226
# ********************************   COMPARUISION *********************************************************
#@app.route('/submit', methods=["POST","GET"])
def submit(url_list):
    """Compare an attendance photo against a person's registered encoding.

    Runs inside a multiprocessing worker (see the detect route). Expects a
    dict with 'FileName' (person id) and 'FileData' (base64 image). Saves the
    image under GGallery/<date>/<FileName>/, rejects registrations whose
    profile photo holds more than one face, then matches the new photo
    against ppeople/<FileName>/<FileName>.pickle.

    Returns a status string. When no face matches, `return lst[0]` raises
    IndexError on the empty list — the caller maps that to 'failed'.
    """

    from datetime import datetime
    import pytz
    # Dates are always taken in Indian local time, regardless of server TZ.
    tz_NY = pytz.timezone('Asia/Kolkata')
    datetime_NY = datetime.now(tz_NY)
    India_Date = (datetime_NY.strftime("%Y-%m-%d"))
    India_Date = str(India_Date)
    input=url_list
    import pickle
    import cv2

    from pathlib import Path
    # One gallery folder per day, one subfolder per person (idempotent).
    Path("GGallery/"+ India_Date).mkdir(exist_ok=True)
    Path("GGallery/"+ India_Date +'/'+ input["FileName"]).mkdir(exist_ok=True)

    a = input
    x = a['FileData']  # base64-encoded image content
    y = a['FileName']  # person id; reused as directory name and file stem
    z='jpg'            # file type is hard-coded

    name = y + '.' + z

    img_data = x.encode()

    import base64

    # Persist the attendance photo into today's gallery.
    with open("GGallery/"+India_Date+'/' + input["FileName"] + "/" + name, "wb") as fh:
        fh.write(base64.decodebytes(img_data))

    path = "GGallery/" +India_Date+'/'+ y + "/" + name
    pickle_location = "ppeople/" + y + "/" + y + ".pickle"
    import pathlib
    # Without a stored encoding the person was never successfully registered.
    file = pathlib.Path(pickle_location)
    if file.exists ():
        pass
    else:
        print ("pickle File not exist")
        print(name)
        return "Face not found in profile (please change your profile)"

    # Profile photo saved at registration time; verified below to hold
    # exactly one face.
    check_faces="ppeople/" + y + "/" + y + ".jpg"
    print(check_faces)

    ############ detecting no of faceses #######################

    import cv2
    import numpy as np
    import dlib

    # Load the registered profile photo (not the new attendance photo).
    cap = cv2.imread(check_faces)

    # Detect the coordinates
    detector = dlib.get_frontal_face_detector()

    number_of_faces=[]
    frame = cap

    # RGB to grayscale
    # NOTE(review): cv2.cvtColor raises if the profile jpg is missing or
    # unreadable (cap is None) — confirm that file always exists whenever
    # the pickle does.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)

    # Iterator to count faces
    i = 0
    for face in faces:

        # Get the coordinates of faces
        # NOTE(review): this rebinds x and y, clobbering FileData/FileName;
        # both have already been consumed above, so it is harmless here.
        x, y = face.left(), face.top()
        x1, y1 = face.right(), face.bottom()
        cv2.rectangle(frame, (x, y), (x1, y1), (0, 255, 0), 2)
    # Increment iterator for each face in faces
        i = i+1

    # Display the box and faces
        cv2.putText(frame, 'face num'+str(i), (x-10, y-10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        number_of_faces.append(i)

    # The registered profile must contain exactly one face. A zero-face
    # profile falls through both branches and matching proceeds anyway.
    if (len(number_of_faces))>1:
        print("Group Photo")
        return "Group Photo"
    elif (len(number_of_faces))==1:
        print("Single Photo")
        pass

    def readEncodingsPickle(fname):
        """
        Read Pickle file.

        Parameters
        ----------
        fname : String
            Name of pickle file.(Full location)

        Returns
        -------
        encodings : list of np arrays
            list of all saved encodings
        names : List of Strings
            List of all saved names
        """

        data = pickle.loads(open(fname, "rb").read())

        data = np.array(data)
        encodings = [d["encoding"] for d in data]
        names = [d["name"] for d in data]
        return encodings, names

    def compareFaceEncodings(unknown_encoding, known_encodings, known_names):
        """
        Compares face encodings to check if 2 faces are same or not.

        Parameters
        ----------
        unknown_encoding : np array
            Face encoding of unknown people.
        known_encodings : np array
            Face encodings of known people.
        known_names : list of strings
            Names of known people

        Returns
        -------
        acceptBool : Bool
            face matched or not
        duplicateName : String
            Name of matched face
        distance : Float
            Distance between 2 faces
        """
        duplicateName = ""
        distance = 0.0
        # tolerance=0.54 is slightly stricter than the library default (0.6).
        matches = face_recognition.compare_faces(known_encodings, unknown_encoding, tolerance=0.54)
        face_distances = face_recognition.face_distance(known_encodings, unknown_encoding)

        best_match_index = np.argmin(face_distances)

        distance = face_distances[best_match_index]
        if matches[best_match_index]:
            acceptBool = True
            duplicateName = known_names[best_match_index]
        else:
            acceptBool = False
            duplicateName = ""
        return acceptBool, duplicateName, distance

    def processDatasetImages(path=path, pickle_location=pickle_location):
        """
        Match every face in the attendance photo against the stored encoding.

        Appends 'Matched successfully' to the module-level lst when any face
        matches; otherwise only logs 'Not Matched'.

        Parameters
        ----------
        path : STRING
            Attendance image saved above.
        pickle_location : STRING
            Pickle of the registered person's encoding.

        Returns
        -------
        None.
        """
        # Read pickle file for known people to compare faces from
        people_encodings, names = readEncodingsPickle(pickle_location)

        # Read image
        image = cv2.imread(path)

        # Resize
        image = cv2.resize(image, (0, 0), fx=0.9, fy=0.9, interpolation=cv2.INTER_LINEAR)

        # Get locations and encodings
        encs, locs = createEncodings(image)

        # Processing image for each face
        i = 0
        knownFlag = 0
        for loc in locs:
            top, right, bottom, left = loc
            unknown_encoding = encs[i]
            i += 1
            acceptBool, duplicateName, distance = compareFaceEncodings(unknown_encoding, people_encodings, names)
            if acceptBool:
                knownFlag = 1
        if knownFlag == 1:
            print("Match Found")

            with_extension = path.split("/")[-1]
            without_extension = with_extension.split(".")[0]

            output_json='Matched successfully'
            print(loc)
            lst.append(output_json)

            print(output_json)

        else:
            print('Not Matched')
            pass

    import numpy as np
    import json

    processDatasetImages(path, pickle_location)
    # lst is module-level; if no match was appended this raises IndexError,
    # which the detect route turns into the 'failed' response.
    return lst[0]
490
+    #return 'matched successfully'
491
+
492
+
493
@app.route('/uat01_detect', methods=["POST"])
def detect():
    """Attendance-check endpoint (UAT instance).

    Expects a JSON body with 'FileName' (person id) and 'FileData' (base64
    photo); runs submit() in a worker process and relays its status string.

    Returns
    -------
    str
        submit()'s result ('Matched successfully', 'Group Photo', ...),
        'please get registered with your PhotoID' when no encoding pickle
        exists for the person, or 'failed' when no face matched.
    """
    url_list = []
    Dataset = request.get_json()
    url_list.append(Dataset)

    # The previous `if __name__ == "__main__":` guard around this body made
    # the route return None (HTTP 500) when served by a WSGI server; routes
    # are never executed on import, so the guard was removed.
    # One request carries one image, so a single worker process suffices;
    # the old cpu_count()*2 pool spawned idle workers on every request, and
    # the trailing pool.close() after the `with` block was dead code.
    with multiprocessing.Pool(processes=1) as pool:
        try:
            results = pool.map(submit, url_list)
        except FileNotFoundError:
            # submit() could not find the registered encoding pickle.
            # (Typo 'plese' fixed in this user-facing message.)
            return 'please get registered with your PhotoID'
        except IndexError:
            # submit() found no matching face (lst[0] on an empty list).
            return 'failed'

    return results[0]
515
+
516
+
517
+
518
@app.route('/uat01_register', methods=["POST"])
def register():
    """Registration endpoint (UAT instance).

    Expects a JSON body with 'FileName', 'FileData' (base64 photo) and
    'FilePath'; stores the photo and its face encoding via registered() in a
    worker process.

    Returns
    -------
    str
        Always 'Successfully saved encoding.........' — failures inside
        registered() are logged but still reported as success, preserving
        the original contract that existing integrations rely on.
    """
    print("hello start..........")
    url_list = []
    Dataset = request.get_json()
    url_list.append(Dataset)
    UserLocation = Dataset["FilePath"]
    print(UserLocation)

    # The previous `if __name__ == "__main__":` guard around this body made
    # the route return None (HTTP 500) when served by a WSGI server; routes
    # are never executed on import, so the guard was removed.
    # One request carries one image, so a single worker process suffices;
    # the old cpu_count()*2 pool spawned idle workers on every request, and
    # the trailing pool.close() after the `with` block was dead code.
    with multiprocessing.Pool(processes=1) as pool:
        try:
            pool.map(registered, url_list)
        except IndexError:
            # registered() found no face in the uploaded image.
            print('face not found')
        except FileNotFoundError:
            # registered() tried to delete an encoding pickle that never
            # existed; nothing to clean up.
            pass

    return 'Successfully saved encoding.........'
554
+
555
+
556
+
557
+
558
# Start the Flask development server when this file is executed directly
# (this UAT instance listens on port 5005).
# NOTE(review): host 0.0.0.0 exposes the service on all interfaces — confirm
# this is intended outside the internal network.
if __name__ == "__main__":
    app.run(host='0.0.0.0',port =5005,debug=False)

Loading…
Отказ
Запис