import datetime
import json
import os
import pathlib
import pickle

import click
import cv2
import face_recognition
import numpy as np
import requests


@click.command()
@click.argument('eventid', default='')
def predict456(eventid):
    """Match gallery photos for an event against the ALL_UNQ reference faces.

    Builds face encodings for every known person under ``./ALL_UNQ/<eventid>/``,
    scans ``./1/CopyGallery/<eventid>/`` for matches, copies matched images into
    per-person folders under ``Allunq_CopyGallery``, writes CSV/JSON match
    reports, and POSTs the JSON payload to the Bizgaze endpoint.

    Parameters
    ----------
    eventid : str
        Event identifier used to build the People/Gallery directory paths.

    Returns
    -------
    str
        Elapsed wall-clock time of the run, as ``str(timedelta)``.
    """
    original_working_directory = os.getcwd()
    # Networked share that holds the event image folders.
    # NOTE(review): raw string keeps the doubled backslashes of the original;
    # Windows tolerates repeated separators in UNC paths.
    new_networked_directory = r'\\192.168.88.99\\Bizgaze\\port6003\\wwwroot\\_files\\'
    os.chdir(new_networked_directory)

    # Reference faces (one face per image) and the gallery to classify.
    People = "./ALL_UNQ/" + eventid + "/"
    Gallery = './1/CopyGallery/' + eventid + "/"

    x = datetime.datetime.now()
    print('ALLunq_copy_gallery Running')
    print('Execution Started at:', x)

    def saveEncodings(encs, names, fname='encodings.pickle'):
        """Serialize face encodings and their names to a pickle file.

        Parameters
        ----------
        encs : list of np.ndarray
            Face encodings.
        names : list of str
            Name for each encoding (same order as ``encs``).
        fname : str, optional
            Output pickle path. Default ``"encodings.pickle"``.
        """
        data = [{"name": nm, "encoding": enc} for (nm, enc) in zip(names, encs)]
        print("[INFO] serializing encodings...")
        # 'with' guarantees the file handle is closed even on write errors.
        with open(fname, "wb") as f:
            f.write(pickle.dumps(data))

    def readEncodingsPickle(fname):
        """Load encodings and names previously saved by ``saveEncodings``.

        Parameters
        ----------
        fname : str
            Path of the pickle file.

        Returns
        -------
        (list of np.ndarray, list of str)
            Saved encodings and the matching names.
        """
        # NOTE(review): pickle.loads is only safe because this file is produced
        # by this same script; never point it at untrusted input.
        with open(fname, "rb") as f:
            data = pickle.loads(f.read())
        encodings = [d["encoding"] for d in data]
        names = [d["name"] for d in data]
        return encodings, names

    def createEncodings(image):
        """Detect faces in an image and compute their encodings.

        Parameters
        ----------
        image : np.ndarray (cv2 mat)
            Image to detect faces in.

        Returns
        -------
        (list of np.ndarray, list of tuple)
            Face encodings and the (top, right, bottom, left) location of
            each detected face.
        """
        print("Encoding")
        face_locations = face_recognition.face_locations(image)
        known_encodings = face_recognition.face_encodings(
            image, known_face_locations=face_locations)
        return known_encodings, face_locations

    def compareFaceEncodings(unknown_encoding, known_encodings, known_names):
        """Compare one unknown face against all known faces.

        Parameters
        ----------
        unknown_encoding : np.ndarray
            Encoding of the face to identify.
        known_encodings : list of np.ndarray
            Encodings of known people.
        known_names : list of str
            Names aligned with ``known_encodings``.

        Returns
        -------
        (bool, str, float)
            Whether a match was accepted, the matched name ("" if none),
            and the distance to the best candidate (0.0 if no candidates).
        """
        # Guard: argmin on an empty distance array would raise ValueError.
        if not known_encodings:
            return False, "", 0.0
        matches = face_recognition.compare_faces(
            known_encodings, unknown_encoding, tolerance=0.47)
        face_distances = face_recognition.face_distance(
            known_encodings, unknown_encoding)
        best_match_index = np.argmin(face_distances)
        distance = face_distances[best_match_index]
        if matches[best_match_index]:
            return True, known_names[best_match_index], distance
        return False, "", distance

    # Accumulates [saved_image_path] rows; flushed by f_CSVwrite().
    p = []

    def f_CSVwrite():
        """Build the match report from ``p``, save CSV/JSON, POST to Bizgaze.

        Reads the accumulated saved-image paths, joins them against the
        gallery file listing, writes ``Zero_Gallery123254.csv`` and
        ``events12554.json``, POSTs the JSON to the unregistereduser
        endpoint, then clears ``p``.
        """
        import pandas as pd  # heavy import kept local, as in the original

        q = pd.DataFrame(p)
        df = q.groupby([0], as_index=False).count()
        z = df[0].str.split('/', expand=True)
        z.to_csv('zzzzzzzzzzzzz.csv', index=False)

        df2 = pd.read_csv('./zzzzzzzzzzzzz.csv')
        df2.rename({df2.columns[-1]: 'test'}, axis=1, inplace=True)
        df2.rename({df2.columns[-2]: 'Matched'}, axis=1, inplace=True)
        df2 = df2[['Matched', 'test']]

        # Full listing of the gallery tree, split into path components so the
        # final component ('test', the file name) can be joined on.
        c = []
        for root, dirs, files in os.walk(Gallery, topdown=False):
            for name in files:
                c.append(os.path.join(root, name))
        df = pd.DataFrame(c)
        df1 = df[0].str.split("/", expand=True)
        df1.rename({df1.columns[-1]: 'test'}, axis=1, inplace=True)

        merge = pd.merge(df2, df1, on='test', how='left')
        merge.rename({merge.columns[-1]: 'EventName'}, axis=1, inplace=True)

        # File name without extension.
        mergesplit = merge.loc[:, 'test'].str.split(".", expand=True)
        mergesplit.rename({mergesplit.columns[-2]: 'ImageName1'},
                          axis=1, inplace=True)
        mergesplit = mergesplit.loc[:, 'ImageName1']

        # Fixed: original had a stray unary '+' ("'\\' + + merge['test']").
        merge['Imagepath'] = ("\\_files\\1\\Gallery\\" + merge['EventName']
                              + '\\' + merge['test'])
        merge['Matched'] = ('\\_files\\ALL_UNQ\\' + eventid + '\\'
                            + df2['Matched'] + '.jpg')
        merge["ImageName"] = df2['Matched'] + '.jpg'

        r = pd.concat([merge, mergesplit], axis=1, join='inner')
        report = r.dropna(subset=['Matched'])
        column_list = ['Matched', 'ImageName', 'Imagepath', 'ImageName1',
                       'EventName']
        report[column_list].to_csv('Zero_Gallery123254.csv', index=False)
        report[column_list].to_json('events12554.json', orient="records")

        with open('events12554.json', 'r') as json_file:
            json_load = json.load(json_file)

        url = "https://eventxstreamnew.bizgaze.com:5443/apis/v4/bizgaze/integrations/testevents/unregistereduser"
        # Endpoint expects a bare object stream, not a JSON array.
        payload = json.dumps(json_load).replace("]", "").replace("[", "")
        print(payload)
        headers = {
            # SECURITY(review): hard-coded credential — move to config/env.
            'Authorization': 'stat e44ced3eff684aa9b932672ea8406029',
            'Content-Type': 'application/json'
        }
        response = requests.request("POST", url, headers=headers, data=payload)
        print("##############################################################")
        print(response.text)
        p.clear()

    def saveImageToDirectory(image, name, imageName):
        """Save a matched image under ``Allunq_CopyGallery/<name>/``.

        Parameters
        ----------
        image : np.ndarray (cv2 mat)
            Image to save.
        name : str
            Sub-directory (matched person's name, or "0" for unmatched).
        imageName : str
            File name for the saved image.
        """
        path = original_working_directory + "/Allunq_CopyGallery/" + name
        # exist_ok collapses the original's redundant double existence check.
        os.makedirs(path, exist_ok=True)
        cv2.imwrite(path + "/" + imageName, image)
        # Record the saved path and flush the report after every save,
        # matching the original's per-image reporting behavior.
        p.append([path + "/" + imageName])
        f_CSVwrite()

    def processKnownPeopleImages(path=People,
                                 saveLocation="./Zero_gallery_known_encodings.pickle"):
        """Encode every known person's face and pickle the results.

        Each image under ``path`` must contain exactly one face; images with
        no detectable face are deleted from the People directory.

        Parameters
        ----------
        path : str, optional
            Known-people directory. Defaults to the event's ALL_UNQ folder.
        saveLocation : str, optional
            Where to pickle the encodings.
        """
        known_encodings = []
        known_names = []
        for img in os.listdir(path):
            # Fixed: remove Windows thumbnail cache BEFORE trying to read it —
            # the original imread'd it first, crashing on resize(None).
            if img == "Thumbs.db":
                thumbs = pathlib.Path(path + "Thumbs.db")
                if thumbs.exists():
                    os.remove(path + "Thumbs.db")
                continue
            imgPath = path + img
            print(imgPath)
            image = cv2.imread(imgPath)
            if image is None:
                # Unreadable/non-image file — skip instead of crashing.
                continue
            name = img.rsplit('.')[0]
            image = cv2.resize(image, (0, 0), fx=0.9, fy=0.9,
                               interpolation=cv2.INTER_LINEAR)
            encs, locs = createEncodings(image)
            if not encs:
                # No face found: drop the reference image, as before — but do
                # NOT append the name (the original did, desynchronizing the
                # encodings/names lists and corrupting later matches).
                os.remove(People + img)
                continue
            known_encodings.append(encs[0])
            known_names.append(name)
        saveEncodings(known_encodings, known_names, saveLocation)

    def processDatasetImages(saveLocation="./Gallery_encodings.pickle"):
        """Classify every gallery image against the known-people encodings.

        Each face found in a gallery image is compared to the pickled known
        encodings; the image is copied into the matched person's folder, or
        into folder "0" when no face matches.

        Parameters
        ----------
        saveLocation : str, optional
            Unused here; kept for signature compatibility.
        """
        people_encodings, names = readEncodingsPickle(
            "./Zero_gallery_known_encodings.pickle")
        for root, dirs, files in os.walk(Gallery, topdown=False):
            for name in files:
                s = os.path.join(root, name)
                image = cv2.imread(s)
                if image is None:
                    # Skip unreadable files instead of crashing on resize.
                    continue
                orig = image.copy()
                image = cv2.resize(image, (0, 0), fx=0.9, fy=0.9,
                                   interpolation=cv2.INTER_LINEAR)
                encs, locs = createEncodings(image)
                knownFlag = 0
                for i, loc in enumerate(locs):
                    acceptBool, duplicateName, distance = compareFaceEncodings(
                        encs[i], people_encodings, names)
                    if acceptBool:
                        saveImageToDirectory(orig, duplicateName, name)
                        knownFlag = 1
                if knownFlag == 1:
                    print("Match Found")
                else:
                    saveImageToDirectory(orig, "0", name)

    def main():
        """Run the full pipeline: encode known people, then classify gallery."""
        processKnownPeopleImages()
        processDatasetImages()
        print("Completed")

    if __name__ == "__main__":
        main()

    y = datetime.datetime.now()
    print('Completed at:', y)
    z = y - x
    print('Time Taken:', z)
    return (str(y - x))


predict456()