import pickle
import numpy as np
import face_recognition
import os
from flask import Flask, render_template, request, redirect, send_file
# import shutil
import cv2

app = Flask(__name__)
app.config["IMAGE_UPLOADS"] = "C:/Users/Bizgaze/PycharmProjects/face_recogniction/People"
datasetPath = "/opt/bizgaze/events.bizgaze.app/wwwroot/_files/1/Gallery/"
peoplePath = "/opt/bizgaze/events.bizgaze.app/wwwroot/_files/People/"

@app.route('/', methods=['GET'])
def home():
    return render_template('index.html')


@app.route('/Display', methods=['GET', "POST"])
def Display():
    return render_template('Display.html')


@app.route("/upload", methods=["GET", "POST"])
def upload():
    if request.method == "POST":
        if request.files:
            image = request.files["image"]
            try:
                image.save(os.path.join(
                    app.config["IMAGE_UPLOADS"], image.filename))
            except IsADirectoryError:
                return render_template('index.html')
            # image.save(os.path.join(
            #     app.config["IMAGE_UPLOADS"], image.filename))

            print("Image saved")

            return redirect(request.url)

    return 'ok'
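

# Example client call for the /upload route above (illustrative only; the form
# field must be named "image", host/port taken from app.run at the bottom):
#   curl -F "image=@some_photo.jpg" http://localhost:8081/upload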


@app.route('/predict', methods=["GET", "POST"])
def predict():
    Dataset = request.get_json()
    peoplePath = Dataset['People']
    print(peoplePath)
    datasetPath = Dataset['Gallery']
    print(datasetPath)

    print('starting')
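
    # Based on the keys read above, the expected JSON body looks roughly like
    # (the paths below are the module-level defaults, shown only as an example):
    # {
    #     "People":  "/opt/bizgaze/events.bizgaze.app/wwwroot/_files/People/",
    #     "Gallery": "/opt/bizgaze/events.bizgaze.app/wwwroot/_files/1/Gallery/"
    # }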

    def saveEncodings(encs, names, fname="encodings.pickle"):
        """
        Save encodings in a pickle file to be used in future.

        Parameters
        ----------
        encs : List of np arrays
            List of face encodings.
        names : List of strings
            List of names for each face encoding.
        fname : String, optional
            Name/Location for pickle file. The default is "encodings.pickle".

        Returns
        -------
        None.

        """
        print('encoding')

        data = [{"name": nm, "encoding": enc} for (nm, enc) in zip(names, encs)]

        # dump the facial encodings data to disk
        print("[INFO] serializing encodings...")
        with open(fname, "wb") as f:
            f.write(pickle.dumps(data))

    # Function to read encodings
    def readEncodingsPickle(fname):
        """
        Read Pickle file.

        Parameters
        ----------
        fname : String
            Name of pickle file (full location).

        Returns
        -------
        encodings : list of np arrays
            list of all saved encodings
        names : List of Strings
            List of all saved names

        """
        with open(fname, "rb") as f:
            data = pickle.loads(f.read())
        encodings = [d["encoding"] for d in data]
        names = [d["name"] for d in data]
        return encodings, names

    # Function to create encodings and get face locations
    def createEncodings(image):
        """
        Create face encodings for a given image and also return face locations in the given image.

        Parameters
        ----------
        image : cv2 mat
            Image you want to detect faces from.

        Returns
        -------
        known_encodings : list of np array
            List of face encodings in a given image
        face_locations : list of tuples
            list of tuples for face locations in a given image

        """
        # Find face locations for all faces in an image.
        # Note: cv2.imread returns BGR while face_recognition works on RGB;
        # converting with cv2.cvtColor(image, cv2.COLOR_BGR2RGB) first may
        # improve accuracy, but the original BGR input is kept here.
        face_locations = face_recognition.face_locations(image)

        # Create encodings for all faces in an image
        known_encodings = face_recognition.face_encodings(image, known_face_locations=face_locations)
        return known_encodings, face_locations

    # Function to compare encodings
    def compareFaceEncodings(unknown_encoding, known_encodings, known_names):
        """
        Compare face encodings to check whether two faces are the same.

        Parameters
        ----------
        unknown_encoding : np array
            Face encoding of an unknown person.
        known_encodings : np array
            Face encodings of known people.
        known_names : list of strings
            Names of known people.

        Returns
        -------
        acceptBool : Bool
            face matched or not
        duplicateName : String
            Name of matched face
        distance : Float
            Distance between the 2 faces

        """
        matches = face_recognition.compare_faces(known_encodings, unknown_encoding, tolerance=0.47)
        face_distances = face_recognition.face_distance(known_encodings, unknown_encoding)
        best_match_index = np.argmin(face_distances)
        distance = face_distances[best_match_index]
        if matches[best_match_index]:
            acceptBool = True
            duplicateName = known_names[best_match_index]
        else:
            acceptBool = False
            duplicateName = ""
        return acceptBool, duplicateName, distance
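
    # Note on the threshold above: face_recognition.compare_faces defaults to a
    # tolerance of 0.6; the stricter 0.47 used here trades some recall for fewer
    # false matches.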

    # Collected output paths ("./output/<name>/<imageName>") used later by main()
    # to build the CSV/JSON summary.
    p = []

    # Save Image to new directory
    def saveImageToDirectory(image, name, imageName):
        """
        Saves images to directory.

        Parameters
        ----------
        image : cv2 mat
            Image you want to save.
        name : String
            Directory where you want the image to be saved.
        imageName : String
            Name of image.

        Returns
        -------
        None.

        """
        path = "./output/" + name
        os.makedirs(path, exist_ok=True)
        cv2.imwrite(path + "/" + imageName, image)
        p.append([path + "/" + imageName])

    # Function for creating encodings for known people
    def processKnownPeopleImages(path=peoplePath, saveLocation="./known_encodings.pickle"):
        """
        Process images of known people and create face encodings to compare in future.
        Each image should have just 1 face in it.

        Parameters
        ----------
        path : STRING, optional
            Path for the known people dataset. The default is the 'People' path
            sent to /predict. Each image in this dataset should contain only 1 face.
        saveLocation : STRING, optional
            Path for storing encodings of the known people dataset. The default is
            "./known_encodings.pickle" in the current directory.

        Returns
        -------
        None.

        """
        known_encodings = []
        known_names = []
        for img in os.listdir(path):
            imgPath = os.path.join(path, img)

            # Read image
            image = cv2.imread(imgPath)
            name = img.rsplit('.')[0]
            # Resize
            image = cv2.resize(image, (0, 0), fx=0.9, fy=0.9, interpolation=cv2.INTER_LINEAR)

            # Get locations and encodings
            encs, locs = createEncodings(image)
            if not encs:
                # No face found in this reference image; skip it so that
                # known_names stays aligned with known_encodings.
                continue
            known_encodings.append(encs[0])
            known_names.append(name)

            for loc in locs:
                top, right, bottom, left = loc

                # Show Image
                # cv2.rectangle(image, (left, top), (right, bottom), color=(255, 0, 0), thickness=2)
                # cv2.imshow("Image", image)
                # cv2.waitKey(1)
                # cv2.destroyAllWindows()
        saveEncodings(known_encodings, known_names, saveLocation)

    # Function for processing dataset images
    def processDatasetImages(saveLocation="./Gallery_encodings.pickle"):
        """
        Process the images in the gallery dataset and separate them into
        directories of known people, groups and any unknown people images.

        Parameters
        ----------
        saveLocation : STRING, optional
            Path for storing encodings of the gallery dataset. The default is
            "./Gallery_encodings.pickle" in the current directory.

        Returns
        -------
        None.

        """
        # Read pickle file for known people to compare faces from
        people_encodings, names = readEncodingsPickle("./known_encodings.pickle")

        for root, dirs, files in os.walk(datasetPath, topdown=False):

            for name in files:
                s = os.path.join(root, name)
                # print(p)
                # imgPath = path + img

                # Read image
                image = cv2.imread(s)
                if image is None:
                    # Skip files that OpenCV cannot decode as images.
                    continue
                orig = image.copy()

                # Resize
                image = cv2.resize(image, (0, 0), fx=0.9, fy=0.9, interpolation=cv2.INTER_LINEAR)

                # Get locations and encodings
                encs, locs = createEncodings(image)

                # Save image to a group image folder if more than one face is in image
                # if len(locs) > 1:
                #     saveImageToDirectory(orig, "Group", img)

                # Processing image for each face
                i = 0
                knownFlag = 0
                for loc in locs:
                    top, right, bottom, left = loc
                    unknown_encoding = encs[i]
                    i += 1
                    acceptBool, duplicateName, distance = compareFaceEncodings(unknown_encoding, people_encodings, names)
                    if acceptBool:
                        saveImageToDirectory(orig, duplicateName, name)
                        knownFlag = 1
                if knownFlag == 1:
                    print("Match Found")
                else:
                    saveImageToDirectory(orig, "0", name)

                # Show Image
                # cv2.rectangle(image, (left, top), (right, bottom), color=(255, 0, 0), thickness=2)
                # cv2.imshow("Image", image)
                # cv2.waitKey(1)
                # cv2.destroyAllWindows()

    def main():
        """
        Main Function.

        Returns
        -------
        None.

        """
        processKnownPeopleImages()
        processDatasetImages()
        # shutil.make_archive('./Images', 'zip', './output')
        # p = './Images.zip'
        # return send_file(p, as_attachment=True)

        # import pandas as pd
        # q = pd.DataFrame(p)
        # x = q
        # # x.drop(x.columns[0], axis=1, inplace=True)
        # df = x.groupby([0], as_index=False).count()
        # z = df[0].str.split('/', expand=True)

        # for i, group in z.groupby([2]):
        #     group.drop(group.columns[2], axis=1, inplace=True)
        #     group.to_csv(f'./output1/{i}.csv', index=False, sep='/', header=False)

        ############################## csv creation code ##############################
        import pandas as pd

        # One row per image saved under ./output/<person>/ during the run above.
        q = pd.DataFrame(p)
        m = q
        # print(m)
        df = m.groupby([0], as_index=False).count()
        first_column_name = df.columns[0]

        # Rename the first column
        df.rename(columns={first_column_name: 'col'}, inplace=True)
        # print(df)
        z = df['col'].str.split('/', expand=True)

        z['ImagePath'] = z[3]

        result = z.drop([0, 1, 3], axis=1)
        result.rename({result.columns[-1]: 'test'}, axis=1, inplace=True)
        # print(result)
        result.to_csv('results1.csv')

        # Walk the gallery again to list every image, then left-join it against
        # the matched results on the file name column ('test').
        c = []
        for root, dirs, files in os.walk(datasetPath, topdown=False):
            for name in files:
                # print(name)
                L = os.path.join(root, name)
                c.append(L)
        df = pd.DataFrame(c)
        # print('second df')

        first_column_name = df.columns[0]

        # Rename the first column
        df.rename(columns={first_column_name: 'col'}, inplace=True)
        print(df)
        df1 = df['col'].str.split("/", expand=True)
        df1.rename({df1.columns[-2]: 'abc'}, axis=1, inplace=True)
        # print('this is df1')
        # print(df1)
        df1.rename({df1.columns[-1]: 'test'}, axis=1, inplace=True)
        merge = pd.merge(df1, result, on='test', how='left')
        merge.to_csv('merge.csv')
        mergesplit = merge.loc[:, 'test'].str.split(".", expand=True)
        mergesplit.rename({mergesplit.columns[-2]: 'ImageName'}, axis=1, inplace=True)
        mergesplit = mergesplit.loc[:, 'ImageName']

        merge.rename({merge.columns[-1]: 'Matched'}, axis=1, inplace=True)
        merge['EventName'] = merge['abc']
        merge['Imagepath'] = "/_files/1/Gallery/" + merge['EventName'] + '/' + merge['test']

        frames = [merge, mergesplit]

        r = pd.concat(frames, axis=1, join='inner')
        r = r.iloc[:, 3:]
        # print(r)
        r.to_csv('path.csv', index=False)
        # r.to_json(r'./matched.json', orient="records")
        column_list = ['Matched', 'Imagepath', 'ImageName', 'EventName']
        r[column_list].to_json('matched.json', orient="records")
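
        # Each record in matched.json has roughly this shape (from column_list above):
        # {"Matched": "<person folder under ./output, or 0/NaN if unmatched>",
        #  "Imagepath": "/_files/1/Gallery/<EventName>/<file>",
        #  "ImageName": "<file name without extension>",
        #  "EventName": "<gallery sub-folder>"}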

        #############################################################################################

        # Commented-out experiments kept for reference: an alternate merge/rename
        # flow and a helper that moved dataset sub-folders with shutil.

        # merge.rename({merge.columns[-3]: 'ImagePath'}, axis=1, inplace=True)
        #
        # # print(merge)
        # merge1 = merge.iloc[:, -2]
        # merge12 = merge.iloc[:, -3]
        #
        # # merge1.rename({merge1.columns[-1]: 'abc'}, axis=1, inplace=True)
        # merge2 = merge.iloc[:, -1].str.split(".", expand=True)
        # merge2.rename({merge2.columns[-1]: 'drop'}, axis=1, inplace=True)
        # # merge2.rename({merge2.columns[-2]: 'ImageName'}, axis=1, inplace=True)
        # print('this is merge1')
        # print(merge1)
        # print('this is merge2')
        # print(merge2)
        # mergefinal = pd.concat([merge1, merge2], axis=1, join='inner')
        # # print(mergefinal)
        #
        # mergefinal.drop(columns=mergefinal.columns[-1], axis=1, inplace=True)
        # # mergefinal.rename({mergefinal.columns[-1]: 'ImageName'}, axis=1, inplace=True)
        # # mergefinal.rename({mergefinal.columns[-2]: 'EventName'}, axis=1, inplace=True)
        #
        # frames = [mergefinal, merge12]
        # r = pd.concat(frames, axis=1, join='inner')
        # r.to_csv('Imagepath1.csv', index=False)
        # r.to_json('Imagepath1.json', orient="records")

        # #################### move code #############
        # import shutil
        # import os
        #
        # # base path
        # base_path = 'C:\\Users\\Bizgaze\\PycharmProjects\\face_recogniction\\move'
        # dir_list = []
        # rootdir = 'C:\\Users\\Bizgaze\\PycharmProjects\\face_recogniction\\Dataset'
        # for file in os.listdir(rootdir):
        #     d = os.path.join(rootdir, file)
        #     if os.path.isdir(d):
        #         dir_list.append(d)
        #
        # # list of directories we want to move.
        # # dir_list = ['test2', 'test4', 'test5', 'does_not_exist']
        #
        # # path to destination directory
        # # dest = os.path.join(base_path, 'dest')
        #
        # print("Before moving directories:")
        # print(os.listdir(base_path))
        #
        # # traverse each directory in dir_list
        # for dir_ in dir_list:
        #     # create the path to the directory in dir_list
        #     source = os.path.join(base_path, dir_)
        #
        #     # check if it is an existing directory
        #     if os.path.isdir(source):
        #         # move to destination path
        #         shutil.move(source, base_path)
        #
        # print("After moving directories:")
        # print(os.listdir(base_path))
        print("Completed")

    # Run the full pipeline for this request, then return the JSON summary.
    main()

    # return render_template('index.html')
    jsonPath = './matched.json'
    return send_file(jsonPath, as_attachment=True)

    # return 'ALL IMAGES MATCHED'


@app.route('/json')
def json():
    jsonPath = './matched.json'
    return send_file(jsonPath, as_attachment=True)


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8081)
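
# Illustrative client calls (host/port from app.run above; the JSON paths are
# just the module-level defaults and should be replaced with real folders):
#   curl -X POST http://localhost:8081/predict \
#        -H "Content-Type: application/json" \
#        -d '{"People": "/opt/bizgaze/events.bizgaze.app/wwwroot/_files/People/", "Gallery": "/opt/bizgaze/events.bizgaze.app/wwwroot/_files/1/Gallery/"}'
#   curl -OJ http://localhost:8081/json    # re-download the most recent matched.json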