소스 검색

first commit

saikumar 2 년 전
커밋
48effe8886
100개의 변경된 파일75392개의 추가작업 그리고 0개의 파일을 삭제
  1. 30
    0
      Business_cards/ADD300/meta.json
  2. 18
    0
      Business_cards/ADD300/ner/cfg
  3. BIN
      Business_cards/ADD300/ner/model
  4. 1
    0
      Business_cards/ADD300/ner/moves
  5. 4
    0
      Business_cards/ADD300/tokenizer
  6. 1
    0
      Business_cards/ADD300/vocab/key2row
  7. 1
    0
      Business_cards/ADD300/vocab/lookups.bin
  8. 6883
    0
      Business_cards/ADD300/vocab/strings.json
  9. BIN
      Business_cards/ADD300/vocab/vectors
  10. 2207
    0
      Business_cards/Business_cards.py
  11. 35
    0
      Business_cards/L.csv
  12. 66
    0
      Business_cards/Lead Master.csv
  13. 13
    0
      Business_cards/T.csv
  14. BIN
      Business_cards/requirement.txt
  15. 75
    0
      Business_cards/templates/card.html
  16. 156
    0
      Business_cards/templates/index.html
  17. 25
    0
      Business_cards/test.txt
  18. 44
    0
      Business_cards/test123456.txt
  19. 10
    0
      Business_cards/visitingcard.csv
  20. 491
    0
      Events/Allunq_People.py
  21. 527
    0
      Events/Allunq_copy_gallery.py
  22. 1
    0
      Events/events12554.json
  23. 117
    0
      Events/front_face.py
  24. 49
    0
      Events/is_existALLUNQ.py
  25. 16
    0
      Events/jsonRequest.py
  26. 252
    0
      Events/main_application.py
  27. BIN
      Events/model/dlib_face_recognition_resnet_model_v1.dat
  28. BIN
      Events/model/shape_predictor_5_face_landmarks.dat
  29. BIN
      Events/model/shape_predictor_68_face_landmarks.dat
  30. 419
    0
      Events/people_Allunq_zero_maingallery.py
  31. 41
    0
      Events/prdiction.py
  32. BIN
      Events/re.txt
  33. 88
    0
      Events/remove.py
  34. 99
    0
      Events/sepration_cluster.py
  35. 39
    0
      Events/sepration_crop.py
  36. 10
    0
      Events/templates/Display.html
  37. 32
    0
      Events/templates/Gallery.html
  38. 84
    0
      Events/templates/index.html
  39. 39
    0
      Events/unique_1.py
  40. 352
    0
      Events/unique_Allunq.py
  41. 36
    0
      Invoice_parser/FITZ_250_450data/meta.json
  42. 18
    0
      Invoice_parser/FITZ_250_450data/ner/cfg
  43. BIN
      Invoice_parser/FITZ_250_450data/ner/model
  44. 1
    0
      Invoice_parser/FITZ_250_450data/ner/moves
  45. 4
    0
      Invoice_parser/FITZ_250_450data/tokenizer
  46. 1
    0
      Invoice_parser/FITZ_250_450data/vocab/key2row
  47. 1
    0
      Invoice_parser/FITZ_250_450data/vocab/lookups.bin
  48. 17583
    0
      Invoice_parser/FITZ_250_450data/vocab/strings.json
  49. BIN
      Invoice_parser/FITZ_250_450data/vocab/vectors
  50. 48
    0
      Invoice_parser/Invoice.csv
  51. 48
    0
      Invoice_parser/final.csv
  52. 20
    0
      Invoice_parser/finalwithcolen.csv
  53. 30
    0
      Invoice_parser/finalwithoutcolen.csv
  54. 1
    0
      Invoice_parser/firstjson.json
  55. 427
    0
      Invoice_parser/invoice.multiprocessing.py
  56. 2
    0
      Invoice_parser/invoicewithouttable.csv
  57. 10
    0
      Invoice_parser/invoicewithtable1.csv
  58. 10
    0
      Invoice_parser/invoicewithtable2.csv
  59. 10
    0
      Invoice_parser/item1.csv
  60. 10
    0
      Invoice_parser/item2.csv
  61. 28
    0
      Invoice_parser/main.csv
  62. 1
    0
      Invoice_parser/p/meta.json
  63. 18
    0
      Invoice_parser/p/ner/cfg
  64. BIN
      Invoice_parser/p/ner/model
  65. 1
    0
      Invoice_parser/p/ner/moves
  66. 4
    0
      Invoice_parser/p/tokenizer
  67. 1
    0
      Invoice_parser/p/vocab/key2row
  68. BIN
      Invoice_parser/p/vocab/lexemes.bin
  69. 6111
    0
      Invoice_parser/p/vocab/strings.json
  70. BIN
      Invoice_parser/p/vocab/vectors
  71. 87
    0
      Invoice_parser/requirementsinvoice.txt
  72. 92
    0
      Invoice_parser/templates/Error.html
  73. 97
    0
      Invoice_parser/templates/home.html
  74. 85
    0
      Invoice_parser/templates/invoice.html
  75. 82
    0
      Invoice_parser/templates/resume.html
  76. BIN
      Invoice_parser/upload_invoice/301.pdf
  77. BIN
      Invoice_parser/uploads/0.pdf
  78. 8
    0
      Resume_parser/AD.csv
  79. 9
    0
      Resume_parser/AD11.csv
  80. 16
    0
      Resume_parser/Ad1.csv
  81. 7
    0
      Resume_parser/Ad2.csv
  82. 1
    0
      Resume_parser/ME/meta.json
  83. 18
    0
      Resume_parser/ME/ner/cfg
  84. BIN
      Resume_parser/ME/ner/model
  85. 1
    0
      Resume_parser/ME/ner/moves
  86. 4
    0
      Resume_parser/ME/tokenizer
  87. 1
    0
      Resume_parser/ME/vocab/key2row
  88. BIN
      Resume_parser/ME/vocab/lexemes.bin
  89. 38152
    0
      Resume_parser/ME/vocab/strings.json
  90. BIN
      Resume_parser/ME/vocab/vectors
  91. 4
    0
      Resume_parser/PG.csv
  92. 4
    0
      Resume_parser/PGmerge.csv
  93. 5
    0
      Resume_parser/SSC.csv
  94. 4
    0
      Resume_parser/UG.csv
  95. 4
    0
      Resume_parser/UGmerge.csv
  96. 43
    0
      Resume_parser/bdeeducation_50_0.2/meta.json
  97. 18
    0
      Resume_parser/bdeeducation_50_0.2/ner/cfg
  98. BIN
      Resume_parser/bdeeducation_50_0.2/ner/model
  99. 1
    0
      Resume_parser/bdeeducation_50_0.2/ner/moves
  100. 0
    0
      Resume_parser/bdeeducation_50_0.2/tokenizer

+ 30
- 0
Business_cards/ADD300/meta.json 파일 보기

@@ -0,0 +1,30 @@
1
+{
2
+  "lang":"en",
3
+  "name":"model",
4
+  "version":"0.0.0",
5
+  "spacy_version":">=2.3.5",
6
+  "description":"",
7
+  "author":"",
8
+  "email":"",
9
+  "url":"",
10
+  "license":"",
11
+  "spacy_git_version":"1d4b1dea2",
12
+  "vectors":{
13
+    "width":0,
14
+    "vectors":0,
15
+    "keys":0,
16
+    "name":"spacy_pretrained_vectors"
17
+  },
18
+  "pipeline":[
19
+    "ner"
20
+  ],
21
+  "factories":{
22
+    "ner":"ner"
23
+  },
24
+  "labels":{
25
+    "ner":[
26
+      "Address",
27
+      "Last Name"
28
+    ]
29
+  }
30
+}

+ 18
- 0
Business_cards/ADD300/ner/cfg 파일 보기

@@ -0,0 +1,18 @@
1
+{
2
+  "beam_width":1,
3
+  "beam_density":0.0,
4
+  "beam_update_prob":1.0,
5
+  "cnn_maxout_pieces":3,
6
+  "nr_feature_tokens":6,
7
+  "nr_class":10,
8
+  "hidden_depth":1,
9
+  "token_vector_width":96,
10
+  "hidden_width":64,
11
+  "maxout_pieces":2,
12
+  "pretrained_vectors":null,
13
+  "bilstm_depth":0,
14
+  "self_attn_depth":0,
15
+  "conv_depth":4,
16
+  "conv_window":1,
17
+  "embed_size":2000
18
+}

BIN
Business_cards/ADD300/ner/model 파일 보기


+ 1
- 0
Business_cards/ADD300/ner/moves 파일 보기

@@ -0,0 +1 @@
1
+�¥movesÙ {"0":{},"1":{"Address":-1,"Last Name":-2},"2":{"Address":-1,"Last Name":-2},"3":{"Address":-1,"Last Name":-2},"4":{"":1,"Address":-1,"Last Name":-2},"5":{"":1}}

+ 4
- 0
Business_cards/ADD300/tokenizer
파일 크기가 너무 크기때문에 변경 상태를 표시하지 않습니다.
파일 보기


+ 1
- 0
Business_cards/ADD300/vocab/key2row 파일 보기

@@ -0,0 +1 @@
1
+€

+ 1
- 0
Business_cards/ADD300/vocab/lookups.bin 파일 보기

@@ -0,0 +1 @@
1
+�«lexeme_norm€

+ 6883
- 0
Business_cards/ADD300/vocab/strings.json
파일 크기가 너무 크기때문에 변경 상태를 표시하지 않습니다.
파일 보기


BIN
Business_cards/ADD300/vocab/vectors 파일 보기


+ 2207
- 0
Business_cards/Business_cards.py
파일 크기가 너무 크기때문에 변경 상태를 표시하지 않습니다.
파일 보기


+ 35
- 0
Business_cards/L.csv
파일 크기가 너무 크기때문에 변경 상태를 표시하지 않습니다.
파일 보기


+ 66
- 0
Business_cards/Lead Master.csv 파일 보기

@@ -0,0 +1,66 @@
1
+OrganizationName,OrganizationEmail,Designation,OrganizationPhoneNumber,ContactPersonName,Email,Url,CIN,PhoneNumber,PhoneNumber1,PhoneNumber2,Address,AddressLine1_1,AddressLine2_1,country_code,LandMark1,PinCode1,Source,Industry,AddressName2,AddressLine1_2,AddressLine2_2,LandMark2,PinCode2,LOB,TAGS,PortalName,CityName1,CityName2,Assigned To,values,values.1,Values,
2
+RICHWORTH,,SENIOR  SALES  ASSOCIATE ,,N.  PORCHEZHIYAN ,sales@richworth.in,,,9551272626,914442125566,,"Clothing, 15  Arcot  Street,  T.  Nagar,  Chennai  , , 600017",,,IN,"Thygarayanagar South NDS.O, Hindi Prachar Sabha, Thygarayanagar North ND, Thygarayanagar H.O",600017,,,,,,,,,,,Chennai,,,,,,
3
+SRIKRISHNAPOLYMERS,,DIRECTOR -  SALES  &  BUSINESS  DEVELOPMENT ,,Sameer ,sameer@srikrishnapolymers.in,,,+919063007351,,,">, 91  9063  007  351   sameer@srikrishnapolymers.in   www.srikrishnapolymers.in   Re.   [   >  #  8-4-101/59,  Mailerdevpally,  Rajendra  Nagar   NS  Hyderabad.  , , 500077",,,IN,Kattedan Ie,500077,,,,,,,,,,,Hyderabad,,,,,,
4
+SRIKRISHNAPOLYMERS,,DIRECTOR -  SALES  &  BUSINESS  DEVELOPMENT ,,Sameer ,sameer@srikrishnapolymers.in,,,+919063007351,,,">, 91  9063  007  351   sameer@srikrishnapolymers.in   www.srikrishnapolymers.in   Re.   [   >  #  8-4-101/59,  Mailerdevpally,  Rajendra  Nagar   NS  Hyderabad.  , , 500077",,,IN,Kattedan Ie,500077,,,,,,,,,,,Hyderabad,,,,,,
5
+SRIKRISHNAPOLYMERS,,DIRECTOR -  SALES  &  BUSINESS  DEVELOPMENT ,,Sameer ,sameer@srikrishnapolymers.in,,,+919063007351,,,">, 91  9063  007  351   sameer@srikrishnapolymers.in   www.srikrishnapolymers.in   Re.   [   >  #  8-4-101/59,  Mailerdevpally,  Rajendra  Nagar   NS  Hyderabad.  , , 500077",,,IN,Kattedan Ie,500077,,,,,,,,,,,Hyderabad,,,,,,
6
+APEXELEVATORSHYD,,,,P.Ramakrishna,ramakrishnaapexelevators@gmai.com,,,9959977066,9963110797,,Passenger,,,,,,,,,,,,,,,,,,,,,,
7
+APEXELEVATORSHYD,,,,P.Ramakrishna,ramakrishnaapexelevators@gmai.com,,,9959977066,9963110797,," Hospital, Hydraulic etc. Nagole XRoad, Manatha Nagar, Plot No. 43, Road No. 1, Hyderabad Website:www.apexelevatorshyd.com E mail:ramakrishnaapexelevators@gmai.com  ",,,,,,,,,,,,,,,,,,,,,,
8
+ICONPACKAGINGMACHINERIES,,"R.O.PLANTS, SPARES &SERVICE",,"Mfrs.: Automatic Form Fill &Seal Machines, Coding Machines",info@iconpackagingmachineries.com,,,+919177974444,,," Spares &Service H.No.5-2-571, Tirumala Nagar, Meerpet, HB Colony, Moula-Ali, Hyderabad -500 040. Telangana, India. Cell:91338772222, E-mail:info@iconpackagingmachineries.com Website: www.iconpackagingmachineries.com  ",,,,,,,,,,,,,,,,,,,,,,
9
+ICONPACKAGINGMACHINERIES,,"R.O.PLANTS, SPARES &SERVICE",,IcON,info@iconpackagingmachineries.com,,,+919177974444,,," Spares &Service H.No.5-2-571, Tirumala Nagar, Meerpet, HB Colony, Moula-Ali, Hyderabad -500 040. Telangana, India. Cell:91338772222, E-mail:info@iconpackagingmachineries.com Website: www.iconpackagingmachineries.com  ",,,,,,,,,,,,,,,,,,,,,,
10
+ICONPACKAGINGMACHINERIES,,"R.O.PLANTS, SPARES &SERVICE",,"Mfrs.: Automatic Form Fill &Seal Machines, Coding Machines",info@iconpackagingmachineries.com,,,+919177974444,,," Spares &Service H.No.5-2-571, Tirumala Nagar, Meerpet, HB Colony, Moula-Ali, Hyderabad -500 040. Telangana, India. Cell:91338772222, E-mail:info@iconpackagingmachineries.com Website: www.iconpackagingmachineries.com  ",,,,,,,,,,,,,,,,,,,,,,
11
+ICONPACKAGINGMACHINERIES,,,,"Mfrs.: Automatic Form Fill &Seal Machines, Coding Machines",info@iconpackagingmachineries.com,,,+919177974444,,," Spares &Service H.No.5-2-571, Tirumala Nagar, Meerpet, HB Colony, Moula-Ali, Hyderabad -500 040. Telangana, India. Cell:91338772222, E-mail:info@iconpackagingmachineries.com Website: www.iconpackagingmachineries.com  ",,,,,,,,,,,,,,,,,,,,,,
12
+ICONPACKAGINGMACHINERIES,,,,"Mfrs.: Automatic Form Fill &Seal Machines, Coding Machines",info@iconpackagingmachineries.com,,,+919177974444,,," Spares &Service H.No.5-2-571, Tirumala Nagar, Meerpet, HB Colony, Moula-Ali, Hyderabad -500 040. Telangana, India. Cell:91338772222, E-mail:info@iconpackagingmachineries.com Website: www.iconpackagingmachineries.com  ",,,,,,,,,,,,,,,,,,,,,,
13
+ICONPACKAGINGMACHINERIES,,,,"Mfrs.: Automatic Form Fill &Seal Machines, Coding Machines",info@iconpackagingmachineries.com,,,+919177974444,,," Spares &Service H.No.5-2-571, Tirumala Nagar, Meerpet, HB Colony, Moula-Ali, Hyderabad -500 040. Telangana, India. Cell:91338772222, E-mail:info@iconpackagingmachineries.com Website: www.iconpackagingmachineries.com  ",,,,,,,,,,,,,,,,,,,,,,
14
+ICONPACKAGINGMACHINERIES,,,,"Mfrs.: Automatic Form Fill &Seal Machines, Coding Machines",info@iconpackagingmachineries.com,,,+919177974444,,," Spares &Service H.No.5-2-571, Tirumala Nagar, Meerpet, HB Colony, Moula-Ali, Hyderabad -500 040. Telangana, India. Cell:91338772222, E-mail:info@iconpackagingmachineries.com Website: www.iconpackagingmachineries.com  ",,,,,,,,,,,,,,,,,,,,,,
15
+ICONPACKAGINGMACHINERIES,,,,,info@iconpackagingmachineries.com,,,+919177974444,,," Spares &Service H.No.5-2-571, Tirumala Nagar, Meerpet, HB Colony, Moula-Ali, Hyderabad -500 040. Telangana, India. Cell:91338772222, E-mail:info@iconpackagingmachineries.com Website: www.iconpackagingmachineries.com  ",,,,,,,,,,,,,,,,,,,,,,
16
+,,,,K. Mahesh Goud,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
17
+ICONPACKAGINGMACHINERIES,,,,K. Mahesh Goud,info@iconpackagingmachineries.com,,,+919177974444,,," Spares &Service H.No.5-2-571, Tirumala Nagar, Meerpet, HB Colony, Moula-Ali, Hyderabad -500 040. Telangana, India. Cell:91338772222, E-mail:info@iconpackagingmachineries.com Website: www.iconpackagingmachineries.com  ",,,,,,,,,,,,,,,,,,,,,,
18
+,,,,| ,naren_1968@yahoo.com,,,9724303106,,,"A-318, , , Vrindavan  Flats,  Nr.  Vejalpur  Police  Chowki,  Ahmedabad-55",,,,,55,,,,,,,,,,,,,,,,,
19
+,,,,| ,naren_1968@yahoo.com,,,9724303106,,,"A-318, , , Vrindavan  Flats,  Nr.  Vejalpur  Police  Chowki,  Ahmedabad-55",,,,,55,,,,,,,,,,,,,,,,,
20
+,,,,N.  D.  Singh  1 ,naren_1968@yahoo.com,,,9724303106,,,"A-318, , , Vrindavan  Flats,  Nr.  Vejalpur  Police  Chowki,  Ahmedabad-55",,,,,55,,,,,,,,,,,,,,,,,
21
+RICHWORTH,,SENIOR  SALES  ASSOCIATE ,,N.  PORCHEZHIYAN ,sales@richworth.in,,,9551272626,914442125566,,"Clothing, 15  Arcot  Street,  T.  Nagar,  Chennai  , , 600017",,,IN,"Thygarayanagar South NDS.O, Hindi Prachar Sabha, Thygarayanagar North ND, Thygarayanagar H.O",600017,,,,,,,,,,,Chennai,,,,,,
22
+,,,,KUSH  JAISWAL ,jaiswalkush273@gmail.com,,,8919794310,,,"1-91-10,   GSTIN  No :  37ACJPJ5588R12Z1   Fssai  :  20121038, , -530017",,,IN,"L B Colony, M.V.P.Colony",530017,,,,,,,,,,,Visakhapatnam,,,,,,
23
+ECOLASTIC,,SALES&MARKETING,,D Madhusudhana,help@ecolastic.in,mrd@ecolastic.in,,,+919118699999,,,"Floor, Phase-1,Plot No.35/2,IDA, Cherlapally Hyderabac, Telangana, , , -500051",,,IN,Hindustan Cables Ltd,500051,,,,,,,,,,,Hyderabad,,,,,
24
+ECOLASTIC,,SALES&MARKETING,,D Madhusudhana,"help@ecolastic.in, mrd@ecolastic.in",,,+919118699999,,,"Floor, Phase-1,Plot No.35/2,IDA, Cherlapally Hyderabac, Telangana, , , -500051",,,IN,Hindustan Cables Ltd,500051,,,,,,,,,,,Hyderabad,,,,,,
25
+RICHWORTH,,SENIOR  SALES  ASSOCIATE ,,N.  PORCHEZHIYAN ,sales@richworth.in,,,9551272626,914442125566,,"Clothing, 15  Arcot  Street,  T.  Nagar,  Chennai  , , 600017",,,IN,"Thygarayanagar South NDS.O, Hindi Prachar Sabha, Thygarayanagar North ND, Thygarayanagar H.O",600017,,,,,,,,,,,Chennai,,,,,,
26
+SRIKRISHNAPOLYMERS,,DIRECTOR -  SALES  &  BUSINESS  DEVELOPMENT ,,Sameer ,sameer@srikrishnapolymers.in,,,+919063007351,,,">, 91  9063  007  351   sameer@srikrishnapolymers.in   www.srikrishnapolymers.in   Re.   [   >  #  8-4-101/59,  Mailerdevpally,  Rajendra  Nagar   NS  Hyderabad.  , , 500077",,,IN,Kattedan Ie,500077,,,,,,,,,,,Hyderabad,,,,,,
27
+ICONPACKAGINGMACHINERIES,,,,K. Mahesh Goud,info@iconpackagingmachineries.com,,,+919177974444,,," Spares &Service H.No.5-2-571, Tirumala Nagar, Meerpet, HB Colony, Moula-Ali, Hyderabad -500 040. Telangana, India. Cell:91338772222, E-mail:info@iconpackagingmachineries.com Website: www.iconpackagingmachineries.com  ",,,,,,,,,,,,,,,,,,,,,,
28
+ICONPACKAGINGMACHINERIES,,,,K. Mahesh Goud,info@iconpackagingmachineries.com,,,+919177974444,,," Spares &Service H.No.5-2-571, Tirumala Nagar, Meerpet, HB Colony, Moula-Ali, Hyderabad -500 040. Telangana, India. Cell:91338772222, E-mail:info@iconpackagingmachineries.com Website: www.iconpackagingmachineries.com  ",,,,,,,,,,,,,,,,,,,,,,
29
+ICONPACKAGINGMACHINERIES,,,,K. Mahesh Goud,info@iconpackagingmachineries.com,,,+919177974444,,,"Machines, . Telangana, India. Cell:91338, , , -500 040",,,IN,"Ie Moulali, Aphb Colony Moulali",500040,,,,,,,,,,,Hyderabad,,,,,,
30
+ICONPACKAGINGMACHINERIES,,,,K. Mahesh Goud,info@iconpackagingmachineries.com,,,+919177974444,,,"Machines, . Telangana, India. Cell:91338, , , -500 040",,,IN,"Ie Moulali, Aphb Colony Moulali",500040,,,,,,,,,,,Hyderabad,,,,,,
31
+ICONPACKAGINGMACHINERIES,,,,,info@iconpackagingmachineries.com,,,+919177974444,,,"Machines, . Telangana, India. Cell:91338, , , -500 040",,,IN,"Ie Moulali, Aphb Colony Moulali",500040,,,,,,,,,,,Hyderabad,,,,,,
32
+ICONPACKAGINGMACHINERIES,,,,K. Mahesh Goud,info@iconpackagingmachineries.com,,,+919177974444,,,"Machines, . Telangana, India. Cell:91338, , , -500 040",,,IN,"Ie Moulali, Aphb Colony Moulali",500040,,,,,,,,,,,Hyderabad,,,,,,
33
+ICONPACKAGINGMACHINERIES,,,,K. Mahesh Goud,info@iconpackagingmachineries.com,,,+919177974444,,,"Machines, . Telangana, India. Cell:91338, , , -500 040",,,IN,"Ie Moulali, Aphb Colony Moulali",500040,,,,,,,,,,,Hyderabad,,,,,,
34
+ICONPACKAGINGMACHINERIES,,,,K. Mahesh Goud,info@iconpackagingmachineries.com,,,+919177974444,,,"Machines Coding Machines R.O.Plants, Spares &Service H.No.5-2-571, Tirumala Nagar, Meerpet, HB Colony, Moula-Ali, Hyderabad -500 040",,,IN,"Ie Moulali, Aphb Colony Moulali",500040,,,,,,,,,,,Hyderabad,,,,,,
35
+ICONPACKAGINGMACHINERIES,,,,K. Mahesh Goud,info@iconpackagingmachineries.com,,,+919177974444,,,"Machines, , . Telangana, India. Cell:91338, , -500 040",,,IN,"Ie Moulali, Aphb Colony Moulali",500040,,,,,,,,,,,Hyderabad,,,,,,
36
+ICONPACKAGINGMACHINERIES,,,,K. Mahesh Goud,info@iconpackagingmachineries.com,,,+919177974444,,,"Machines Coding Machines R.O.Plants, Spares &Service H.No.5-2-571, Tirumala Nagar, Meerpet, HB Colony, Moula-Ali, Hyderabad , , , , -500 040",,,IN,"Ie Moulali, Aphb Colony Moulali",500040,,,,,,,,,,,Hyderabad,,,,,,
37
+ICONPACKAGINGMACHINERIES,,,,K. Mahesh Goud,info@iconpackagingmachineries.com,,,+919177974444,,," Spares &Service H.No.5-2-571, Tirumala Nagar, Meerpet, HB Colony, Moula-Ali, Hyderabad -500 040. Telangana, India. Cell:91338772222, E-mail:info@iconpackagingmachineries.com Website: www.iconpackagingmachineries.com  ",,,,,,,,,,,,,,,,,,,,,,
38
+ICONPACKAGINGMACHINERIES,,,,K. Mahesh Goud,info@iconpackagingmachineries.com,,,+919177974444,,,"Machines Coding Machines R.O.Plants, Spares &Service H.No.5-2-571, Tirumala Nagar, Meerpet, HB Colony, Moula-Ali, Hyderabad , , , , -500 040",,,IN,"Ie Moulali, Aphb Colony Moulali",500040,,,,,,,,,,,Hyderabad,,,,,,
39
+ICONPACKAGINGMACHINERIES,,,,K. Mahesh Goud,info@iconpackagingmachineries.com,,,+919177974444,,,"Machines Coding Machines R.O.Plants, Spares &Service H.No.5-2-571, Tirumala Nagar, Meerpet, HB Colony, Moula-Ali, Hyderabad   -500 040",,,IN,"Ie Moulali, Aphb Colony Moulali",500040,,,,,,,,,,,Hyderabad,,,,,,
40
+ICONPACKAGINGMACHINERIES,,,,K. Mahesh Goud,info@iconpackagingmachineries.com,,,+919177974444,,,"Machines Coding Machines R.O.Plants, Spares &Service H.No.5-2-571, Tirumala Nagar, Meerpet, HB Colony, Moula-Ali, Hyderabad -500 040",,,IN,"Ie Moulali, Aphb Colony Moulali",500040,,,,,,,,,,,Hyderabad,,,,,,
41
+SRIKRISHNAPOLYMERS,,DIRECTOR -  SALES  &  BUSINESS  DEVELOPMENT ,,Sameer ,sameer@srikrishnapolymers.in,,,+919063007351,,,">+919063007351 sameer@srikrishnapolymers.in www.srikrishnapolymers.in Re. [ >#8-4-101/59,Mailerdevpally,RajendraNagar NSHyderabad. 500077",,,IN,Kattedan Ie,500077,,,,,,,,,,,Hyderabad,,,,,,
42
+APEXELEVATORSHYD,,,,P.Ramakrishna,ramakrishnaapexelevators@gmai.com,,,9959977066,9963110797,," Hospital, Hydraulic etc. Nagole XRoad, Manatha Nagar, Plot No. 43, Road No. 1, Hyderabad Website:www.apexelevatorshyd.com E mail:ramakrishnaapexelevators@gmai.com  ",,,,,,,,,,,,,,,,,,,,,,
43
+ECOLASTIC,,SALES&MARKETING,,D Madhusudhana,"help@ecolastic.in, mrd@ecolastic.in",,,+919118699999,,,"Floor Phase-1,Plot No.35/2,IDA, Cherlapally Hyderabac, Telangana , -500051",,,IN,Hindustan Cables Ltd,500051,,,,,,,,,,,Hyderabad,,,,,,
44
+ECOLASTIC,,SALES&MARKETING,,D Madhusudhana,"help@ecolastic.in, mrd@ecolastic.in",,,+919118699999,,,"Floor Phase-1,Plot No.35/2,IDA, Cherlapally Hyderabac, Telangana , -500051",,,IN,Hindustan Cables Ltd,500051,,,,,,,,,,,Hyderabad,,,,,,
45
+ICONPACKAGINGMACHINERIES,,,,K. Mahesh Goud,info@iconpackagingmachineries.com,,,+919177974444,,,"Machines Coding Machines R.O.Plants, Spares &Service H.No.5-2-571, Tirumala Nagar, Meerpet, HB Colony, Moula-Ali, Hyderabad -500 040",,,IN,"Ie Moulali, Aphb Colony Moulali",500040,,,,,,,,,,,Hyderabad,,,,,,
46
+ECOLASTIC,,SALES&MARKETING,,D Madhusudhana,"help@ecolastic.in, mrd@ecolastic.in",,,+919118699999,,,"Floor Phase-1,Plot No.35/2,IDA, Cherlapally Hyderabac, Telangana , -500051",,,IN,Hindustan Cables Ltd,500051,,,,,,,,,,,Hyderabad,,,,,,
47
+APEXELEVATORSHYD,,,,P.Ramakrishna,ramakrishnaapexelevators@gmai.com,,,9959977066,9963110797,," Hospital, Hydraulic etc. Nagole XRoad, Manatha Nagar, Plot No. 43, Road No. 1, Hyderabad Website:www.apexelevatorshyd.com E mail:ramakrishnaapexelevators@gmai.com  ",,,,,,,,,,,,,,,,,,,,,,
48
+RICHWORTH,,SENIOR  SALES  ASSOCIATE ,,N.  PORCHEZHIYAN ,sales@richworth.in,,,9551272626,914442125566,,"Clothing 15ArcotStreet,T.Nagar,Chennai 600017",,,IN,"Thygarayanagar South NDS.O, Hindi Prachar Sabha, Thygarayanagar North ND, Thygarayanagar H.O",600017,,,,,,,,,,,Chennai,,,,,,
49
+,,,,N.  D.  Singh  1 ,naren_1968@yahoo.com,,,9724303106,,,"A-318VrindavanFlats,Nr.VejalpurPoliceChowki,Ahmedabad-55",,,,,55,,,,,,,,,,,,,,,,,
50
+,,,,KUSH  JAISWAL ,jaiswalkush273@gmail.com,,,8919794310,,,"1-91-10, MIG-95,SECTOR-5,POSTALCOLONY,MVPCOLONY,VISAKHAPATNAN , -530017",,,IN,"L B Colony, M.V.P.Colony",530017,,,,,,,,,,,Visakhapatnam,,,,,,
51
+,,,,L.K.  Agarwal ,cargo_lifters@rediffmail.com,,,9848092514,,,"G-20 MinervaComplex,S.D.Road,Secunderabad- 500003",,,IN,"Secunderabad H.O, Kingsway",500003,,,,,,,,,,,Hyderabad,,,,,,
52
+AB  TRADING  CORPORATION ,,,,Sa ,,,,9848135926,9542650066,8142399179,"4-2-16 , MedipallyRoad,NTPC,Jyothinagar-, 505  215",,,IN,"A.P.Colony (Karim Nagar), Jyothinagar (Karim Nagar)",505215,,,,,,,,,,,Karim Nagar,,,,,,
53
+,,,,KUSH  JAISWAL ,jaiswalkush273@gmail.com,,,8919794310,,,"1-91-10, MIG-95,SECTOR-5,POSTALCOLONY,MVPCOLONY,VISAKHAPATNAM , -530017",,,IN,"L B Colony, M.V.P.Colony",530017,,,,,,,,,,,Visakhapatnam,,,,,,
54
+APEXELEVATORSHYD,,,,P  Ramakrishna ,ramakrishnaapexelevators@gmall,,,9959977066,9963110797,," Hospital,  Hydraulic  etc.,  Nagolé X Road,  Manatha  Nagar,  Plot  No.  43,  Road  No.  1,  Hyderabad  Website :.  www.apexelevatorshyd.com  ~~  ‘E-mail-: ‘ramakrishnaapexelevators@gmall  com   |  i                    ",,,,,,,,,,,,,,,,,,,,,,
55
+,,,,L.K.  Agarwal ,cargo_lifters@rediffmail.com,,,91-9848092514,,,"A—~~xIWS L.K.Agarwal 91 -9848092514 - + Cargo er40-27813957LiftersofIndia| —|+91 40-27810394 A a G4cargo_lifters@rediffmail.com ff aH j4 4 Se G-20,MinervaComplex,S.D.Road,Secunderabad- 500003",,,IN,"Secunderabad H.O, Kingsway",500003,,,,,,,,,,,Hyderabad,,,,,,
56
+SRIKRISHNAPOLYMERS,,DIRECTOR  -  SALES  &  BUSINESS  DEVELOPMENT ,,Sameer ,sameer@srikrishnapolymers.in,,,+919063007351,,,"8-4-101/59 Mailerdevpally,RajendraNagar 3 ” Hyderabad. 500077",,,IN,Kattedan Ie,500077,,,,,,,,,,,Hyderabad,,,,,,
57
+B  TRADING CORPORATION ,,,,,,,,9848135926,9542650066,8142399179,,,,,,,,,,,,,,,,,,,,,,,
58
+SGS,,|  MANAGER  -  KEY  ACCOUNTS ,,Durga  Prasad  Akella ,a@sgs,,,+918976997839,,,,,,,,,,,,,,,,,,,,,,,,,
59
+COMPRESSOR  LTD. ,,,,Falgun  Pandya  |  MD ,,,,+919825032784,,,,,,,,,,,,,,,,,,,,,,,,,
60
+APEXELEVATORSHYD,,,,P  Ramakrishna ,ramakrishnaapexelevators@gmall,,,9959977066,9963110797,,,,,,,,,,,,,,,,,,,,,,,,
61
+,,,,KUSH  JAISWAL ,jaiswalkush273@gmail.com,,,8919794310,,,,,,,,,,,,,,,,,,,,,,,,,
62
+,,,,KUSH  JAISWAL ,jaiswalkush273@gmail.com,,,8919794310,,,"1-91-10, MIG-95,SECTOR-5,POSTALCOLONY,MVPCOLONY,VISAKHAPATNAM , -530017",,,IN,"L B Colony, M.V.P.Colony",530017,,,,,,,,,,,Visakhapatnam,,,,,,
63
+,,,,KUSH  JAISWAL ,jaiswalkush273@gmail.com,,,8919794310,,,"1-91-10, MIG-95,SECTOR-5,POSTALCOLONY,MVPCOLONY,VISAKHAPATNAM , -530017",,,IN,"L B Colony, M.V.P.Colony",530017,,,,,,,,,,,Visakhapatnam,,,,,,
64
+,,SALES  &  SENIOR  ENGINEER ,,MEHUL  DEWDA ,info@19barvaya.com,,,+918160696665,+917802088493,,"A-607, DevashrayResidency,RTORoad,Vastral,Ahmedabad , -382418",,,IN,Vastral,382418,,,,,,,,,,,Ahmedabad,,,,,,
65
+,,,,pe ,,,,8885009002,9676509851,9199285113,  Be   CE  TEL  ECP                ,,,,,,,,,,,,,,,,,,,,,,
66
+,,,,Kawalpreet  Singh  — ,"sardartoys@yahoo.com, Serdarjitoys3755@gmail.com",,,9256390000,9256360000,0161-2771586,"Chowk 6,2771626,5000222»Email -sardartoys@yahoo.comSARDARJITOYS=RaniJhenic!Poed. Ghumar Mandi,Ludhiana,Punjab- 141001",,,IN,"Ludhiana H.O, New Courts, Sarabha Nagar, P&t Colony (Ludhiana), Satsang Road, Ludhiana Kty, Durgapuri, Bharat Nagar (Ludhiana), Sidhpeeth",141001,,,,,,,,,,,Ludhiana,,,,,,

+ 13
- 0
Business_cards/T.csv 파일 보기

@@ -0,0 +1,13 @@
1
+Keys,Values
2
+Address,"Chowk 6,2771626,5000222»Email -sardartoys@yahoo.comSARDARJITOYS=RaniJhenic!Poed. Ghumar Mandi,Ludhiana,Punjab- 141001"
3
+PhoneNumber,9256390000
4
+PhoneNumber1,9256360000
5
+PhoneNumber2,0161-2771586
6
+Email,"sardartoys@yahoo.com, Serdarjitoys3755@gmail.com"
7
+PinCode1,141001
8
+country_code,IN
9
+LandMark1,"Ludhiana H.O, New Courts, Sarabha Nagar, P&t Colony (Ludhiana), Satsang Road, Ludhiana Kty, Durgapuri, Bharat Nagar (Ludhiana), Sidhpeeth"
10
+state_name,Punjab
11
+state_code,23
12
+CityName1,Ludhiana
13
+ContactPersonName,Kawalpreet  Singh  — 

BIN
Business_cards/requirement.txt 파일 보기


+ 75
- 0
Business_cards/templates/card.html 파일 보기

@@ -0,0 +1,75 @@
1
+<html>
2
+
3
+<head>
4
+    <style>
5
+.button {
6
+  background-color: #000000; /* Green */
7
+  border: none;
8
+  color: white;
9
+  padding: 15px 32px;
10
+  text-align: center;
11
+  text-decoration: none;
12
+  display: inline-block;
13
+  margin: 4px 2px;
14
+  cursor: pointer;
15
+}
16
+
17
+
18
+.button1 {font-size: 10px;}
19
+.button2 {font-size: 12px;}
20
+.button3 {font-size: 16px;}
21
+.button4 {font-size: 50px;}
22
+.button5 {font-size: 24px;border-radius: 12px;}
23
+
24
+</style>
25
+
26
+</head>
27
+
28
+<body>
29
+<div style="background-image: url('https://lh3.googleusercontent.com/p/AF1QipONWF8G50u9Bu-dklcj3kzesofOn8Z0q0LdHeU1=w1080-h608-p-no-v0');
30
+   /* Full height */
31
+  height: 100%;
32
+    / Center and scale the image nicely /
33
+  background-position: center;
34
+  background-repeat: no-repeat;
35
+  background-size: cover;">
36
+
37
+      <h1 class="button button5"> Visiting Card PARSER </h1>
38
+    <br>  <br>
39
+    <a  class="button button5"href="/">HOME</a>
40
+
41
+
42
+
43
+<form action="/submit" method="POST" enctype="multipart/form-data">
44
+        <div class="form-group">
45
+
46
+
47
+          <div class="custom-file">
48
+            <input type="file" class="button button5" name="image" id="image"> <br> <br><br>
49
+<div class="bg"></div>
50
+  <input class="button button5" type="submit">
51
+
52
+          </div>
53
+        </div>
54
+    </form>
55
+
56
+    
57
+<style>
58
+
59
+   body {
60
+	width: 100%;
61
+	height:100%;
62
+	font-family: 'Helvetica';
63
+	background-color:#000000;
64
+	color: #fff;
65
+	font-size: 24px;
66
+	text-align:center;
67
+	letter-spacing:1.4px;
68
+
69
+}
70
+  </style>
71
+
72
+
73
+
74
+</body>
75
+</html>

+ 156
- 0
Business_cards/templates/index.html 파일 보기

@@ -0,0 +1,156 @@
1
+<html>
2
+
3
+<head>
4
+    <meta charset="UTF-8">
5
+    <meta http-equiv="X-UA-Compatible" content="IE=edge">
6
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
7
+    <title>Document</title>
8
+    <style>
9
+        @import url('https://fonts.googleapis.com/css2?family=Source+Code+Pro:ital,wght@0,200;0,400;0,500;1,200;1,400;1,500;1,600&display=swap');
10
+        *,*::after,*::before{
11
+            padding: 0;
12
+            margin: 0;
13
+            box-sizing: border-box;
14
+        }
15
+
16
+        body{
17
+            overflow-x: hidden;
18
+            font-family: 'Source Code Pro', monospace;
19
+        }
20
+        .container_scan{
21
+            background-color: #7ee8fa;
22
+            background-image: linear-gradient(315deg, #7ee8fa 0%, #80ff72 74%);
23
+            width: 100vw;
24
+            min-height: 100vh;
25
+
26
+            display: flex;
27
+            flex-direction: column;
28
+            justify-content: center;
29
+            align-items: center;
30
+            overflow: hidden;
31
+            overflow-y: auto;
32
+
33
+
34
+        }
35
+
36
+        .headingCard{
37
+            padding: 15px 0;
38
+        }
39
+
40
+        .card{
41
+            background-color: #fff;
42
+            width: 400px;
43
+            height: 500px;
44
+            border-radius: 25px;
45
+            border: 5px dashed #B8F1B0 ;
46
+        }
47
+
48
+        .file {
49
+  opacity: 0;
50
+  width: 0.1px;
51
+  height: 0.1px;
52
+  position: absolute;
53
+}
54
+
55
+.file-input label {
56
+  display: block;
57
+  position: relative;
58
+  width: 200px;
59
+  height: 50px;
60
+  border-radius: 25px;
61
+  background: linear-gradient(40deg, #ff6ec4, #7873f5);
62
+  box-shadow: 0 4px 7px rgba(0, 0, 0, 0.4);
63
+  display: flex;
64
+  align-items: center;
65
+  justify-content: center;
66
+  color: #fff;
67
+  font-weight: bold;
68
+  cursor: pointer;
69
+  transition: transform .2s ease-out;
70
+}
71
+
72
+.btn{
73
+    padding: 10px 50px;
74
+    border-radius: 25px;
75
+    width: 250px;
76
+  height: 50px;
77
+
78
+    border: none;
79
+    background: linear-gradient(40deg, #f56effa6, #7873f5);
80
+    color: #fff;
81
+    cursor: pointer;
82
+    box-shadow: 0 4px 7px rgba(0, 0, 0, 0.4);
83
+
84
+    transition: all .2s ease;
85
+}
86
+
87
+.btn:hover{
88
+    transform: translateY(5px);
89
+    background: linear-gradient(40deg, #f56eff63, #73a3f5);
90
+}
91
+h1 {
92
+  font-size: 40px;
93
+}
94
+
95
+.formSubmit{
96
+    height: 100%;
97
+    display: flex;
98
+    flex-direction: column;
99
+    align-items: center;
100
+    justify-content: center;
101
+    gap: 30px;
102
+}
103
+
104
+   .button {
105
+  background: linear-gradient(40deg, #f56effa6, #7873f5); /* Green */
106
+  border: none;
107
+  color: white;
108
+  padding: 15px 32px;
109
+  text-align: center;
110
+  text-decoration: none;
111
+  display: inline-block;
112
+  margin: 4px 2px;
113
+  cursor: pointer;
114
+}
115
+
116
+
117
+.button1 {font-size: 10px;}
118
+.button2 {font-size: 12px;border-radius: 12px;}
119
+.button3 {font-size: 16px;}
120
+.button4 {font-size: 50px;}
121
+.button5 {font-size: 24px;border-radius: 12px;}
122
+    </style>
123
+</head>
124
+
125
+<body>
126
+
127
+ <div class="container_scan">
128
+    <h1 class="headingCard">Visiting Card Parser</h1>
129
+     <div class="card">
130
+
131
+
132
+
133
+<form class="formSubmit" action="/submit" method="POST" enctype="multipart/form-data">
134
+        <div class="form-group">
135
+          <div class="custom-file">
136
+
137
+
138
+        <div class="file-input">
139
+                        <input type="file" id="image" name="image"  class="button button2">
140
+
141
+                      </div>
142
+
143
+
144
+
145
+
146
+          </div>
147
+        </div>
148
+         <input type="submit" class="btn btn-submit">
149
+    </form>
150
+
151
+
152
+
153
+
154
+
155
+</body>
156
+</html>

+ 25
- 0
Business_cards/test.txt 파일 보기

@@ -0,0 +1,25 @@
1
+KY 
2
+— 
3
+LP 
4
+A, 
5
+— 
6
+~~ 
7
+x 
8
+IWS 
9
+L.K.  Agarwal 
10
+91 -  98480  92514 
11
+- 
12
++ 
13
+Ca  rgo 
14
+(er  40  -  27813957 
15
+Lifters  of  India  | —  |+91 40  -  27810394 
16
+A 
17
+a 
18
+G4  cargo_lifters@rediffmail.com 
19
+ff 
20
+aH 
21
+j 
22
+4 
23
+4 
24
+Se 
25
+G-20,  Minerva  Complex,  S.D.  Road,  Secunderabad  -  500003.

+ 44
- 0
Business_cards/test123456.txt 파일 보기

@@ -0,0 +1,44 @@
1
+KY 
2
+
3
+— 
4
+LP 
5
+A, 
6
+
7
+— 
8
+~~ 
9
+x 
10
+IWS 
11
+
12
+L.K.  Agarwal 
13
+
14
+91 -  98480  92514 
15
+
16
+- 
17
+
18
++ 
19
+
20
+Ca  rgo 
21
+
22
+(er  40  -  27813957 
23
+Lifters  of  India  | —  |+91 40  -  27810394 
24
+
25
+A 
26
+
27
+a 
28
+
29
+G4  cargo_lifters@rediffmail.com 
30
+
31
+ff 
32
+
33
+aH 
34
+
35
+j 
36
+4 
37
+
38
+4 
39
+
40
+Se 
41
+
42
+G-20,  Minerva  Complex,  S.D.  Road,  Secunderabad  -  500003.
43
+
44
+

+ 10
- 0
Business_cards/visitingcard.csv 파일 보기

@@ -0,0 +1,10 @@
1
+Keys,Values
2
+PhoneNumber,91-9848092514
3
+Email,cargo_lifters@rediffmail.com
4
+ContactPersonName,L.K.  Agarwal 
5
+PinCode1,500003
6
+country_code,IN
7
+LandMark1,"Secunderabad H.O, Kingsway"
8
+state_name,Telangana
9
+state_code,40
10
+CityName1,Hyderabad

+ 491
- 0
Events/Allunq_People.py 파일 보기

@@ -0,0 +1,491 @@
1
+import pickle
2
+import numpy as np
3
+import face_recognition
4
+import os
5
+import cv2
6
+import datetime
7
+import click
8
+@click.command()
9
+@click.argument('eventid', default='')
10
+
11
+
12
+def predict123(eventid):
13
+
14
+    original_working_directory = os.getcwd()
15
+    new_networked_directory = r'\\192.168.88.99\\Bizgaze\\port6003\\wwwroot\\_files\\'
16
+    # change to the networked directory
17
+    os.chdir(new_networked_directory)
18
+
19
+
20
+
21
+    People = "./ALL_UNQ/" + eventid + "/"
22
+    Gallery = './guestimage/'+ eventid + "/"
23
+
24
+
25
+    x= datetime.datetime.now()
26
+    print('Execution Started at:',x)
27
+
28
def saveEncodings(encs, names, fname='encodings.pickle'):
    """
    Save face encodings in a pickle file to be used in future.

    Parameters
    ----------
    encs : list of np arrays
        List of face encodings.
    names : list of strings
        List of names for each face encoding.
    fname : String, optional
        Name/Location for pickle file. The default is "encodings.pickle".

    Returns
    -------
    None.

    """
    # Pair each name with its encoding so the two stay aligned on read-back.
    data = [{"name": nm, "encoding": enc} for (nm, enc) in zip(names, encs)]

    # dump the facial encodings data to disk; the context manager guarantees
    # the handle is closed even if pickling raises (the original used a bare
    # open/close pair that leaked the handle on error).
    print("[INFO] serializing encodings...")
    with open(fname, "wb") as f:
        f.write(pickle.dumps(data))
58
+
59
+        # Function to read encodings
60
+
61
def readEncodingsPickle(fname):
    """
    Read saved face encodings back from a pickle file.

    Parameters
    ----------
    fname : String
        Name of pickle file.(Full location)

    Returns
    -------
    encodings : list of np arrays
        list of all saved encodings
    names : List of Strings
        List of all saved names

    """
    # Use a context manager: the original did open(fname).read() and never
    # closed the handle. Also dropped the original's np.array(data) wrap —
    # it built an object-dtype array of dicts for no benefit; plain list
    # iteration below behaves identically.
    with open(fname, "rb") as f:
        data = pickle.loads(f.read())
    encodings = [d["encoding"] for d in data]
    names = [d["name"] for d in data]
    return encodings, names
84
+
85
+    # Function to create encodings and get face locations
86
def createEncodings(image):
    """
    Create face encodings for a given image and also return face locations
    in the given image.

    Parameters
    ----------
    image : cv2 mat
        Image you want to detect faces from.

    Returns
    -------
    known_encodings : list of np array
        List of face encodings in a given image
    face_locations : list of tuples
        list of tuples for face locations in a given image
    """
    print("Encoding")

    # Detect every face region first, then encode exactly those regions so
    # locations and encodings line up index-for-index.
    locations = face_recognition.face_locations(image)
    encodings = face_recognition.face_encodings(image, known_face_locations=locations)

    return encodings, locations
111
+
112
+    # Function to compare encodings
113
def compareFaceEncodings(unknown_encoding, known_encodings, known_names):
    """
    Compares face encodings to check if 2 faces are same or not.

    Parameters
    ----------
    unknown_encoding : np array
        Face encoding of unknown people.
    known_encodings : np array
        Face encodings of known people.
    known_names : list of strings
        Names of known people

    Returns
    -------
    acceptBool : Bool
        face matched or not
    duplicateName : String
        Name of matched face
    distance : Float
        Distance between 2 faces
    """
    # Boolean match vector (tolerance 0.47) plus raw distances; the closest
    # known face decides the outcome.
    matches = face_recognition.compare_faces(known_encodings, unknown_encoding, tolerance=0.47)
    face_distances = face_recognition.face_distance(known_encodings, unknown_encoding)
    best = np.argmin(face_distances)
    distance = face_distances[best]

    if matches[best]:
        return True, known_names[best], distance
    return False, "", distance
152
+
153
+    p = []
154
def f_CSVwrite():
    """
    Rebuild the matched-people CSV/JSON exports from the paths collected in `p`.

    Groups the saved-image paths accumulated by saveImageToDirectory, joins
    them against the files currently under `Gallery`, and writes
    all_people_fina123.csv / all_people_final123.json.

    NOTE(review): relies on the enclosing scope for `p` (list of saved paths)
    and `Gallery` (gallery root); paths are assumed to use '/' separators.
    """
    import pandas as pd
    import os

    q = pd.DataFrame(p)
    # Count how often each saved path occurred, then split the path into its
    # components so the bare file name can be matched independently.
    df = m = q.groupby([0], as_index=False).count()
    z = df[0].str.split('/', expand=True)
    z.to_csv('all_people.csv', index=False)

    df2 = pd.read_csv('./all_people.csv')
    df2.rename({df2.columns[-1]: 'test'}, axis=1, inplace=True)
    df2.rename({df2.columns[-2]: 'Matched'}, axis=1, inplace=True)
    df2 = df2[['Matched', 'test']]

    # Collect every file currently present in the gallery tree.
    c = []
    for root, dirs, files in os.walk(Gallery, topdown=False):
        for name in files:
            c.append(os.path.join(root, name))
    df = pd.DataFrame(c)

    df1 = df[0].str.split("/", expand=True)
    df1.rename({df1.columns[-1]: 'test'}, axis=1, inplace=True)
    merge = pd.merge(df2, df1, on='test', how='left')
    merge.rename({merge.columns[-1]: 'EventName'}, axis=1, inplace=True)

    # Strip the extension off the file name to get a bare image name.
    mergesplit = merge.loc[:, 'test'].str.split(".", expand=True)
    mergesplit.rename({mergesplit.columns[-2]: 'ImageName'}, axis=1, inplace=True)
    mergesplit = mergesplit.loc[:, 'ImageName']

    # BUG FIX: the original read "... + '\\' + + merge['test']" — a stray
    # unary plus applied to a string Series, which raises TypeError in pandas.
    merge['Imagepath'] = "\\_files\\1\\Gallery\\" + merge['EventName'] + '\\' + merge['test']

    r = pd.concat([merge, mergesplit], axis=1, join='inner')
    df2 = r.dropna(subset=['Matched'])

    column_list = ['Matched', 'Imagepath', 'ImageName', 'EventName']
    df2[column_list].to_csv('all_people_fina123.csv', index=False)
    df2[column_list].to_json('all_people_final123.json', orient="records")
214
+
215
+
216
+        # import requests
217
+        # import json
218
+
219
+        # with open('all_people_final123.json', 'r') as json_file:
220
+        #     json_load = json.load(json_file)
221
+        #     #url = "https://test.bizgaze.app:8443/apis/v4/bizgaze/integrations/testevents/save"
222
+
223
+        # payload = json.dumps(json_load).replace("]", "").replace("[", "")
224
+        # print(payload)
225
+        # headers = {
226
+        #     'Authorization': 'stat 7f1007799b1f42999544d0338251bb19',
227
+        #     'Content-Type': 'application/json'
228
+        #     }
229
+        # response = requests.request("POST", url, headers=headers, data=payload)
230
+        # print("##############################################################")
231
+        # print(response.text)
232
+
233
+        # p.clear()
234
+
235
+    # Save Image to new directory
236
def saveImageToDirectory(image, name, imageName):
    """
    Saves images to directory and records the saved path for CSV export.

    Parameters
    ----------
    image : cv2 mat
        Image you want to save.
    name : String
        Directory where you want the image to be saved.
    imageName : String
        Name of image.

    Returns
    -------
    None.

    """
    # BUG FIX: the original concatenated original_working_directory directly
    # with "Allunq_People/" without a separator, producing paths like
    # ".../wwwrootAllunq_People/...". The sibling script (Allunq_copy_gallery)
    # uses "/Allunq_.../", so insert the missing '/'. Keep '/'-style joins:
    # f_CSVwrite later splits these paths on '/'.
    path = original_working_directory + "/Allunq_People/" + eventid + '/' + name
    # exist_ok collapses the original's redundant exists/makedirs dance.
    os.makedirs(path, exist_ok=True)

    cv2.imwrite(path + "/" + imageName, image)

    # Record the saved path (one single-element row) so f_CSVwrite can
    # rebuild the exports from `p`.
    p.append([path + "/" + imageName])
    f_CSVwrite()
271
+
272
+    # Function for creating encodings for known people
273
def processKnownPeopleImages(path=People, saveLocation="./Zero_gallery_known_encodings.pickle"):
    """
    Process images of known people and create face encodings to compare in future.
    Each image should have just 1 face in it.

    Parameters
    ----------
    path : STRING, optional
        Path for known people dataset (the event's ALL_UNQ folder).
        It should be noted that each image in this dataset should contain only 1 face.
    saveLocation : STRING, optional
        Path for storing encodings for known people dataset. The default is
        "./Zero_gallery_known_encodings.pickle" in the current directory.

    Returns
    -------
    None.

    """
    known_encodings = []
    known_names = []
    for img in os.listdir(path):
        # BUG FIX: Windows drops a Thumbs.db cache into image folders; the
        # original deleted it only AFTER cv2.imread had already tried to
        # decode it, so imread returned None and cv2.resize crashed.
        # Remove and skip it before reading.
        if img == "Thumbs.db":
            os.remove(path + "Thumbs.db")
            continue

        imgPath = path + img
        print(imgPath)

        # Read and shrink slightly to keep detection time reasonable.
        image = cv2.imread(imgPath)
        name = img.rsplit('.')[0]
        image = cv2.resize(image, (0, 0), fx=0.9, fy=0.9, interpolation=cv2.INTER_LINEAR)

        # Get locations and encodings
        encs, locs = createEncodings(image)
        if not encs:
            # BUG FIX: no face detected. The original removed the image but
            # STILL appended its name, desynchronising known_names from
            # known_encodings so later matches could report the wrong person.
            # Skip the name as well.
            os.remove(People + img)
            continue

        known_encodings.append(encs[0])
        known_names.append(name)

    saveEncodings(known_encodings, known_names, saveLocation)
329
+
330
+    # Function for processing dataset images
331
def processDatasetImages(saveLocation="./Gallery_encodings.pickle"):
    """
    Process images in the dataset from where you want to separate images.

    Walks every file under `Gallery`, detects faces, and compares each face
    with the known-people encodings produced by processKnownPeopleImages().
    Matched images are copied into the matched person's folder; images with
    no matching face are copied into folder "0".

    Parameters
    ----------
    saveLocation : STRING, optional
        Path for storing encodings (kept for interface compatibility).

    Returns
    -------
    None.
    """
    # Read pickle file for known people to compare faces from.
    people_encodings, names = readEncodingsPickle("./Zero_gallery_known_encodings.pickle")

    for root, dirs, files in os.walk(Gallery, topdown=False):
        for name in files:
            fullPath = os.path.join(root, name)

            # Keep an untouched copy for saving; detect on a slightly
            # shrunken version to speed things up.
            image = cv2.imread(fullPath)
            orig = image.copy()
            image = cv2.resize(image, (0, 0), fx=0.9, fy=0.9, interpolation=cv2.INTER_LINEAR)

            encs, locs = createEncodings(image)

            # Compare every detected face against the known people.
            matched = 0
            for idx, loc in enumerate(locs):
                top, right, bottom, left = loc
                ok, who, dist = compareFaceEncodings(encs[idx], people_encodings, names)
                if ok:
                    saveImageToDirectory(orig, who, name)
                    matched = 1

            if matched == 1:
                print("Match Found")
            else:
                saveImageToDirectory(orig, "0", name)
395
+
396
+            # Show Image
397
+            #     cv2.rectangle(image, (left, top), (right, bottom), color=(255, 568, 568), thickness=2)
398
+            # # cv2.imshow("Image", image)
399
+            #     cv2.waitKey(1)
400
+            #     cv2.destroyAllWindows()
401
+
402
+
403
def main():
    """
    Main Function.

    Builds the known-people encodings, then sorts the gallery against them.

    Returns
    -------
    None.

    """
    processKnownPeopleImages()
    processDatasetImages()
476
+
477
+    if __name__ == "__main__":
478
+        main()
479
+
480
+
481
+    #    return render_template('index.html')
482
+    y=datetime.datetime.now()
483
+    print('Completed at:',y)
484
+    z=y-x
485
+    print('Time Taken:',z)
486
+    return (str(y-x))
487
+    #return 'ALL IMAGES MATCHED'
488
+
489
+
490
+
491
+predict123()

+ 527
- 0
Events/Allunq_copy_gallery.py 파일 보기

@@ -0,0 +1,527 @@
1
+import pickle
2
+import numpy as np
3
+import face_recognition
4
+import os
5
+import cv2
6
+import datetime
7
+import click
8
+import requests
9
+@click.command()
10
+@click.argument('eventid', default='')
11
+
12
+
13
+def predict456(eventid):
14
+
15
+    original_working_directory = os.getcwd()
16
+    new_networked_directory = r'\\192.168.88.99\\Bizgaze\\port6003\\wwwroot\\_files\\'
17
+    # change to the networked directory
18
+    os.chdir(new_networked_directory)
19
+
20
+
21
+
22
+
23
+
24
+
25
+    #People = './ALL_UNQ/' + eventid + "/"
26
+    People="./ALL_UNQ/"+ eventid + "/"
27
+    #Gallery = './Copy_Gallery/'+ eventid + "/"
28
+    Gallery='./1/CopyGallery/'+ eventid + "/"
29
+
30
+
31
+
32
+    x= datetime.datetime.now()
33
+    print('ALLunq_copy_gallery Running')
34
+    print('Execution Started at:',x)
35
+
36
def saveEncodings(encs, names, fname='encodings.pickle'):
    """
    Save face encodings in a pickle file to be used in future.

    Parameters
    ----------
    encs : List of np arrays
        List of face encodings.
    names : List of strings
        List of names for each face encoding.
    fname : String, optional
        Name/Location for pickle file. The default is "encodings.pickle".

    Returns
    -------
    None.

    """
    # Keep name/encoding pairs together so read-back stays aligned.
    data = [{"name": nm, "encoding": enc} for (nm, enc) in zip(names, encs)]

    # dump the facial encodings data to disk; `with` closes the handle even
    # on error (the original's open/close pair leaked it on exceptions).
    print("[INFO] serializing encodings...")
    with open(fname, "wb") as f:
        f.write(pickle.dumps(data))
66
+
67
+        # Function to read encodings
68
+
69
def readEncodingsPickle(fname):
    """
    Read saved encodings back from a pickle file.

    Parameters
    ----------
    fname : String
        Name of pickle file.(Full location)

    Returns
    -------
    encodings : list of np arrays
        list of all saved encodings
    names : List of Strings
        List of all saved names

    """
    # Context manager instead of the original's never-closed handle, and the
    # pointless np.array(...) wrap over a list of dicts is dropped — plain
    # list iteration behaves identically.
    with open(fname, "rb") as f:
        data = pickle.loads(f.read())
    encodings = [d["encoding"] for d in data]
    names = [d["name"] for d in data]
    return encodings, names
92
+
93
+    # Function to create encodings and get face locations
94
def createEncodings(image):
    """
    Create face encodings for a given image and also return face locations
    in the given image.

    Parameters
    ----------
    image : cv2 mat
        Image you want to detect faces from.

    Returns
    -------
    known_encodings : list of np array
        List of face encodings in a given image
    face_locations : list of tuples
        list of tuples for face locations in a given image
    """
    print("Encoding")

    # Locate faces first, then encode exactly those regions so the two
    # returned lists line up index-for-index.
    locs = face_recognition.face_locations(image)
    encs = face_recognition.face_encodings(image, known_face_locations=locs)

    return encs, locs
119
+
120
+    # Function to compare encodings
121
def compareFaceEncodings(unknown_encoding, known_encodings, known_names):
    """
    Compares face encodings to check if 2 faces are same or not.

    Parameters
    ----------
    unknown_encoding : np array
        Face encoding of unknown people.
    known_encodings : np array
        Face encodings of known people.
    known_names : list of strings
        Names of known people

    Returns
    -------
    acceptBool : Bool
        face matched or not
    duplicateName : String
        Name of matched face
    distance : Float
        Distance between 2 faces
    """
    # Match vector at tolerance 0.47 plus raw distances; the nearest known
    # face decides whether this is a hit.
    matches = face_recognition.compare_faces(known_encodings, unknown_encoding, tolerance=0.47)
    dists = face_recognition.face_distance(known_encodings, unknown_encoding)
    best = np.argmin(dists)
    distance = dists[best]

    if matches[best]:
        return True, known_names[best], distance
    return False, "", distance
160
+
161
+    p = []
162
+
163
def f_CSVwrite():
    """
    Export matched unregistered-user records and push them to the events API.

    Uses the saved-image paths accumulated in `p` (enclosing scope) together
    with the files under `Gallery` to build Zero_Gallery123254.csv and
    events12554.json, POSTs the JSON to the Bizgaze endpoint, then clears `p`.

    NOTE(review): paths are assumed to use '/' separators (they are split on
    '/' below) — verify against saveImageToDirectory's output.
    """
    import pandas as pd
    import os
    import json
    import requests

    q = pd.DataFrame(p)
    # Count occurrences of each saved path, then split into components so the
    # bare file name can be matched independently.
    df = q.groupby([0], as_index=False).count()
    z = df[0].str.split('/', expand=True)
    z.to_csv('zzzzzzzzzzzzz.csv', index=False)

    df2 = pd.read_csv('./zzzzzzzzzzzzz.csv')
    df2.rename({df2.columns[-1]: 'test'}, axis=1, inplace=True)
    df2.rename({df2.columns[-2]: 'Matched'}, axis=1, inplace=True)
    df2 = df2[['Matched', 'test']]

    # Every file currently in the copy-gallery tree.
    c = []
    for root, dirs, files in os.walk(Gallery, topdown=False):
        for name in files:
            c.append(os.path.join(root, name))
    df = pd.DataFrame(c)

    df1 = df[0].str.split("/", expand=True)
    df1.rename({df1.columns[-1]: 'test'}, axis=1, inplace=True)
    merge = pd.merge(df2, df1, on='test', how='left')
    merge.rename({merge.columns[-1]: 'EventName'}, axis=1, inplace=True)

    # Bare image name (extension stripped).
    mergesplit = merge.loc[:, 'test'].str.split(".", expand=True)
    mergesplit.rename({mergesplit.columns[-2]: 'ImageName1'}, axis=1, inplace=True)
    mergesplit = mergesplit.loc[:, 'ImageName1']

    # BUG FIX: the original read "... + '\\' + + merge['test']" — a stray
    # unary plus applied to a string Series, which raises TypeError in pandas.
    merge['Imagepath'] = "\\_files\\1\\Gallery\\" + merge['EventName'] + '\\' + merge['test']
    merge['Matched'] = '\\_files\\ALL_UNQ\\' + eventid + '\\' + df2['Matched'] + '.jpg'
    merge["ImageName"] = df2['Matched'] + '.jpg'

    r = pd.concat([merge, mergesplit], axis=1, join='inner')
    df2 = r.dropna(subset=['Matched'])

    column_list = ['Matched', 'ImageName', 'Imagepath', 'ImageName1', 'EventName']
    df2[column_list].to_csv('Zero_Gallery123254.csv', index=False)
    df2[column_list].to_json('events12554.json', orient="records")

    with open('events12554.json', 'r') as json_file:
        json_load = json.load(json_file)
    url = "https://eventxstreamnew.bizgaze.com:5443/apis/v4/bizgaze/integrations/testevents/unregistereduser"

    # Strip the JSON array brackets — the endpoint expects a bare object list.
    payload = json.dumps(json_load).replace("]", "").replace("[", "")
    print(payload)
    # SECURITY: hard-coded API token committed to source. Move it to an
    # environment variable / config store and rotate the exposed value.
    headers = {
        'Authorization': 'stat e44ced3eff684aa9b932672ea8406029',
        'Content-Type': 'application/json'
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    print("##############################################################")
    print(response.text)

    p.clear()
251
+
252
+
253
+    # Save Image to new directory
254
def saveImageToDirectory(image, name, imageName):
    """
    Saves images to directory and records the saved path for the API export.

    Parameters
    ----------
    image : cv2 mat
        Image you want to save.
    name : String
        Directory where you want the image to be saved.
    imageName : String
        Name of image.

    Returns
    -------
    None.

    """
    # Single path expression (the original kept two identical copies in
    # `path`/`path1`); keep '/'-style joins because f_CSVwrite splits on '/'.
    path = original_working_directory + "/Allunq_CopyGallery/" + name
    # exist_ok collapses the original's redundant exists-check/makedirs dance.
    os.makedirs(path, exist_ok=True)

    cv2.imwrite(path + "/" + imageName, image)

    # Record the saved path (one single-element row) so f_CSVwrite can build
    # the export from `p`.
    p.append([path + "/" + imageName])
    f_CSVwrite()
286
+
287
+    # Function for creating encodings for known people
288
def processKnownPeopleImages(path=People, saveLocation="./Zero_gallery_known_encodings.pickle"):
    """
    Process images of known people and create face encodings to compare in future.
    Each image should have just 1 face in it.

    Parameters
    ----------
    path : STRING, optional
        Path for known people dataset (the event's ALL_UNQ folder).
        It should be noted that each image in this dataset should contain only 1 face.
    saveLocation : STRING, optional
        Path for storing encodings for known people dataset. The default is
        "./Zero_gallery_known_encodings.pickle" in the current directory.

    Returns
    -------
    None.

    """
    known_encodings = []
    known_names = []
    for img in os.listdir(path):
        # BUG FIX: delete and skip Windows' Thumbs.db cache BEFORE trying to
        # decode it — the original removed it only after cv2.imread, so
        # imread returned None and cv2.resize crashed on that entry.
        if img == "Thumbs.db":
            os.remove(path + "Thumbs.db")
            continue

        imgPath = path + img
        print(imgPath)

        # Read and shrink slightly to keep detection time reasonable.
        image = cv2.imread(imgPath)
        name = img.rsplit('.')[0]
        image = cv2.resize(image, (0, 0), fx=0.9, fy=0.9, interpolation=cv2.INTER_LINEAR)

        # Get locations and encodings
        encs, locs = createEncodings(image)
        if not encs:
            # BUG FIX: no face detected. The original removed the image but
            # STILL appended its name, desynchronising known_names from
            # known_encodings so later matches could name the wrong person.
            os.remove(People + img)
            continue

        known_encodings.append(encs[0])
        known_names.append(name)

    saveEncodings(known_encodings, known_names, saveLocation)
344
+
345
+    # Function for processing dataset images
346
def processDatasetImages(saveLocation="./Gallery_encodings.pickle"):
    """
    Process images in the dataset from where you want to separate images.

    Walks every file under `Gallery`, detects faces, and compares each face
    against the known-people encodings from processKnownPeopleImages().
    Matched images are copied into that person's folder; images with no
    matching face are copied into folder "0".

    Parameters
    ----------
    saveLocation : STRING, optional
        Path for storing encodings (kept for interface compatibility).

    Returns
    -------
    None.
    """
    # Read pickle file for known people to compare faces from.
    people_encodings, names = readEncodingsPickle("./Zero_gallery_known_encodings.pickle")

    for root, dirs, files in os.walk(Gallery, topdown=False):
        for name in files:
            imgFile = os.path.join(root, name)

            # Keep an untouched copy for saving; detect on a slightly
            # shrunken version for speed.
            image = cv2.imread(imgFile)
            orig = image.copy()
            image = cv2.resize(image, (0, 0), fx=0.9, fy=0.9, interpolation=cv2.INTER_LINEAR)

            encs, locs = createEncodings(image)

            # Compare every detected face against the known people.
            hit = 0
            for idx, loc in enumerate(locs):
                top, right, bottom, left = loc
                ok, who, dist = compareFaceEncodings(encs[idx], people_encodings, names)
                if ok:
                    saveImageToDirectory(orig, who, name)
                    hit = 1

            if hit == 1:
                print("Match Found")
            else:
                saveImageToDirectory(orig, "0", name)
403
+
404
+            # Show Image
405
+            #     cv2.rectangle(image, (left, top), (right, bottom), color=(255, 568, 568), thickness=2)
406
+            # # cv2.imshow("Image", image)
407
+            #     cv2.waitKey(1)
408
+            #     cv2.destroyAllWindows()
409
+
410
+
411
def main():
    """
    Entry point: encode known people, then sort the gallery against them.

    Runs processKnownPeopleImages() to build the known-encodings pickle and
    processDatasetImages() to separate gallery images by matched identity.

    Returns
    -------
    None.
    """
    processKnownPeopleImages()
    processDatasetImages()
    # NOTE(review): a large block of commented-out CSV/JSON export and HTTP
    # upload code was removed here; recover it from version control if the
    # export pipeline is ever revived.
    print("Completed")
512
+
513
+    if __name__ == "__main__":
514
+        main()
515
+
516
+
517
+    #    return render_template('index.html')
518
+    y=datetime.datetime.now()
519
+    print('Completed at:',y)
520
+    z=y-x
521
+    print('Time Taken:',z)
522
+    return (str(y-x))
523
+    #return 'ALL IMAGES MATCHED'
524
+
525
+
526
+
527
+predict456()

+ 1
- 0
Events/events12554.json 파일 보기

@@ -0,0 +1 @@
1
+[{"Matched":"\\_files\\ALL_UNQ\\100013660000161\\sefsf4556.jpg","ImageName":"sefsf4556.jpg","Imagepath":"\\_files\\1\\Gallery\\100013660000161\\100011460001404.jpg","ImageName1":"100011460001404","EventName":"100013660000161"}]

+ 117
- 0
Events/front_face.py 파일 보기

@@ -0,0 +1,117 @@
1
+#from IPython.core.pylabtools import find_gui_and_backend
2
+#from oswalk import files
3
+#from google.colab.patches import cv2_imshow
4
+import cv2
5
+import mediapipe as mp
6
+import numpy as np
7
+import glob
8
+import click
9
+mp_face_mesh = mp.solutions.face_mesh
10
+face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5)
11
@click.command()
@click.argument('eventid', default='')
def cap(eventid):
    """
    Copy roughly front-facing crops for an event into the front_face folder.

    For every image in the event's sepration_crop directory, estimate head
    pose from MediaPipe Face Mesh landmarks via solvePnP. Images whose pose
    is not classified Left/Right/Down (i.e. "Forward") are copied to
    front_face/<eventid>.
    """
    # Hoisted out of the per-image loop (previously re-imported every file).
    import os
    import shutil

    for files in glob.glob("C:\\Users\\Administrator\\Documents\\AI\\runtimecropimages\\sepration_crop\\" + eventid + "\\*"):
        image = cv2.imread(files)

        # MediaPipe expects RGB input.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Mark read-only so MediaPipe can avoid an internal copy.
        image.flags.writeable = False
        results = face_mesh.process(image)
        image.flags.writeable = True

        # Back to BGR for OpenCV drawing below.
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

        img_h, img_w, img_c = image.shape
        face_3d = []
        face_2d = []

        if results.multi_face_landmarks:
            for face_landmarks in results.multi_face_landmarks:
                # Fixed landmark subset: eyes (33, 263), nose tip (1),
                # mouth corners (61, 291), chin (199).
                for idx, lm in enumerate(face_landmarks.landmark):
                    if idx in (33, 263, 1, 61, 291, 199):
                        if idx == 1:
                            nose_2d = (lm.x * img_w, lm.y * img_h)
                            nose_3d = (lm.x * img_w, lm.y * img_h, lm.z * 8000)
                        x, y = int(lm.x * img_w), int(lm.y * img_h)
                        face_2d.append([x, y])
                        face_3d.append([x, y, lm.z])

                face_2d = np.array(face_2d, dtype=np.float64)
                face_3d = np.array(face_3d, dtype=np.float64)

                # Approximate pinhole camera intrinsics from the image size.
                focal_length = 1 * img_w
                cam_matrix = np.array([[focal_length, 0, img_h / 2],
                                       [0, focal_length, img_w / 2],
                                       [0, 0, 1]])
                dist_matrix = np.zeros((4, 1), dtype=np.float64)

                # Solve head pose, then convert to Euler-style angles.
                success, rot_vec, trans_vec = cv2.solvePnP(face_3d, face_2d, cam_matrix, dist_matrix)
                rmat, jac = cv2.Rodrigues(rot_vec)
                angles, mtxR, mtxQ, Qx, Qy, Qz = cv2.RQDecomp3x3(rmat)

                # Scaled pitch (x) / yaw (y) used for the direction thresholds.
                x = angles[0] * 360
                y = angles[1] * 360

                if y < -20:
                    text = "Left"
                elif y > 20:
                    text = "Right"
                elif x < -20:
                    text = "Down"
                else:
                    text = "Forward"
                    # Front-facing image: copy into the event's front_face dir.
                    print(files)
                    shutil.copy2(files, 'C:\\Users\\Administrator\\Documents\\AI\\runtimecropimages\\front_face\\' + eventid + "\\")

                # Draw the nose direction and label (debug visualization).
                nose_3d_projection, jacobian = cv2.projectPoints(nose_3d, rot_vec, trans_vec, cam_matrix, dist_matrix)
                p1 = (int(nose_2d[0]), int(nose_2d[1]))
                p2 = (int(nose_3d_projection[0][0][0]), int(nose_3d_projection[0][0][1]))
                cv2.line(image, p1, p2, (255, 0, 0), 2)
                cv2.putText(image, text, (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

cap()

+ 49
- 0
Events/is_existALLUNQ.py 파일 보기

@@ -0,0 +1,49 @@
1
+import os
2
+import click
3
+import shutil
4
@click.command()
@click.argument('eventid', default='')
def checkfolder(eventid):
    """
    Ensure the networked ALL_UNQ/<eventid> folder is populated.

    If the folder is empty, move every file from the local unique_1/<eventid>
    staging directory into it; otherwise delegate to unique_Allunq.py to
    deduplicate against the existing contents.
    """
    original_working_directory = os.getcwd()
    new_networked_directory = r'\\192.168.88.99\\Bizgaze\\port6003\\wwwroot\\_files\\'
    # Work relative to the network share.
    os.chdir(new_networked_directory)

    for dirpath, dirnames, files in os.walk('.\\ALL_UNQ\\' + eventid + '/'):
        if not os.listdir(dirpath):
            print("files not found")
            # First run for this event: seed ALL_UNQ from the local staging
            # area. (Inner walk variable renamed so it no longer shadows the
            # outer loop's `files`.)
            for root, dirs, staged_files in os.walk('C:\\Users\\Administrator\\Documents\\AI\\runtimecropimages\\unique_1\\' + eventid + '\\'):
                for file in staged_files:
                    path_file = os.path.join(root, file)
                    shutil.move(path_file, '.\\ALL_UNQ\\' + eventid + "\\")
        else:
            print("files found")
            # Folder already has faces: run the dedup script against it.
            cmd = "python C:\\Users\\Administrator\\Documents\\AI\\runtimecropimages\\unique_Allunq.py " + str(eventid)
            os.system(cmd)

checkfolder()
34
+
35
+
36
+
37
+
38
+
39
+
40
+
41
+
42
+
43
+
44
+
45
+
46
+
47
+
48
+
49
+

+ 16
- 0
Events/jsonRequest.py 파일 보기

@@ -0,0 +1,16 @@
1
import requests
import json

# Load the pre-built event payload and push it to the Bizgaze integration API.
with open('C:\\Users\\Bizgaze\\Desktop\\AI\\AI_Events\\csv\\EventXtream.json', 'r') as json_file:
    json_load = json.load(json_file)
    url = "http://localhost:3088/apis/v4/bizgaze/integrations/json/save/List"

    payload1 = json.dumps(json_load)
    print('--------------------------------------------------------------------------')
    print(payload1)
    # NOTE(review): auth token is hard-coded; move it to config/environment.
    headers = {
        'Authorization': 'Stat 22cbfadfa548448bb0b55193bc8e99fa',
        'Content-Type': 'application/json'
    }
    # timeout prevents the script from hanging forever if the API is down.
    response = requests.request("POST", url, headers=headers, data=payload1, timeout=60)
    print("##############################################################")
    print(response.text)

+ 252
- 0
Events/main_application.py 파일 보기

@@ -0,0 +1,252 @@
1
+import os
2
+
3
+import click
4
+from flask import Flask, render_template, request, redirect, send_file
5
+import shutil
6
+import glob
7
+
8
+
9
+
10
+app = Flask(__name__)
11
+
12
@app.route('/', methods=['GET'])
def home():
    """Render the landing page."""
    return render_template('index.html')
15
+
16
+
17
+import datetime
18
+
19
+# def crop_Alluq():
20
+#     import os
21
+#     cmd = "python .\\sepration_crop.py"
22
+#     os.system(cmd)
23
+#
24
+#     import os
25
+#     cmd = "python .\\front_face.py"
26
+#     os.system(cmd)
27
+#
28
+#     import os
29
+#     cmd = "python .\\sepration_cluster.py"
30
+#     os.system(cmd)
31
+#
32
+#     import os
33
+#     cmd = "python .\\unique_1.py"
34
+#     os.system(cmd)
35
+#
36
+#     import os
37
+#     cmd = "python .\\unique_Allunq.py"
38
+#     os.system(cmd)
39
+#
40
+#     # import os
41
+#     # cmd = "python .\\Allunq_copy_gallery.py"
42
+#     # os.system(cmd)
43
+#
44
+#     import os
45
+#     cmd = "python .\\remove.py"
46
+#     os.system(cmd)
47
+#
48
+#
49
+# def sync_Alluq_people():
50
+#     import os
51
+#     cmd = "python .\\Allunq_People.py"
52
+#     os.system(cmd)
53
+
54
+
55
+
56
def crop_Alluq(eventid, original_working_directory):
    """
    Run the full cropping/dedup pipeline for an event.

    Executes the stage scripts in order, passing the event id to each:
    sepration_crop -> front_face -> sepration_cluster -> unique_1 ->
    is_existALLUNQ -> Allunq_copy_gallery -> Allunq_People.

    Parameters
    ----------
    eventid : str
        Event identifier forwarded to every stage script.
    original_working_directory : str
        Directory that contains the stage scripts (the cwd captured before
        chdir-ing to the network share).

    Returns
    -------
    str
        Status message "ended with images".
    """
    print("started with images")

    # Stage scripts, in pipeline order (replaces seven copy-pasted stanzas).
    stages = [
        "sepration_crop.py",
        "front_face.py",
        "sepration_cluster.py",
        "unique_1.py",
        "is_existALLUNQ.py",
        "Allunq_copy_gallery.py",
        "Allunq_People.py",
    ]
    for script in stages:
        cmd = "python " + original_working_directory + "\\" + script + " " + str(eventid)
        os.system(cmd)

    return "ended with images"
96
+
97
+
98
+
99
def sync_Alluq_people(eventid, original_working_directory):
    """
    Re-run only the Allunq_People stage for an event.

    Parameters
    ----------
    eventid : str
        Event identifier passed to the stage script.
    original_working_directory : str
        Directory that contains Allunq_People.py.

    Returns
    -------
    str
        Status message "ended with images".
    """
    command = "python " + original_working_directory + "\\Allunq_People.py " + str(eventid)
    os.system(command)
    return "ended with images"
107
+
108
+
109
def create_dir(eventid, original_working_directory):
    """
    Create the per-event working directories used by the pipeline.

    Builds front_face, sepration_cluster, sepration_crop, unique_1,
    output_unique_ALLUNQ, people_Allunq_zero_maingallery, Allunq_People and
    Allunq_CopyGallery under the script directory, plus ALL_UNQ/<eventid>
    relative to the current working directory (the network share).

    Parameters
    ----------
    eventid : str
        Event identifier used as the leaf directory name.
    original_working_directory : str
        Root under which the per-stage directories live.
    """
    print(original_working_directory)

    from pathlib import Path
    base = Path(original_working_directory)
    # Path joining instead of mixed '\\' / '/' string concatenation (portable);
    # parents=True so a missing stage root does not abort directory creation.
    for stage in ("front_face", "sepration_cluster", "sepration_crop",
                  "unique_1", "output_unique_ALLUNQ",
                  "people_Allunq_zero_maingallery", "Allunq_People",
                  "Allunq_CopyGallery"):
        (base / stage / eventid).mkdir(parents=True, exist_ok=True)
    # ALL_UNQ lives on the network share (current cwd), not under the script dir.
    (Path("ALL_UNQ") / eventid).mkdir(parents=True, exist_ok=True)
127
+
128
+
129
+
130
+
131
@app.route('/eventwise', methods=["GET", "POST"])
def eventwise():
    """
    Run the event pipeline for the event id given in the 'Dataset' query arg.

    Changes cwd to the network share, creates the per-event directories,
    removes a stray Thumbs.db, then either runs the full crop pipeline (when
    the event's CopyGallery folder has files) or only re-syncs people (when
    it is empty).

    Returns
    -------
    str
        Status message "ended with images".
    """
    original_working_directory = os.getcwd()
    new_networked_directory = r'\\192.168.88.99\\Bizgaze\\port6003\\wwwroot\\_files'
    # All relative paths below resolve against the network share.
    os.chdir(new_networked_directory)

    eventid = request.args.get('Dataset')

    create_dir(eventid, original_working_directory)

    # Windows Explorer drops Thumbs.db into image folders; remove it so the
    # pipeline never tries to read it as an image.
    thumbs = os.path.join(new_networked_directory, eventid, "Thumbs.db")
    if os.path.exists(thumbs):
        os.remove(thumbs)

    x = datetime.datetime.now()
    print('Execution Started at:', x)

    for dirpath, dirnames, files in os.walk('1/CopyGallery/' + eventid + '/'):
        if not os.listdir(dirpath):
            print("No files found in the directory.")
            print("working on sync_Alluq_people.........")
            sync_Alluq_people(eventid, original_working_directory)
        else:
            print("Some files found in the directory.")
            print("working on crop_Alluq.........")
            crop_Alluq(eventid, original_working_directory)

    return "ended with images"
245
+
246
@app.route('/json')
def json():
    """Download the generated path.json file as an attachment."""
    # NOTE(review): this view name shadows any module-level `json` import.
    return send_file('./path.json', as_attachment=True)

if __name__ == "__main__":
    # Bind all interfaces so other machines on the LAN can trigger runs.
    app.run(host="0.0.0.0", port=5001, debug=True)

BIN
Events/model/dlib_face_recognition_resnet_model_v1.dat 파일 보기


BIN
Events/model/shape_predictor_5_face_landmarks.dat 파일 보기


BIN
Events/model/shape_predictor_68_face_landmarks.dat 파일 보기


+ 419
- 0
Events/people_Allunq_zero_maingallery.py 파일 보기

@@ -0,0 +1,419 @@
1
+import pickle
2
+import numpy as np
3
+import face_recognition
4
+import os
5
+import cv2
6
+import datetime
7
+from main_application import *
8
+eventid=str(Id[0])
9
+Gallery='./Gallery/'+eventid+"/"
10
+People='./Allunq_People/'+eventid+"/"+'568/'
11
+# Gallery='D:\\DevelopmentNew\\web\\Web.Server\\wwwroot\\_files\\1\\CopyGallery\\'
12
+# People='D:\\DevelopmentNew\\web\\Web.Server\\wwwroot\\_files\\People\\'
13
+
14
+def predict(Gallery=Gallery,People=People):
15
+    x= datetime.datetime.now()
16
+    print('Execution Started at:',x)
17
+
18
def saveEncodings(encs, names, fname='encodings.pickle'):
    """
    Save face encodings together with their names in a pickle file.

    Parameters
    ----------
    encs : list of np.ndarray
        Face encodings.
    names : list of str
        Name for each encoding, aligned with `encs`.
    fname : str, optional
        Pickle file path. The default is 'encodings.pickle'.

    Returns
    -------
    None.
    """
    # One record per (name, encoding) pair.
    data = [{"name": nm, "encoding": enc} for nm, enc in zip(names, encs)]

    # dump the facial encodings data to disk
    print("[INFO] serializing encodings...")
    # `with` guarantees the file is closed even if pickling raises
    # (the original left the handle open on error).
    with open(fname, "wb") as f:
        pickle.dump(data, f)
48
+
49
+        # Function to read encodings
50
+
51
def readEncodingsPickle(fname):
    """
    Read encodings and names back from a pickle file.

    Parameters
    ----------
    fname : str
        Pickle file path (as written by saveEncodings).

    Returns
    -------
    encodings : list of np.ndarray
        All saved encodings.
    names : list of str
        All saved names, aligned with `encodings`.
    """
    # `with` closes the handle (original used open(...).read() and leaked it);
    # the needless np.array() wrap of a list of dicts is dropped.
    with open(fname, "rb") as f:
        data = pickle.load(f)
    encodings = [d["encoding"] for d in data]
    names = [d["name"] for d in data]
    return encodings, names
74
+
75
# Function to create encodings and get face locations
def createEncodings(image):
    """
    Detect faces in an image and compute an encoding for each one.

    Parameters
    ----------
    image : cv2 mat
        Image to detect faces in.

    Returns
    -------
    known_encodings : list of np.ndarray
        One encoding per detected face.
    face_locations : list of tuple
        Bounding box (top, right, bottom, left) for each detected face.
    """
    print("Encoding")
    # Locate faces first, then encode exactly those locations so the two
    # lists stay aligned.
    face_locations = face_recognition.face_locations(image)
    known_encodings = face_recognition.face_encodings(image, known_face_locations=face_locations)
    return known_encodings, face_locations
101
+
102
# Function to compare encodings
def compareFaceEncodings(unknown_encoding, known_encodings, known_names):
    """
    Check whether an unknown face matches any known face.

    Parameters
    ----------
    unknown_encoding : np.ndarray
        Encoding of the unknown face.
    known_encodings : np.ndarray
        Encodings of the known faces.
    known_names : list of str
        Names aligned with `known_encodings`.

    Returns
    -------
    acceptBool : bool
        True when the closest known face is within tolerance.
    duplicateName : str
        Name of the matched face, or "" when no match.
    distance : float
        Distance to the closest known face.
    """
    # tolerance=0.47 is stricter than face_recognition's 0.6 default.
    matches = face_recognition.compare_faces(known_encodings, unknown_encoding, tolerance=0.47)
    face_distances = face_recognition.face_distance(known_encodings, unknown_encoding)
    best_match_index = np.argmin(face_distances)
    distance = face_distances[best_match_index]
    if matches[best_match_index]:
        return True, known_names[best_match_index], distance
    return False, "", distance
142
+
143
p = []

# Save Image to new directory
def saveImageToDirectory(image, name, imageName):
    """
    Save an image under people_Allunq_zero_maingallery/<eventid>/<name>/.

    Also records the saved path in the enclosing list `p` for later export.

    Parameters
    ----------
    image : cv2 mat
        Image to save.
    name : str
        Sub-directory name (matched person id, or "568" for unmatched).
    imageName : str
        File name for the saved image.

    Returns
    -------
    None.
    """
    path = "./people_Allunq_zero_maingallery/" + eventid + "/" + name
    # Create the person's folder on first use (replaces the original's
    # redundant exists/pass/makedirs dance and duplicate path1 variable).
    os.makedirs(path, exist_ok=True)
    cv2.imwrite(path + "/" + imageName, image)
    # Track where the image was written (list-of-lists, as consumed later).
    p.append([path + "/" + imageName])
177
+
178
# Function for creating encodings for known people
def processKnownPeopleImages(path=People, saveLocation="./people_copyGallery_known_encodings.pickle"):
    """
    Encode every known-person image and save the encodings to a pickle.

    Each image must contain exactly one face; images where no face can be
    encoded are deleted from the dataset.

    Parameters
    ----------
    path : str, optional
        Known-people dataset directory. Defaults to the module-level People.
    saveLocation : str, optional
        Output pickle path. The default is
        "./people_copyGallery_known_encodings.pickle".

    Returns
    -------
    None.
    """
    known_encodings = []
    known_names = []
    for img in os.listdir(path):
        imgPath = path + img

        # Read image
        image = cv2.imread(imgPath)
        name = img.rsplit('.')[0]
        # Downscale slightly to speed up face detection.
        image = cv2.resize(image, (0, 0), fx=0.9, fy=0.9, interpolation=cv2.INTER_LINEAR)

        # Get locations and encodings
        encs, locs = createEncodings(image)
        try:
            known_encodings.append(encs[0])
        except IndexError:
            # No face found: drop the image and do NOT record its name.
            # (BUGFIX: the original still appended the name here, misaligning
            # known_names with known_encodings for all later matches.)
            os.remove(People + img)
            continue
        known_names.append(name)

    saveEncodings(known_encodings, known_names, saveLocation)
227
+
228
# Function for processing dataset images
def processDatasetImages(saveLocation="./Gallery_encodings.pickle"):
    """
    Separate gallery images by matching faces against known-people encodings.

    Walks every file under ``Gallery``, encodes each detected face and
    compares it with the encodings stored in
    ``./people_copyGallery_known_encodings.pickle``. Matched images go to a
    directory named after the matched person; unmatched ones go to "568".

    Parameters
    ----------
    saveLocation : str, optional
        Kept for interface compatibility; unused by the current body.

    Returns
    -------
    None.
    """
    # Encodings of known people built by processKnownPeopleImages().
    people_encodings, names = readEncodingsPickle("./people_copyGallery_known_encodings.pickle")

    for root, dirs, files in os.walk(Gallery, topdown=False):
        for name in files:
            imgPath = os.path.join(root, name)

            image = cv2.imread(imgPath)
            if image is None:
                # Unreadable / non-image file (e.g. Thumbs.db): skip instead
                # of crashing on image.copy() below.
                continue
            orig = image.copy()

            # Downscale slightly to speed up face detection.
            image = cv2.resize(image, (0, 0), fx=0.9, fy=0.9, interpolation=cv2.INTER_LINEAR)

            encs, locs = createEncodings(image)

            # Compare every detected face against the known people.
            knownFlag = 0
            for i, loc in enumerate(locs):
                if i >= len(encs):
                    # Fewer encodings than locations; nothing left to compare.
                    break
                acceptBool, duplicateName, distance = compareFaceEncodings(encs[i], people_encodings, names)
                if acceptBool:
                    saveImageToDirectory(orig, duplicateName, name)
                    knownFlag = 1
            if knownFlag == 1:
                print("Match Found")
            else:
                # "568" is this pipeline's bucket for unmatched faces.
                saveImageToDirectory(orig, "568", name)
292
+
293
+    def main():
294
+        """
295
+        Main Function.
296
+
297
+        Returns
298
+        -------
299
+        None.
300
+
301
+        """
302
+
303
+        processKnownPeopleImages()
304
+        processDatasetImages()
305
+        # import pandas as pd
306
+        # q = pd.DataFrame(p)
307
+        # df1 = q
308
+        # #print(df1)
309
+        # # df1.to_csv('m.csv')
310
+
311
+        # import pandas as pd
312
+        # import os
313
+        # c = []
314
+        # for root, dirs, files in os.walk(Gallery, topdown=False):
315
+        #     for name in files:
316
+        #         L = os.path.join(root, name)
317
+        #         c.append(L)
318
+        # df2 = pd.DataFrame(c)
319
+        # # df.to_csv('oswalk.csv')
320
+        # import pandas as pd
321
+        # # df1 = pd.read_csv('m.csv')
322
+        # # df2 = pd.read_csv('oswalk.csv')
323
+        # df1 = df1[568].str.split('/', expand=True)
324
+        # df1.rename({df1.columns[-2]: 'Matched'}, axis=1, inplace=True)
325
+        # df1.rename({df1.columns[-1]: 'test'}, axis=1, inplace=True)
326
+        # df2 = df2[568].str.split("\\", expand=True)
327
+        # df2.rename({df2.columns[-1]: 'test'}, axis=1, inplace=True)
328
+        # df2.rename({df2.columns[-2]: 'EventName'}, axis=1, inplace=True)
329
+        # merge = pd.merge(df2, df1, on='test', how='left')
330
+        # mergesplit = merge.loc[:, 'test'].str.split(".", expand=True)
331
+        # mergesplit.rename({mergesplit.columns[-2]: 'ImageName'}, axis=1, inplace=True)
332
+        # mergesplit = mergesplit.loc[:, 'ImageName']
333
+        # merge['path'] = "/_files/1/Gallery/" + merge['EventName'] + '/' + merge['test']
334
+        # frames = [merge, mergesplit]
335
+        # r = pd.concat(frames, axis=1, join='inner')
336
+        # column_list = ['Matched', 'path', 'ImageName', 'EventName']
337
+
338
+
339
+
340
+
341
+        # r[column_list].to_csv('./csv/people_copygallery.csv', index=False)
342
+
343
+        # df1 = pd.read_csv('./csv/Zero_Gallery.csv')
344
+        # df2 = pd.read_csv('./csv/people_copygallery.csv')
345
+        # data = pd.concat([df1, df2], axis=568, join='inner')
346
+        # data.drop(data.index[data['Matched'] == 568], inplace=True)
347
+        # data.drop(data.index[data['Matched'] == '568'], inplace=True)
348
+        # data.to_csv('./csv/xtream.csv',index=False)
349
+
350
+        # import pandas as pd
351
+        # import re
352
+        # r = pd.read_csv('./csv/xtream.csv')
353
+        # r['unregistered'] = r['Matched']
354
+        # x = r['Matched'].to_list()
355
+        # file_lst = x
356
+        # try:
357
+        #     final_list = [re.sub('[A-Za-z]+[\d]+[\w]*|[\d]+[A-Za-z]+[\w]*', '568', i) for i in file_lst]
358
+        #     a = pd.DataFrame(final_list)
359
+        #     # print(a)
360
+        #     frames = [r, a]
361
+        #     final = pd.concat(frames, axis=1, join='inner')
362
+        #     final.rename({final.columns[-1]: 'registered'}, axis=1, inplace=True)
363
+        #     final.loc[final["unregistered"] == final['registered'], "unregistered"] = 568
364
+
365
+        #     # print(final)
366
+        #     column_list = ['Matched', 'path', 'ImageName', 'EventName', 'registered', 'unregistered']
367
+
368
+
369
+        #     final[column_list].to_csv('./csv/Events.csv', index=False)
370
+
371
+
372
+
373
+
374
+        #     #final[column_list].to_json('path.json', orient="records")
375
+        # except TypeError:
376
+        #     pass
377
+        # df1 = pd.read_csv('./csv/Events.csv')
378
+        # df2 = pd.read_csv('./csv/unique_people.csv')
379
+        # df2['Matched'] = df2['Matched'].astype(str)
380
+        # merge = pd.merge(df1, df2, on='Matched', how='left')
381
+        # column_list = ['registered', 'unregistered','Matched', 'croped_guest_pic','path', 'ImageName', 'EventName']
382
+
383
+        # merge[column_list].to_csv('./csv/EventXtream.csv', index=False)
384
+        # merge[column_list].to_json('./csv/EventXtream.json',orient = 'records')
385
+        # #print(merge)
386
+        # # merge.to_csv('EventXtream.csv',index=False)
387
+        import shutil
388
+        from shutil import copytree, Error
389
+
390
+        import os
391
+
392
+
393
+        for root, dirs, files in os.walk('./Allunq_People/568/'):
394
+            for file in files:
395
+                path_file = os.path.join(root, file)
396
+                try:
397
+                    shutil.move(path_file, './ALL_UNQ/')
398
+                except shutil.Error as err:
399
+                    pass
400
+
401
+
402
+
403
+        print("Completed")
404
+
405
+    if __name__ == "__main__":
406
+        main()
407
+
408
+
409
+    #    return render_template('index.html')
410
+    y=datetime.datetime.now()
411
+    print('Completed at:',y)
412
+    z=y-x
413
+    print('Time Taken:',z)
414
+    return (str(y-x))
415
+    #return 'ALL IMAGES MATCHED'
416
+
417
+
418
+
419
+predict()

+ 41
- 0
Events/prdiction.py 파일 보기

@@ -0,0 +1,41 @@
1
+
2
+
3
+# Python program to check if
4
+# a directory contains file
5
+
6
+import os
7
+# directoryPath = "Copy_Gallery"
8
+# for root, dirs, files in os.walk(directoryPath):
9
+#     for name in files:
10
+#         f = os.path.join(root, name)
11
+#         print(type(f))
12
+#         # if len(f)==568:
13
+#         #     print("none")
14
+#         # else:
15
+#         #     print("yes")
16
+
17
+# Python program to check whether
18
+# the directory empty or not
19
+
20
+
21
+import os
22
+
23
+# path of the directory
24
+path = "Copy_Gallery"
25
+
26
+# Getting the list of directories
27
+dir = os.listdir(path)
28
+
29
+# Checking if the list is empty or not
30
+if len(dir) == 0:
31
+	print("Empty directory")
32
+else:
33
+	print("Not empty directory")
34
+import glob
35
+from os import listdir
36
+
37
+for f in glob.glob("Copy_Gallery/*"):
38
+		if len(os.listdir(f))==0:
39
+			print("file not found")
40
+		else:
41
+			print("file found")

BIN
Events/re.txt 파일 보기


+ 88
- 0
Events/remove.py 파일 보기

@@ -0,0 +1,88 @@
1
+import os
2
+import shutil
3
+
4
+files = './front_face/'
5
+
6
+for root, dirs, files in os.walk(files):
7
+    for f in files:
8
+        os.unlink(os.path.join(root, f))
9
+    for d in dirs:
10
+        shutil.rmtree(os.path.join(root, d))
11
+
12
+
13
+
14
+
15
+files = './output_unique_ALLUNQ/'
16
+
17
+for root, dirs, files in os.walk(files):
18
+    for f in files:
19
+        os.unlink(os.path.join(root, f))
20
+    for d in dirs:
21
+        shutil.rmtree(os.path.join(root, d))
22
+
23
+
24
+files = './sepration_cluster/'
25
+
26
+for root, dirs, files in os.walk(files):
27
+    for f in files:
28
+        os.unlink(os.path.join(root, f))
29
+    for d in dirs:
30
+        shutil.rmtree(os.path.join(root, d))
31
+
32
+
33
+files = './sepration_crop/'
34
+
35
+for root, dirs, files in os.walk(files):
36
+    for f in files:
37
+        os.unlink(os.path.join(root, f))
38
+    for d in dirs:
39
+        shutil.rmtree(os.path.join(root, d))
40
+
41
+
42
+
43
+
44
+files = './unique_1/'
45
+
46
+for root, dirs, files in os.walk(files):
47
+    for f in files:
48
+        os.unlink(os.path.join(root, f))
49
+    for d in dirs:
50
+        shutil.rmtree(os.path.join(root, d))
51
+
52
+
53
+
54
+files = './Allunq_People/'
55
+
56
+for root, dirs, files in os.walk(files):
57
+    for f in files:
58
+        os.unlink(os.path.join(root, f))
59
+    for d in dirs:
60
+        shutil.rmtree(os.path.join(root, d))
61
+
62
+files = './people_Allunq_zero_maingallery/'
63
+
64
+for root, dirs, files in os.walk(files):
65
+    for f in files:
66
+        os.unlink(os.path.join(root, f))
67
+    for d in dirs:
68
+        shutil.rmtree(os.path.join(root, d))
69
+
70
+# files = './unique_1/'
71
+
72
+# for root, dirs, files in os.walk(files):
73
+#     for f in files:
74
+#         os.unlink(os.path.join(root, f))
75
+#     for d in dirs:
76
+#         shutil.rmtree(os.path.join(root, d))
77
+
78
+# files = './Copy_Gallery/'
79
+#
80
+# for root, dirs, files in os.walk(files):
81
+#     for f in files:
82
+#         os.unlink(os.path.join(root, f))
83
+#     for d in dirs:
84
+#         shutil.rmtree(os.path.join(root, d))
85
+
86
+os.remove('known_encodings.pickle')
87
+os.remove('people_copyGallery_known_encodings.pickle')
88
+os.remove('Zero_gallery_known_encodings.pickle')

+ 99
- 0
Events/sepration_cluster.py 파일 보기

@@ -0,0 +1,99 @@
1
+import sys
2
+import os
3
+import dlib
4
+import glob
5
+import time
6
+import uuid
7
+from main_application import *
8
+
9
+import click
10
+@click.command()
11
+@click.argument('eventid', default='')
12
+
13
+def sep_clust(eventid):
14
+    Gallery = 'C:\\Users\\Administrator\\Documents\\AI\\runtimecropimages\\front_face\\' + eventid + "\\"
15
+    start = time.time()
16
+
17
+    # if len(sys.argv) != 3:
18
+    #     print("Please specify valid arguments. Call the program like this \npython face_clustering.py -specify input folder- -specify output path-")
19
+    # exit()
20
+
21
+    predictor_path = 'C:\\Users\\Administrator\\Documents\\AI\\runtimecropimages\\model\\shape_predictor_68_face_landmarks.dat'
22
+    face_rec_model_path = 'C:\\Users\\Administrator\\Documents\\AI\\runtimecropimages\\model\\dlib_face_recognition_resnet_model_v1.dat'
23
+    # faces_folder_path = sys.argv[1]
24
+    output_folder = 'C:\\Users\\Administrator\\Documents\\AI\\runtimecropimages\\sepration_cluster\\' + eventid + "\\"
25
+    import os
26
+    import shutil
27
+
28
+    files = output_folder
29
+
30
+    for root, dirs, files in os.walk(files):
31
+        for f in files:
32
+            os.unlink(os.path.join(root, f))
33
+        for d in dirs:
34
+            shutil.rmtree(os.path.join(root, d))
35
+
36
+
37
+    detector = dlib.get_frontal_face_detector()  # a detector to find the faces
38
+    sp = dlib.shape_predictor(predictor_path)  # shape predictor to find face landmarks
39
+    facerec = dlib.face_recognition_model_v1(face_rec_model_path)  # face recognition model
40
+
41
+    descriptors = []
42
+    images = []
43
+
44
+    for root, dirs, files in os.walk(Gallery, topdown=False):
45
+
46
+        for name in files:
47
+            f = os.path.join(root, name)
48
+
49
+            # Load the images from input folder
50
+            # for f in glob.glob(os.path.join(faces_folder_path, "*")):
51
+            print("Processing file: {}".format(f))
52
+            img = dlib.load_rgb_image(f)
53
+
54
+            # Ask the detector to find the bounding boxes of each face. The 1 in the second argument indicates that we should upsample the image 1 time. This will make everything bigger and allow us to detect more faces.
55
+            dets = detector(img, 1)
56
+            print("Number of faces detected: {}".format(len(dets)))
57
+
58
+            # Now process each face we found.
59
+            for k, d in enumerate(dets):
60
+                # Get the landmarks/parts for the face in box d.
61
+                shape = sp(img, d)
62
+
63
+                # Compute the 128D vector that describes the face in img identified by shape.
64
+                face_descriptor = facerec.compute_face_descriptor(img, shape)
65
+                descriptors.append(face_descriptor)
66
+                images.append((img, shape))
67
+
68
+    # Cluster the faces.
69
+    labels = dlib.chinese_whispers_clustering(descriptors, 0.40)
70
+    num_classes = len(set(labels))  # Total number of clusters
71
+    print("Number of clusters: {}".format(num_classes))
72
+
73
+    for i in range(0, num_classes):
74
+        indices = []
75
+        class_length = len([label for label in labels if label == i])
76
+        for j, label in enumerate(labels):
77
+            if label == i:
78
+                indices.append(j)
79
+        print("Indices of images in the cluster {0} : {1}".format(str(i), str(indices)))
80
+        print("Size of cluster {0} : {1}".format(str(i), str(class_length)))
81
+        output_folder_path = output_folder + str(i)  # Output folder for each cluster
82
+        os.path.normpath(output_folder_path)
83
+        os.makedirs(output_folder_path)
84
+
85
+        # Save each face to the respective cluster folder
86
+        print("Saving faces to output folder...")
87
+        for k, index in enumerate(indices):
88
+            img, shape = images[index]
89
+            x = img
90
+
91
+            # file_path2=os.path.join("C:/Users/katku/Desktop/spyder/192.168.89.91_windows/final_crop_cluster_FaceRecognition/unique/",str(uuid.uuid4().hex[:15])+str(i))
92
+            file_path = os.path.join(output_folder_path, str(uuid.uuid4().hex[:15]) + str(k) + str(i))
93
+            # dlib.save_face_chip(img, shape, file_path2, size=150, padding=568.25)
94
+            dlib.save_face_chip(img, shape, file_path, size=150, padding=0.25)
95
+
96
+    print("--- %s seconds ---" % (time.time() - start))
97
+
98
+sep_clust()
99
+

+ 39
- 0
Events/sepration_crop.py 파일 보기

@@ -0,0 +1,39 @@
1
+# What is the code to do :  Extract faces from all image files in 'directory' and save them in 'out_src'.
2
+import os
3
+# - pip install ObjectExtractor ( OR pip3 install ObjectExtractor)
4
+from main_application import *
5
+from object_extractor import Extractor, FRONTALFACE_ALT2
6
+import uuid
7
+import main_application
8
+import click
9
+@click.command()
10
+@click.argument('eventid', default='')
11
+def crop(eventid):
12
+
13
+
14
+    # original_working_directory = os.getcwd()
15
+    # new_networked_directory = r'\\192.168.88.99\\Bizgaze\\port6003\\wwwroot\\_files\\'
16
+    # # change to the networked directory
17
+    # os.chdir(new_networked_directory)
18
+
19
+
20
+    CURRENT_PATH = os.path.dirname(__file__)
21
+    # extensions = ['jpeg', 'png']
22
+
23
+    inputImg = 'Z:\\1\\CopyGallery\\' + eventid + "\\"
24
+    out_src = '.\\sepration_crop\\' + eventid + "\\"
25
+
26
+    index = 1
27
+
28
+    for root, dirs, files in os.walk(inputImg, topdown=False):
29
+        for name in files:
30
+            f = os.path.join(root, name)
31
+            Extractor.extract(os.path.join(CURRENT_PATH, f), cascade_file=FRONTALFACE_ALT2,
32
+                              output_directory=os.path.join(CURRENT_PATH, out_src),
33
+                              output_prefix=str(uuid.uuid4().hex[:15]) + str(index),
34
+                              start_count=1)
35
+            #os.remove(f)
36
+            index = index + 1
37
+
38
+
39
+crop()

+ 10
- 0
Events/templates/Display.html 파일 보기

@@ -0,0 +1,10 @@
1
+<!DOCTYPE html>
2
+<html>
3
+<head>
4
+    <title>Sample Code</title>
5
+</head>
6
+ <body>
7
+    <h1>Opening a folder from HTML code</h1>
8
+    <a href='/home/bizgaze/PycharmProjects/img/output'>Click to open a folder</a>
9
+</body>
10
+</html>

+ 32
- 0
Events/templates/Gallery.html 파일 보기

@@ -0,0 +1,32 @@
1
+<!DOCTYPE html>
2
+<html lang="en">
3
+<head>
4
+    <meta charset="UTF-8">
5
+    <title>Title</title>
6
+    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css"
7
+          integrity="sha384-1q8mTJOASx8j1Au+a5WDVnPi2lkFfwwEAa8hDDdjZlpLegxhjVME1fgjWPGmkzs7" crossorigin="anonymous">
8
+
9
+</head>
10
+<body>
11
+<div class="container">
12
+
13
+    <div class="row">
14
+
15
+        <div class="col-lg-12">
16
+            <h1 class="page-header">Gallery</h1>
17
+        </div>
18
+  <a  class="button button5"href="/">HOME</a>
19
+        <hr>
20
+        {% for image_name in image_names %}
21
+        <div class="col-lg-3 col-md-4 col-xs-6 thumb">
22
+            <img class="img-responsive" src=" {{url_for('send_image', filename=image_name )}}"style="width:300px;height:300px;">
23
+        </div>
24
+        {% endfor %}
25
+    </div>
26
+</div>
27
+<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/js/bootstrap.min.js"
28
+        integrity="sha384-0mSbJDEHialfmuBBQP6A4Qrprq5OVfW37PRR3j5ELqxss1yVqOtnepnHVP9aJ7xS"
29
+        crossorigin="anonymous"></script>
30
+
31
+</body>
32
+</html>

+ 84
- 0
Events/templates/index.html 파일 보기

@@ -0,0 +1,84 @@
1
+<html>
2
+
3
+<head>
4
+    <style>
5
+.button {
6
+  background-color: #000000; /* Green */
7
+  border: none;
8
+  color: white;
9
+  padding: 15px 32px;
10
+  text-align: center;
11
+  text-decoration: none;
12
+  display: inline-block;
13
+  margin: 4px 2px;
14
+  cursor: pointer;
15
+}
16
+
17
+
18
+.button1 {font-size: 10px;}
19
+.button2 {font-size: 12px;}
20
+.button3 {font-size: 16px;}
21
+.button4 {font-size: 50px;}
22
+.button5 {font-size: 24px;border-radius: 12px;}
23
+
24
+</style>
25
+
26
+</head>
27
+
28
+<body>
29
+<div style="background-image: url('https://lh3.googleusercontent.com/p/AF1QipONWF8G50u9Bu-dklcj3kzesofOn8Z0q0LdHeU1=w1080-h608-p-no-v0');
30
+   /* Full height */
31
+  height: 100%;
32
+    / Center and scale the image nicely /
33
+  background-position: center;
34
+  background-repeat: no-repeat;
35
+  background-size: cover;">
36
+
37
+     
38
+
39
+    <br>  <br>
40
+
41
+ <form action="/upload" method="POST" enctype="multipart/form-data">
42
+        <div class="form-group">
43
+
44
+
45
+          <div class="custom-file">
46
+
47
+            <input type="file" class="button button5" name="image" id="image"> <br> <br><br>
48
+<div class="bg"></div>
49
+              <input class="button button5" type="submit"> <br><br></div></div></form>
50
+
51
+
52
+
53
+ <form action="{{ url_for('predict') }}" method="GET">
54
+     <input type="submit" class="button button5" value="predict"></form>
55
+<br>
56
+
57
+ <form action="{{ url_for('json') }}" method="GET">
58
+     <input type="submit" class="button button5" value="json"></form>
59
+
60
+
61
+<br>
62
+
63
+
64
+
65
+
66
+<style>
67
+
68
+   body {
69
+	width: 100%;
70
+	height:100%;
71
+	font-family: 'Helvetica';
72
+	background-color:#000000;
73
+	color: #fff;
74
+	font-size: 24px;
75
+	text-align:center;
76
+	letter-spacing:1.4px;
77
+
78
+}
79
+  </style>
80
+
81
+
82
+
83
+</body>
84
+</html>

+ 39
- 0
Events/unique_1.py 파일 보기

@@ -0,0 +1,39 @@
1
+################################## Selection first file  from all folder #######################
2
+import os
3
+import shutil
4
+
5
+import click
6
+@click.command()
7
+@click.argument('eventid', default='')
8
+def unq1(eventid):
9
+    p1 = r"C:\\Users\\Administrator\\Documents\\AI\\runtimecropimages\\sepration_cluster\\" + eventid + "\\"
10
+    p2 = r"C:\\Users\\Administrator\\Documents\\AI\\runtimecropimages\\unique_1\\" + eventid + "\\"
11
+
12
+    for path, folders, files in os.walk(p1):
13
+
14
+        if not files: continue
15
+        try:
16
+            src = os.path.join(path, files[0])
17
+        except IndexError:
18
+            pass
19
+        dst_path = path.replace(p1, '') + os.sep
20
+        # dst_folder = p2 + dst_path
21
+
22
+        # create the target dir if doesn't exist
23
+        # if not os.path.exists(dst_folder):
24
+        #     os.makedirs(dst_folder)
25
+
26
+        # create dst file with only the first file
27
+        try:
28
+            dst = p2 + files[0]
29
+        except IndexError:
30
+            pass
31
+
32
+        # copy the file
33
+        shutil.copy2(src, dst)
34
+
35
+
36
+
37
+
38
+unq1()
39
+

+ 352
- 0
Events/unique_Allunq.py 파일 보기

@@ -0,0 +1,352 @@
1
+import pickle
2
+import numpy as np
3
+import face_recognition
4
+import os
5
+import cv2
6
+import datetime
7
+import click
8
+@click.command()
9
+@click.argument('eventid', default='')
10
+
11
+
12
+def predict(eventid):
13
+
14
+
15
+    Gallery = 'C:\\Users\\Administrator\\Documents\\AI\\runtimecropimages\\unique_1\\' + eventid + "\\"
16
+    People = './ALL_UNQ/' + eventid + "/"
17
+    x= datetime.datetime.now()
18
+    print('Execution Started at:',x)
19
+
20
+    def saveEncodings(encs, names, fname='encodings.pickle'):
21
+        """
22
+        Save encodings in a pickle file to be used in future.
23
+
24
+        Parameters
25
+        ----------
26
+        encs : List of np arrays
27
+            List of face encodings.
28
+        names : List of strings
29
+            List of names for each face encoding.
30
+        fname : String, optional
31
+            Name/Location for pickle file. The default is "encodings.pickle".
32
+
33
+        Returns
34
+        -------
35
+        None.
36
+
37
+        """
38
+
39
+        data = []
40
+        d = [{"name": nm, "encoding": enc} for (nm, enc) in zip(names, encs)]
41
+        data.extend(d)
42
+
43
+        encodingsFile = fname
44
+
45
+        # dump the facial encodings data to disk
46
+        print("[INFO] serializing encodings...")
47
+        f = open(encodingsFile, "wb")
48
+        f.write(pickle.dumps(data))
49
+        f.close()
50
+
51
+        # Function to read encodings
52
+
53
+    def readEncodingsPickle(fname):
54
+        """
55
+        Read Pickle file.
56
+
57
+        Parameters
58
+        ----------
59
+        fname : String
60
+            Name of pickle file.(Full location)
61
+
62
+        Returns
63
+        -------
64
+        encodings : list of np arrays
65
+            list of all saved encodings
66
+        names : List of Strings
67
+            List of all saved names
68
+
69
+        """
70
+
71
+        data = pickle.loads(open(fname, "rb").read())
72
+        data = np.array(data)
73
+        encodings = [d["encoding"] for d in data]
74
+        names = [d["name"] for d in data]
75
+        return encodings, names
76
+
77
+    # Function to create encodings and get face locations
78
+    def createEncodings(image):
79
+        print("Encoding")
80
+        """
81
+        Create face encodings for a given image and also return face locations in the given image.
82
+
83
+        Parameters
84
+        ----------
85
+        image : cv2 mat
86
+            Image you want to detect faces from.
87
+
88
+        Returns
89
+        -------
90
+        known_encodings : list of np array
91
+            List of face encodings in a given image
92
+        face_locations : list of tuples
93
+            list of tuples for face locations in a given image
94
+
95
+        """
96
+
97
+        # Find face locations for all faces in an image
98
+        face_locations = face_recognition.face_locations(image)
99
+
100
+        # Create encodings for all faces in an image
101
+        known_encodings = face_recognition.face_encodings(image, known_face_locations=face_locations)
102
+        return known_encodings, face_locations
103
+
104
+    # Function to compare encodings
105
+    def compareFaceEncodings(unknown_encoding, known_encodings, known_names):
106
+        """
107
+        Compares face encodings to check if 2 faces are same or not.
108
+
109
+        Parameters
110
+        ----------
111
+        unknown_encoding : np array
112
+            Face encoding of unknown people.
113
+        known_encodings : np array
114
+            Face encodings of known people.
115
+        known_names : list of strings
116
+            Names of known people
117
+
118
+        Returns
119
+        -------
120
+        acceptBool : Bool
121
+            face matched or not
122
+        duplicateName : String
123
+            Name of matched face
124
+        distance : Float
125
+            Distance between 2 faces
126
+
127
+        """
128
+        duplicateName = ""
129
+        distance = 0.0
130
+        matches = face_recognition.compare_faces(known_encodings, unknown_encoding, tolerance=0.47)
131
+
132
+        face_distances = face_recognition.face_distance(known_encodings, unknown_encoding)
133
+
134
+        best_match_index = np.argmin(face_distances)
135
+
136
+        distance = face_distances[best_match_index]
137
+        if matches[best_match_index]:
138
+            acceptBool = True
139
+            duplicateName = known_names[best_match_index]
140
+        else:
141
+            acceptBool = False
142
+            duplicateName = ""
143
+        return acceptBool, duplicateName, distance
144
+
145
+    p = []
146
+
147
+    # Save Image to new directory
148
+    def saveImageToDirectory(image, name, imageName):
149
+        """
150
+        Saves images to directory.
151
+
152
+        Parameters
153
+        ----------
154
+        image : cv2 mat
155
+            Image you want to save.
156
+        name : String
157
+            Directory where you want the image to be saved.
158
+        imageName : String
159
+            Name of image.
160
+
161
+        Returns
162
+        -------
163
+        None.
164
+
165
+        """
166
+        path = "C:\\Users\\Administrator\\Documents\\AI\\runtimecropimages\\output_unique_ALLUNQ\\" + name
167
+        path1 = "C:\\Users\\Administrator\\Documents\\AI\\runtimecropimages\\output_unique_ALLUNQ\\" + name
168
+        if os.path.exists(path):
169
+            pass
170
+        else:
171
+            if not os.path.exists(path):
172
+                os.makedirs(path)
173
+            # os.mkdir(path,exist_ok=True)
174
+        cv2.imwrite(path + "/" + imageName, image)
175
+        x = []
176
+        c = (path1 + "/" + imageName)
177
+        x.append(c)
178
+        p.append(x)
179
+
180
+    # Function for creating encodings for known people
181
+    def processKnownPeopleImages(path=People, saveLocation="./known_encodings.pickle"):
182
+        """
183
+        Process images of known people and create face encodings to compare in future.
184
+        Eaach image should have just 1 face in it.
185
+
186
+        Parameters
187
+        ----------
188
+        path : STRING, optional
189
+            Path for known people dataset. The default is "C:/inetpub/vhosts/port82/wwwroot/_files/People".
190
+            It should be noted that each image in this dataset should contain only 1 face.
191
+        saveLocation : STRING, optional
192
+            Path for storing encodings for known people dataset. The default is "./known_encodings.pickle in current directory".
193
+
194
+        Returns
195
+        -------
196
+        None.
197
+
198
+        """
199
+
200
+        known_encodings = []
201
+        known_names = []
202
+        for img in os.listdir(path):
203
+            imgPath = path + img
204
+
205
+            # Read image
206
+            image = cv2.imread(imgPath)
207
+            name = img.rsplit('.')[0]
208
+            # Resize
209
+            image = cv2.resize(image, (0, 0), fx=0.9, fy=0.9, interpolation=cv2.INTER_LINEAR)
210
+
211
+            # Get locations and encodings
212
+            encs, locs = createEncodings(image)
213
+            try:
214
+                known_encodings.append(encs[0])
215
+            except IndexError:
216
+                os.remove(People+img)
217
+            #known_encodings.append(encs[568])
218
+            known_names.append(name)
219
+
220
+            for loc in locs:
221
+                top, right, bottom, left = loc
222
+
223
+            # Show Image
224
+            #cv2.rectangle(image, (left, top), (right, bottom), color=(255, 568, 568), thickness=2)
225
+            # cv2.imshow("Image", image)
226
+           # cv2.waitKey(1)
227
+            #cv2.destroyAllWindows()
228
+        saveEncodings(known_encodings, known_names, saveLocation)
229
+
230
+    # Function for processing dataset images
231
+    def processDatasetImages(saveLocation="./Gallery_encodings.pickle"):
232
+        """
233
+        Process image in dataset from where you want to separate images.
234
+        It separates the images into directories of known people, groups and any unknown people images.
235
+        Parameters
236
+        ----------
237
+        path : STRING, optional
238
+            Path for known people dataset. The default is "D:/port1004/port1004/wwwroot/_files/People".
239
+            It should be noted that each image in this dataset should contain only 1 face.
240
+        saveLocation : STRING, optional
241
+            Path for storing encodings for known people dataset. The default is "./known_encodings.pickle in current directory".
242
+
243
+        Returns
244
+        -------
245
+        None.
246
+
247
+        """
248
+        # Read pickle file for known people to compare faces from
249
+        people_encodings, names = readEncodingsPickle("./known_encodings.pickle")
250
+
251
+        for root, dirs, files in os.walk(Gallery, topdown=False):
252
+
253
+            for name in files:
254
+                s = os.path.join(root, name)
255
+                #print(p)
256
+          #  imgPath = path + img
257
+
258
+            # Read image
259
+                image = cv2.imread(s)
260
+                try:
261
+                    orig = image.copy()
262
+                    image = cv2.resize(image, (0, 0), fx=0.9, fy=0.9, interpolation=cv2.INTER_LINEAR)
263
+                except  AttributeError:
264
+                    os.remove(s)
265
+            # Resize
266
+
267
+
268
+            # Get locations and encodings
269
+                encs, locs = createEncodings(image)
270
+
271
+            # Save image to a group image folder if more than one face is in image
272
+            # if len(locs) > 1:
273
+            #     saveImageToDirectory(orig, "Group", img)
274
+
275
+            # Processing image for each face
276
+                i = 0
277
+                knownFlag = 0
278
+                for loc in locs:
279
+                    top, right, bottom, left = loc
280
+                    unknown_encoding = encs[i]
281
+                    i += 1
282
+                    acceptBool, duplicateName, distance = compareFaceEncodings(unknown_encoding, people_encodings, names)
283
+                    if acceptBool:
284
+                        saveImageToDirectory(orig, duplicateName,name)
285
+                        knownFlag = 1
286
+                if knownFlag == 1:
287
+                    print("Match Found")
288
+                else:
289
+                    saveImageToDirectory(orig, "568",name)
290
+
291
+            # Show Image
292
+            #     cv2.rectangle(image, (left, top), (right, bottom), color=(255, 568, 568), thickness=2)
293
+            # # cv2.imshow("Image", image)
294
+            #     cv2.waitKey(1)
295
+            #     cv2.destroyAllWindows()
296
+
297
+
298
+    def main():
299
+        """
300
+        Main Function.
301
+
302
+        Returns
303
+        -------
304
+        None.
305
+
306
+        """
307
+
308
+        processKnownPeopleImages()
309
+        processDatasetImages()
310
+        # import pandas as pd
311
+        # q = pd.DataFrame(p)
312
+        # m = q
313
+        # # print(m)
314
+        # #   x.drop(x.columns[Unnam], axis=1, inplace=True)
315
+        # df = m.groupby([568], as_index=False).count()
316
+        # z = df[568].str.split('/', expand=True)
317
+        # z.rename({z.columns[-2]: 'Matched'}, axis=1, inplace=True)
318
+        # z.rename({z.columns[-1]: 'croped_guest_pic'}, axis=1, inplace=True)
319
+
320
+        # #z = z.iloc[:, 3:]
321
+        # z.to_csv('unique_people.csv')
322
+        # z=pd.read_csv('unique_people.csv')
323
+
324
+        # #z.drop(z.index[z['Matched'] == 568], inplace=True)
325
+        # z = z.iloc[:, 3:]
326
+        # z['Matched'] = z['Matched'].apply(str)
327
+        # z.to_csv('unique_people.csv',index=False)
328
+        
329
+        # import os
330
+        # import shutil
331
+
332
+        # for root, dirs, files in os.walk('./output_unique_ALLUNQ/'+eventid+'/568/'):
333
+        #     for file in files:
334
+        #         path_file = os.path.join(root, file)
335
+        #         shutil.move(path_file, './ALL_UNQ/'+eventid+"/")
336
+        print("Completed")
337
+
338
+
339
+    main()
340
+
341
+
342
+    #    return render_template('index.html')
343
+    y=datetime.datetime.now()
344
+    print('Completed at:',y)
345
+    z=y-x
346
+    print('Time Taken:',z)
347
+    return (str(y-x))
348
+    #return 'ALL IMAGES MATCHED'
349
+
350
+
351
+
352
+predict()

+ 36
- 0
Invoice_parser/FITZ_250_450data/meta.json 파일 보기

@@ -0,0 +1,36 @@
1
+{
2
+  "lang":"en",
3
+  "name":"model",
4
+  "version":"0.0.0",
5
+  "spacy_version":">=2.3.5",
6
+  "description":"",
7
+  "author":"",
8
+  "email":"",
9
+  "url":"",
10
+  "license":"",
11
+  "spacy_git_version":"1d4b1dea2",
12
+  "vectors":{
13
+    "width":0,
14
+    "vectors":0,
15
+    "keys":0,
16
+    "name":"spacy_pretrained_vectors"
17
+  },
18
+  "pipeline":[
19
+    "ner"
20
+  ],
21
+  "factories":{
22
+    "ner":"ner"
23
+  },
24
+  "labels":{
25
+    "ner":[
26
+      "Bank Account No",
27
+      "Bank Name",
28
+      "Due Date",
29
+      "Grand Total",
30
+      "Invoice Date",
31
+      "Invoice No",
32
+      "RTGS/IFSC Code",
33
+      "Round Off"
34
+    ]
35
+  }
36
+}

+ 18
- 0
Invoice_parser/FITZ_250_450data/ner/cfg 파일 보기

@@ -0,0 +1,18 @@
1
+{
2
+  "beam_width":1,
3
+  "beam_density":0.0,
4
+  "beam_update_prob":1.0,
5
+  "cnn_maxout_pieces":3,
6
+  "nr_feature_tokens":6,
7
+  "nr_class":34,
8
+  "hidden_depth":1,
9
+  "token_vector_width":96,
10
+  "hidden_width":64,
11
+  "maxout_pieces":2,
12
+  "pretrained_vectors":null,
13
+  "bilstm_depth":0,
14
+  "self_attn_depth":0,
15
+  "conv_depth":4,
16
+  "conv_window":1,
17
+  "embed_size":2000
18
+}

BIN
Invoice_parser/FITZ_250_450data/ner/model 파일 보기


+ 1
- 0
Invoice_parser/FITZ_250_450data/ner/moves 파일 보기

@@ -0,0 +1 @@
1
+�¥movesÚP{"0":{},"1":{"Invoice No":-1,"Invoice Date":-2,"Grand Total":-3,"Bank Name":-4,"Bank Account No":-5,"RTGS/IFSC Code":-6,"Due Date":-7,"Round Off":-8},"2":{"Invoice No":-1,"Invoice Date":-2,"Grand Total":-3,"Bank Name":-4,"Bank Account No":-5,"RTGS/IFSC Code":-6,"Due Date":-7,"Round Off":-8},"3":{"Invoice No":-1,"Invoice Date":-2,"Grand Total":-3,"Bank Name":-4,"Bank Account No":-5,"RTGS/IFSC Code":-6,"Due Date":-7,"Round Off":-8},"4":{"":1,"Invoice No":-1,"Invoice Date":-2,"Grand Total":-3,"Bank Name":-4,"Bank Account No":-5,"RTGS/IFSC Code":-6,"Due Date":-7,"Round Off":-8},"5":{"":1}}

+ 4
- 0
Invoice_parser/FITZ_250_450data/tokenizer
파일 크기가 너무 크기때문에 변경 상태를 표시하지 않습니다.
파일 보기


+ 1
- 0
Invoice_parser/FITZ_250_450data/vocab/key2row 파일 보기

@@ -0,0 +1 @@
1
+�

+ 1
- 0
Invoice_parser/FITZ_250_450data/vocab/lookups.bin 파일 보기

@@ -0,0 +1 @@
1
+�«lexeme_norm€

+ 17583
- 0
Invoice_parser/FITZ_250_450data/vocab/strings.json
파일 크기가 너무 크기때문에 변경 상태를 표시하지 않습니다.
파일 보기


BIN
Invoice_parser/FITZ_250_450data/vocab/vectors 파일 보기


+ 48
- 0
Invoice_parser/Invoice.csv 파일 보기

@@ -0,0 +1,48 @@
1
+Key,Values
2
+SELLER NAME,Seller : SHREE TRADING CORPORATION
3
+SELLER ADDRESS,"6 & 7 Shree Sita Gaushala , Paharganj, Ajmer, Rajasthan - 305001"
4
+SELLER PH,Phone: 9610566688
5
+SELLER EMAIL,Email : stc.jaipur@gmail.com
6
+SELLER GST,GST No. : 08AAVPM5307P1ZV
7
+SELLER PAN,PAN No. : AAVPM5307P
8
+BUYER NAME,Buyer : Gurukripa Automobiles
9
+BUYER ADDRESS,"Ajmer Front Of Central Jail, Jaipur Road, Ajmer, 305001"
10
+BUYER GST,GST No. :Un Registered
11
+INVOICE NO,Invoice No. : AJM21-22/XM00444
12
+INVOICE DATE,Date : 06 Aug 2021
13
+SHIP TO,"Ship to :  , ,  "
14
+BUYER GST,GST No. :Un Registered
15
+SHIP TO CONTACT,"Contact: Sunil,8955050479"
16
+DESTINATION,Destination Terms
17
+SI,1
18
+HSN,27101980
19
+PRODUCT,MOBIL SUPER HP 10W-30 CTN 4X4L:IN-     
20
+PACK SIZE,1
21
+QUANTITY,16.00 LTR
22
+RATE,222.88
23
+DISC,160
24
+TAX %,9%|9%
25
+AMOUNT,3406.1
26
+SI,2
27
+HSN,27101980
28
+PRODUCT,MOBIL SUPER MGDO 15W-40 CTN 4X3.5L:IN-     
29
+PACK SIZE,6
30
+QUANTITY,84.00 LTR
31
+RATE,220.58
32
+DISC,2100
33
+TAX %,9%|9%
34
+AMOUNT,16428.8
35
+TOTAL PACKS & QUANTITY,Total Quantity 7.00 100.00
36
+ASSESSABLE AMOUNT,Assessable Amount 19834.90
37
+AMOUNT IN WORDS,Amount in words : Rupees Twenty Three Thousand Four Hundred Five only
38
+PAYMENT ON OR BEFORE,Payment on or before Save To pay
39
+OUTPUT SGST,Output SGST  
40
+OUTPUT CGST,1785.14 Output CGST  
41
+DISC,1785.14
42
+ROUND OFF,Round off -0.18
43
+RTS POINTS EARNED,RTS Points earned: 1200.00
44
+OVERALL RTS POINTS,Overall RTS Points: 1758.00
45
+GRAND TOTAL,Grand Total 23405.00
46
+OUR BANK DETAILS,Our Bank Details : Kotak Mahindra Bank
47
+BANK A/C NO,Bank A/c No. : 1412405384
48
+RTGS/IFSC,RTGS / IFSC Code : KKBK0003540

+ 48
- 0
Invoice_parser/final.csv 파일 보기

@@ -0,0 +1,48 @@
1
+Key,Values
2
+SELLERNAME,Seller : SHREE TRADING CORPORATION
3
+SELLERADDRESS,"6 & 7 Shree Sita Gaushala , Paharganj, Ajmer, Rajasthan - 305001"
4
+SELLERPH,Phone: 9610566688
5
+SELLEREMAIL,Email : stc.jaipur@gmail.com
6
+SELLERGST,GST No. : 08AAVPM5307P1ZV
7
+SELLERPAN,PAN No. : AAVPM5307P
8
+BUYERNAME,Buyer : Gurukripa Automobiles
9
+BUYERADDRESS,"Ajmer Front Of Central Jail, Jaipur Road, Ajmer, 305001"
10
+BUYERGST,GST No. :Un Registered
11
+INVOICENO,Invoice No. : AJM21-22/XM00444
12
+INVOICEDATE,Date : 06 Aug 2021
13
+SHIPTO,"Ship to :  , ,  "
14
+BUYERGST,GST No. :Un Registered
15
+SHIPTOCONTACT,"Contact: Sunil,8955050479"
16
+DESTINATION,Destination Terms
17
+SI,1
18
+HSN,27101980
19
+PRODUCT,MOBIL SUPER HP 10W-30 CTN 4X4L:IN-     
20
+PACKSIZE,1.00
21
+QUANTITY,16.00 LTR
22
+RATE,222.88
23
+DISC,160.000
24
+TAX%,9%|9%
25
+AMOUNT,3406.10
26
+SI,2
27
+HSN,27101980
28
+PRODUCT,MOBIL SUPER MGDO 15W-40 CTN 4X3.5L:IN-     
29
+PACKSIZE,6.00
30
+QUANTITY,84.00 LTR
31
+RATE,220.58
32
+DISC,2100.000
33
+TAX%,9%|9%
34
+AMOUNT,16428.80
35
+TOTALPACKS&QUANTITY,Total Quantity 7.00 100.00
36
+ASSESSABLEAMOUNT,Assessable Amount 19834.90
37
+AMOUNTINWORDS,Amount in words : Rupees Twenty Three Thousand Four Hundred Five only
38
+PAYMENTONORBEFORE,Payment on or before Save To pay
39
+OUTPUTSGST,Output SGST  
40
+OUTPUTCGST,1785.14 Output CGST  
41
+DISC,1785.14
42
+ROUNDOFF,Round off -0.18
43
+RTSPOINTSEARNED,RTS Points earned: 1200.00
44
+OVERALLRTSPOINTS,Overall RTS Points: 1758.00
45
+GRANDTOTAL,Grand Total 23405.00
46
+OURBANKDETAILS,Our Bank Details : Kotak Mahindra Bank
47
+BANKACNO,Bank A/c No. : 1412405384
48
+RTGSIFSC,RTGS / IFSC Code : KKBK0003540

+ 20
- 0
Invoice_parser/finalwithcolen.csv 파일 보기

@@ -0,0 +1,20 @@
1
+Key
2
+SELLERNAME
3
+SELLERPH
4
+SELLEREMAIL
5
+SELLERGST
6
+SELLERPAN
7
+BUYERNAME
8
+BUYERGST
9
+BUYERPAN
10
+INVOICENO
11
+INVOICEDATE
12
+PONO
13
+SHIPTO
14
+SHIPTOGST
15
+SHIPTOCONTACT
16
+DISC
17
+OURBANKDETAILS
18
+BANKACNO
19
+RTGSIFSC
20
+AMOUNTINWORDS

+ 30
- 0
Invoice_parser/finalwithoutcolen.csv 파일 보기

@@ -0,0 +1,30 @@
1
+Key
2
+SELLERADDRESS
3
+BUYERADDRESS
4
+DESTINATION
5
+SI
6
+HSN
7
+PRODUCT
8
+PACKSIZE
9
+QUANTITY
10
+RATE
11
+DISC
12
+TAX%
13
+AMOUNT
14
+SI
15
+HSN
16
+PRODUCT
17
+PACKSIZE
18
+QUANTITY
19
+RATE
20
+DISC
21
+TAX%
22
+AMOUNT
23
+TOTALPACKS&QUANTITY
24
+PAYMENTONORBEFORE
25
+PAYMENTTYPE
26
+OUTPUTSGST
27
+OUTPUTCGST
28
+ROUNDOFF
29
+GRANDTOTAL
30
+ASSESSABLEAMOUNT

+ 1
- 0
Invoice_parser/firstjson.json 파일 보기

@@ -0,0 +1 @@
1
+{"0":{"SELLERNAME":"SHREE TRADING CORPORATION","SELLERADDRESS":"6 & 7 Shree Sita Gaushala , Paharganj, Ajmer, Rajasthan - 305001","SELLERPH":9610566688,"SELLEREMAIL":"stc.jaipur@gmail.com","SELLERGST":"08AAVPM5307P1ZV","SELLERPAN":"AAVPM5307P","BUYERNAME":"Gurukripa Automobiles","BUYERADDRESS":"Ajmer Front Of Central Jail, Jaipur Road, Ajmer, 305001","BUYERGST":"Un Registered","INVOICENO":"AJM21-22\/XM00444","INVOICEDATE":"06 Aug 2021","SHIPTO":", ,","SHIPTOCONTACT":"Sunil,8955050479","DESTINATION":"Destination Terms","TOTALPACKS&QUANTITY":null,"ASSESSABLEAMOUNT":null,"AMOUNTINWORDS":"Rupees Twenty Three Thousand Four Hundred Five only","PAYMENTONORBEFORE":null,"OUTPUTSGST":null,"OUTPUTCGST":null,"ROUNDOFF":null,"RTSPOINTSEARNED":null,"OVERALLRTSPOINTS":null,"GRANDTOTAL":null,"OURBANKDETAILS":"Kotak Mahindra Bank","BANKACNO":1412405384,"RTGSIFSC":"KKBK0003540"}}

+ 427
- 0
Invoice_parser/invoice.multiprocessing.py 파일 보기

@@ -0,0 +1,427 @@
1
from functools import partial
import multiprocessing
import os
import shutil
import time

import pytesseract
import requests
import spacy
from flask import Flask, render_template, send_file, request, redirect, Response
from PIL import Image
13
+
14
+app = Flask(__name__)
15
+nlp_model1 = spacy.load("p")
16
+app.config["IMAGE_UPLOADS"] = "/home/ubuntu/AI/InvoiceParser/upload_invoice"
17
+
18
+
19
+@app.route("/", methods=["GET"])
20
+def home():
21
+    return render_template("invoice.html")
22
+
23
+
24
+# @app.route("/upload_invoice", methods=["GET", "POST"])
25
+def predict(url_list):
26
+    # if request.method == "POST":
27
+
28
+    #     if request.files:
29
+
30
+    #         image = request.files["image"]
31
+    #         try:
32
+    #             image.save(os.path.join(app.config["IMAGE_UPLOADS"], image.filename))
33
+    #         except IsADirectoryError:
34
+    #             return render_template("invoice.html")
35
+    #         # image.save(os.path.join(
36
+    #         #     app1.config["IMAGE_UPLOADS"], image.filename))
37
+
38
+    #         print("Image saved in Invoice")
39
+
40
+    #         return redirect(request.url)
41
+    Dataset = request.get_json()
42
+    # print(Dataset)
43
+    a = url_list
44
+    x = a['FileData']
45
+    # print(x)
46
+    y = a['FileName']
47
+    z = a['FileType']
48
+    name = y + '.' + z
49
+    print(name)
50
+    # print(y)
51
+    # image = y.split("/")
52
+    # filename=image[-1]
53
+
54
+    # print(x)
55
+    img_data = x.encode()
56
+
57
+    import base64
58
+    with open('/home/ubuntu/AI/InvoiceParser/upload_invoice/' + name, "wb") as fh:
59
+        fh.write(base64.decodebytes(img_data))
60
+
61
+    # Renaming file name
62
+    # os.chdir("/home/ubuntu/AI/InvoiceParser/upload_invoice/")
63
+    # print(os.getcwd())
64
+    #
65
+    # for count, f in enumerate(os.listdir()):
66
+    #     f_name, f_ext = os.path.splitext(f)
67
+    #     f_name = "" + str(count)
68
+    #
69
+    #     new_name = f"{f_name}{f_ext}"
70
+    #     os.rename(f, new_name)
71
+    import glob
72
+    ts = 0
73
+    for file_name in glob.glob("/home/ubuntu/AI/InvoiceParser/upload_invoice/*"):
74
+        fts = os.path.getmtime(file_name)
75
+        if fts > ts:
76
+            ts = fts
77
+            found = file_name
78
+    print(found)
79
+
80
+    s = "/home/ubuntu/AI/InvoiceParser/upload_invoice"
81
+    s = os.listdir(s)
82
+    for file in s:
83
+        if file.endswith(".jpg"):
84
+            fname = (found)
85
+        elif file.endswith(".png"):
86
+            fname = (found)
87
+        elif file.endswith(".pdf"):
88
+            fname = (found)
89
+        elif file.endswith(".jpeg"):
90
+            fname = (found)
91
+        elif file.endswith(".JPEG"):
92
+            fname = (found)
93
+
94
+    def img_to_pdf():  # png to editable pdf conversion
95
+        pdf = pytesseract.image_to_pdf_or_hocr(fname, extension="pdf")
96
+        with open(
97
+                "/home/ubuntu/AI/InvoiceParser/upload_invoice/demo.pdf",
98
+                "w+b",
99
+        ) as f:
100
+            f.write(pdf)
101
+
102
+    if fname.endswith(".pdf"):
103
+        print()
104
+    else:
105
+        img_to_pdf()
106
+        fname = "/home/ubuntu/AI/InvoiceParser/upload_invoice/demo.pdf"
107
+
108
+    sourcepath = "/home/ubuntu/AI/InvoiceParser/upload_invoice"
109
+    sourcefiles = os.listdir(sourcepath)
110
+    destinationpath = "/home/ubuntu/AI/InvoiceParser/uploads"
111
+    for file in sourcefiles:
112
+        if file.endswith(".pdf"):
113
+            shutil.copy2(
114
+                os.path.join(sourcepath, file), os.path.join(destinationpath, file)
115
+            )
116
+    os.chdir("/home/ubuntu/AI/InvoiceParser/uploads")
117
+    print(os.getcwd())
118
+    print("file name conerted to o.pdf")
119
+    for count, f in enumerate(os.listdir()):
120
+        f_name, f_ext = os.path.splitext(f)
121
+        f_name = "" + str(count)
122
+        new_name = f"{f_name}{f_ext}"
123
+        os.rename(f, new_name)
124
+
125
+        import spacy
126
+        import sys
127
+        import fitz
128
+
129
+        fname = "/home/ubuntu/AI/InvoiceParser/uploads/0.pdf"
130
+        doc = fitz.open(fname)
131
+        text = ""
132
+        for page in doc:
133
+            text = text + str(page.get_text())
134
+
135
+        fitz = " ".join(text.split("\n"))
136
+        # print(fitz)
137
+
138
+        import pandas as pd
139
+
140
+        doc = nlp_model1(fitz)
141
+        k = []
142
+        l = []
143
+        for ent in doc.ents:
144
+            # print(f"{ent.label_.upper():{30}}- {ent.text}")
145
+            k.append(ent.label_.upper())
146
+            l.append(ent.text)
147
+
148
+        columns = k
149
+        rows = [l]
150
+        data = pd.DataFrame(rows, columns=columns)
151
+        df = data
152
+
153
+        df = data.T
154
+
155
+        df.to_csv("/home/ubuntu/AI/InvoiceParser/Invoice.csv")
156
+
157
+        import pandas as pd
158
+        df = pd.read_csv("/home/ubuntu/AI/InvoiceParser/Invoice.csv")
159
+        # df.head()
160
+        # df = df.T
161
+        # new_header = df.iloc[0]  # grab the first row for the header
162
+        # df = df[1:]  # take the data less the header row
163
+        # df.columns = new_header
164
+
165
+        # def df_column_uniquify(df):
166
+        #     df_columns = df.columns
167
+        #     new_columns = []
168
+        #     for item in df_columns:
169
+        #         counter = 0
170
+        #         newitem = item
171
+        #         while newitem in new_columns:
172
+        #             counter += 1
173
+        #             newitem = "{}_{}".format(item, counter)
174
+        #         new_columns.append(newitem)
175
+        #     df.columns = new_columns
176
+        #     return df.T
177
+
178
+        # df = df_column_uniquify(df)
179
+        # # df=df.T
180
+        # df.to_csv('/home/ubuntu/AI/InvoiceParser/final.csv')
181
+        #df = pd.read_csv('/home/ubuntu/AI/InvoiceParser/final.csv')
182
+        df.rename({df.columns[-2]: 'Key'}, axis=1, inplace=True)
183
+        df.rename({df.columns[-1]: 'Values'}, axis=1, inplace=True)
184
+        df['Key'] = df['Key'].str.replace('/', '')
185
+        df['Key'] = df['Key'].str.replace(' ', '')
186
+        df.to_csv('/home/ubuntu/AI/InvoiceParser/final.csv', index=False)
187
+        import pandas as pd
188
+        x1 = pd.read_csv('/home/ubuntu/AI/InvoiceParser/final.csv')
189
+        tp = pd.read_csv('/home/ubuntu/AI/InvoiceParser/finalwithcolen.csv')
190
+        merge = pd.merge(x1, tp, on='Key', how='right')
191
+        merge1 = merge
192
+        merge = merge['Values'].str.split(":", expand=True)
193
+        merge.rename({merge.columns[-1]: 'Values'}, axis=1, inplace=True)
194
+        frames = [merge1['Key'], merge['Values']]
195
+        result = pd.concat(frames, axis=1)
196
+
197
+        x1 = pd.read_csv('/home/ubuntu/AI/InvoiceParser/final.csv')
198
+        tp = pd.read_csv('/home/ubuntu/AI/InvoiceParser/finalwithoutcolen.csv')
199
+        merged = pd.merge(x1, tp, on='Key', how='right')
200
+        frames = [result, merged]
201
+        result1 = pd.concat(frames)
202
+        result1.to_csv('/home/ubuntu/AI/InvoiceParser/final1.csv', index=False)
203
+
204
+        x1 = pd.read_csv('/home/ubuntu/AI/InvoiceParser/main.csv')
205
+        tp = pd.read_csv('/home/ubuntu/AI/InvoiceParser/final1.csv')
206
+        tp['Key'] = tp['Key'].str.strip()
207
+        tp['Values'] = tp['Values'].str.strip()
208
+
209
+        merge = pd.merge(tp, x1, on='Key', how='right')
210
+        merge.to_csv('/home/ubuntu/AI/InvoiceParser/invoicewithouttable.csv', index=False)
211
+        df2 = pd.read_csv('/home/ubuntu/AI/InvoiceParser/invoicewithouttable.csv')
212
+
213
+        # Import writer class from csv module
214
+        from csv import writer
215
+
216
+
217
+        List=['PlantCode'," "]
218
+        with open('/home/ubuntu/AI/InvoiceParser/invoicewithouttable.csv', 'a') as f_object:
219
+            writer_object = writer(f_object)
220
+            writer_object.writerow(List)
221
+            f_object.close()
222
+        # print(df2)
223
+        df2 = pd.read_csv('/home/ubuntu/AI/InvoiceParser/invoicewithouttable.csv')
224
+        df2 = df2.T
225
+
226
+        df2.to_csv('/home/ubuntu/AI/InvoiceParser/invoicewithouttable.csv', index=False, header=False)
227
+
228
+        df1 = pd.read_csv('/home/ubuntu/AI/InvoiceParser/invoicewithouttable.csv')
229
+        df1.to_json('/home/ubuntu/AI/InvoiceParser/firstjson.json', orient="index")
230
+        import pandas as pd
231
+        x = pd.read_csv('/home/ubuntu/AI/InvoiceParser/final.csv')
232
+        tp = pd.read_csv('/home/ubuntu/AI/InvoiceParser/item1.csv')
233
+        x['Values'] = x['Values'].str.strip()
234
+        merge = pd.merge(tp, x, on='Key', how='inner')
235
+        merge = merge.groupby('Key').agg({
236
+            'Values': '/'.join,
237
+        }).reset_index()
238
+        z = merge['Values'].str.split('/', expand=True)
239
+        frames = [merge, z]
240
+        result1 = pd.concat(frames, axis=1)
241
+        result1 = result1.drop(['Values'], axis=1)
242
+        import pandas as pd
243
+        tp = pd.read_csv('/home/ubuntu/AI/InvoiceParser/item1.csv')
244
+        merge = pd.merge(tp, result1, on='Key', how='inner')
245
+        merge = merge.T
246
+        new_header = merge.iloc[0]  # grab the first row for the header
247
+        merge = merge[1:]  # take the data less the header row
248
+        merge.columns = new_header
249
+    
250
+        merge = merge.to_dict('records')
251
+        invoice_Item=merge
252
+
253
+
254
+        # import pandas as pd
255
+        # import json
256
+        # dflist = []
257
+        # x = pd.read_csv('/home/ubuntu/AI/InvoiceParser/item1.csv')
258
+        # tp = pd.read_csv('/home/ubuntu/AI/InvoiceParser/final.csv')
259
+        # tp['Key']=tp['Key'].str.strip()
260
+        # tp['Values']=tp['Values'].str.strip()
261
+        # # tp = tp.loc[:, ~tp.columns.str.contains('^Unnamed')]
262
+        # merge = pd.merge(x, tp, on='Key', how='left')
263
+        # merge.to_csv('/home/ubuntu/AI/InvoiceParser/invoicewithtable1.csv', index=False)
264
+
265
+        # dfPG = pd.read_csv('/home/ubuntu/AI/InvoiceParser/invoicewithtable1.csv')
266
+        # import numpy as np
267
+        # dfPG = dfPG.replace({np.nan: None})
268
+        # x2 = dfPG.iloc[:, -2].tolist()
269
+        # y2 = dfPG.iloc[:, -1].tolist()
270
+        # z1 = dict(zip(x2, y2))
271
+        # dflist.append(z1)
272
+        # # u1 = json.dumps(z1)
273
+        # import pandas as pd
274
+
275
+        # x = pd.read_csv('/home/ubuntu/AI/InvoiceParser/item2.csv')
276
+        # tp = pd.read_csv('/home/ubuntu/AI/InvoiceParser/final.csv')
277
+        # tp['Key']=tp['Key'].str.strip()
278
+        # tp['Values']=tp['Values'].str.strip()
279
+        # # tp = tp.loc[:, ~tp.columns.str.contains('^Unnamed')]
280
+        # merge = pd.merge(x, tp, on='Key', how='left')
281
+        # merge.to_csv('/home/ubuntu/AI/InvoiceParser/invoicewithtable2.csv', index=False)
282
+
283
+        # dfUG = pd.read_csv('/home/ubuntu/AI/InvoiceParser/invoicewithtable2.csv')
284
+        # import numpy as np
285
+        # dfUG = dfUG.replace({np.nan: None})
286
+        # x2 = dfUG.iloc[:, -2].tolist()
287
+        # y2 = dfUG.iloc[:, -1].tolist()
288
+        # z2 = dict(zip(x2, y2))
289
+        # dflist.append(z2)
290
+        # u2 = json.dumps(z2)
291
+        # final = '[' + str(z1) + ',' + str(z2) + ']'
292
+        # return render_template('resume.html')
293
+        ############################################Document############################################################
294
+
295
+        import base64
296
+        empty = []
297
+        name = found
298
+        image = open(name, 'rb')
299
+        image_read = image.read()
300
+        image_64_encode = base64.b64encode(image_read)
301
+        NULL = 'null'
302
+        # empty.append("ByteData--" + (NULL).strip('""'))
303
+        image_64_encode = image_64_encode.decode('utf-8')
304
+        empty.append("FileData--" + str(image_64_encode))
305
+        imagedata = name.split("/")
306
+        imagename = str(imagedata[-1]).replace('"', '').replace("[", "").replace("]", "")
307
+        imagename1 = str(imagename).split('.')
308
+
309
+        imagename = str(imagename1[-2]).replace("[", "]")
310
+        empty.append("FileName--" + imagename)
311
+        empty.append("FilePath--" + name)
312
+        imageExtension = str(imagename1[-1]).replace("[", "]")
313
+        empty.append("FileType--" + imageExtension)
314
+
315
+        import pandas as pd
316
+        df = pd.DataFrame(empty)
317
+        df = df[0].str.split("--", expand=True)
318
+        data1 = pd.DataFrame(df[0])
319
+        data2 = pd.DataFrame(df[1])
320
+        dt = data2.set_index(data1[0])
321
+
322
+        dt4 = dt.T
323
+        list = []
324
+        dictionary = dt4.to_dict(orient="index")
325
+
326
+        a = {
327
+            "FileId": 0,
328
+            "FileData": "",
329
+            "FileName": "",
330
+            "FileType": "",
331
+            "RefId": 0
332
+        }
333
+        list = []
334
+
335
+        list.append(a)
336
+        list.append(dictionary[1])
337
+
338
+        import json
339
+
340
+        with open('/home/ubuntu/AI/InvoiceParser/firstjson.json', 'r') as json_file:
341
+            json_load = json.load(json_file)
342
+
343
+            # url = "https://test.bizgaze.app:8443/apis/v4/bizgaze/integrations/businesscards/create"
344
+
345
+        nothing = json.dumps(json_load).replace("]", "").replace("[", "").replace('{"0":', '').replace('}}', '}')
346
+        import json
347
+
348
+        # JSON data:
349
+        x = nothing
350
+
351
+        # python object to be appended
352
+        y = {"InvoiceItems":invoice_Item}
353
+        y1 = {"Document": list}
354
+
355
+        # parsing JSON string:
356
+        z = json.loads(x)
357
+
358
+        # appending the data
359
+        z.update(y)
360
+        z.update(y1)
361
+
362
+        # the result is a JSON string:
363
+        # print(json.dumps(z))
364
+        # print('##########################')
365
+        # print(z)
366
+        # print('##########################')
367
+        import requests
368
+        import json
369
+
370
+        # with open('visitingcard1.json', 'r') as json_file:
371
+        #     json_load = json.load(json_file)
372
+        url = "https://test.bizgaze.app:8443/apis/v4/bizgaze/integrations/invoice/createsalesinvoice"
373
+        #url="https://test.bizgaze.app:8443/apis/v4/bizgaze/integrations/invoice/createsalesinvoice"
374
+        payload1 = json.dumps(z)
375
+        print('--------------------------------------------------------------------------')
376
+        print(payload1)
377
+        headers = {
378
+            'Authorization': 'stat 089166c35d4c4d7d941c99d6f8986834',
379
+            'Content-Type': 'application/json'
380
+        }
381
+        response = requests.request("POST", url, headers=headers, data=payload1)
382
+        print("##############################################################")
383
+        print(response.text)
384
+
385
+        import glob
386
+        files = glob.glob(
387
+            "/home/ubuntu/AI/InvoiceParser/upload_invoice/*"
388
+        )
389
+        for f in files:
390
+            os.remove(f)
391
+        files = glob.glob(
392
+            "/home/ubuntu/AI/InvoiceParser/uploads/*"
393
+        )
394
+        for f in files:
395
+            os.remove(f)
396
+
397
+        return payload1
398
+
399
+
400
+@app.route("/Download_invoice")
401
+def Download_invoice():
402
+    pass
403
+   
404
+
405
+@app.route("/Table")
406
+def Table():
407
+    pass
408
+
409
+
410
+
411
+@app.route('/upload_invoice', methods=["POST"])
412
+def upload_invoice():
413
+    if __name__ == "__main__":
414
+        url_list = []
415
+        Dataset = request.get_json()
416
+        # id = "100013660000125"
417
+        url_list.append(Dataset)
418
+        # multiprocessing
419
+        with multiprocessing.Pool(processes=30) as pool:
420
+            results = pool.map(predict, url_list)
421
+
422
+        pool.close()
423
+        return results[0]
424
+
425
+
426
+if __name__ == "__main__":
427
+    app.run(host='0.0.0.0', port=9797, debug=True)

+ 2
- 0
Invoice_parser/invoicewithouttable.csv 파일 보기

@@ -0,0 +1,2 @@
1
+SELLER NAME,SELLER ADDRESS,SELLER PH,SELLER EMAIL,SELLER GST,SELLER PAN,BUYER NAME,BUYER ADDRESS,BUYER GST,INVOICE NO,INVOICE DATE,SHIP TO,SHIP TO CONTACT,DESTINATION,TOTAL PACKS & QUANTITY,ASSESSABLE AMOUNT,AMOUNT IN WORDS,PAYMENT ON OR BEFORE,OUTPUT SGST,OUTPUT CGST,ROUND OFF,RTS POINTS EARNED,OVERALL RTS POINTS,GRAND TOTAL,OUR BANK DETAILS,BANK A/C NO,RTGS/IFSC
2
+Seller : SHREE TRADING CORPORATION,"H- 575A,ROAD NO.6, V.K.I AREA, Jaipur, Rajasthan - 302023",Phone: 9610566688,Email : stc.jaipur@gmail.com,GST No. : 08AAVPM5307P1ZV,PAN No. : AAVPM5307P,Buyer : Stonex India Pvt. Ltd.,"Khasra No. 210, Village - Rahimpura,Makrana Road Kishangarh, 305801",GST No. :08AACCG9620R1ZZ,Invoice No. : JPR21-22/XM01902,Date : 28 Jul 2021,Ship to : ,"Contact: Rajat Ji,9116616323",Destination Kishangarh,Total Quantity 2.00 416.00,Assessable Amount 87360.00,Amount in words : Rupees One Lakh Three Thousand Eighty Five only,Payment on or before Save To pay,Output SGST  ,7862.40 Output CGST  ,Round off 0.20,,,Grand Total 103085.00,Our Bank Details : Kotak Mahindra Bank,Bank A/c No. : 1412405384,RTGS / IFSC Code : KKBK0003540

+ 10
- 0
Invoice_parser/invoicewithtable1.csv 파일 보기

@@ -0,0 +1,10 @@
1
+Key,Values
2
+SI,1
3
+HSN,27101980
4
+PRODUCT,"MOBILGEAR 600 XP 680, 208LT, DR-"
5
+PACK SIZE,1.00
6
+QUANTITY,208.00 LTR
7
+RATE,240.00
8
+DISC,0.000
9
+TAX %,9%|9%
10
+AMOUNT,49920.00

+ 10
- 0
Invoice_parser/invoicewithtable2.csv 파일 보기

@@ -0,0 +1,10 @@
1
+Key,Implement,Values
2
+SI_1,SI,2
3
+HSN_1,HSN,27101980
4
+PRODUCT_1,PRODUCT,SPARTAN EP 150 DRUM 208L:IN-     
5
+PACK SIZE_1,PACK SIZE,1.00
6
+QUANTITY_1,QUANTITY,208.00 LTR
7
+RATE_1,RATE,180.00
8
+DISC_1,DISC,0.000
9
+TAX %_1,TAX %,9%|9%
10
+AMOUNT_1,AMOUNT,37440.00

+ 10
- 0
Invoice_parser/item1.csv 파일 보기

@@ -0,0 +1,10 @@
1
+Key
2
+SI
3
+HSN
4
+PRODUCT
5
+PACKSIZE
6
+QUANTITY
7
+RATE
8
+DISC
9
+TAX %
10
+AMOUNT

+ 10
- 0
Invoice_parser/item2.csv 파일 보기

@@ -0,0 +1,10 @@
1
+Key,Implement
2
+SI_1,SI
3
+HSN_1,HSN
4
+PRODUCT_1,PRODUCT
5
+PACK SIZE_1,PACK SIZE
6
+QUANTITY_1,QUANTITY
7
+RATE_1,RATE
8
+DISC_1,DISC
9
+TAX %_1,TAX %
10
+AMOUNT_1,AMOUNT

+ 28
- 0
Invoice_parser/main.csv 파일 보기

@@ -0,0 +1,28 @@
1
+Key
2
+SELLER NAME
3
+SELLER ADDRESS
4
+SELLER PH
5
+SELLER EMAIL
6
+SELLER GST
7
+SELLER PAN
8
+BUYER NAME
9
+BUYER ADDRESS
10
+BUYER GST
11
+INVOICE NO
12
+INVOICE DATE
13
+SHIP TO
14
+SHIP TO CONTACT
15
+DESTINATION
16
+TOTAL PACKS & QUANTITY
17
+ASSESSABLE AMOUNT
18
+AMOUNT IN WORDS
19
+PAYMENT ON OR BEFORE
20
+OUTPUT SGST
21
+OUTPUT CGST
22
+ROUND OFF
23
+RTS POINTS EARNED
24
+OVERALL RTS POINTS
25
+GRAND TOTAL
26
+OUR BANK DETAILS
27
+BANK A/C NO
28
+RTGS/IFSC

+ 1
- 0
Invoice_parser/p/meta.json 파일 보기

@@ -0,0 +1 @@
1
+{"lang":"en","name":"model","version":"0.0.0","spacy_version":">=2.2.4","description":"","author":"","email":"","url":"","license":"","vectors":{"width":0,"vectors":0,"keys":0,"name":"spacy_pretrained_vectors"},"pipeline":["ner"],"factories":{"ner":"ner"},"labels":{"ner":["AMOUNT","AMOUNT IN WORDS","ASSESSABLE AMOUNT","BANK A/C NO","BUYER ADDRESS","BUYER GST","BUYER NAME","BUYER PAN","DESTINATION","DISC","DISPATCHED THROUGH","GRAND TOTAL","HSN","INVOICE DATE","INVOICE NO","NARRATION","OUR BANK DETAILS","OUTPUT CGST","OUTPUT SGST","OVERALL RTS POINTS","Output IGST","PACK SIZE","PAYMENT ON OR BEFORE","PAYMENT TYPE","PAYMENT TYPE ","PO NO","PRODUCT","QUANTITY","RATE","ROUND OFF","RTGS/IFSC","RTGS/IFSC ","RTS POINTS EARNED","SELLER ADDRESS","SELLER EMAIL","SELLER GST","SELLER NAME","SELLER PAN","SELLER PH","SHIP TO","SHIP TO ADDRESS","SHIP TO CONTACT","SHIP TO GST","SI","TAX %","TOTAL PACKS & QUANTITY","TOTAL PACKS AND QUANTITY"]}}

+ 18
- 0
Invoice_parser/p/ner/cfg 파일 보기

@@ -0,0 +1,18 @@
1
+{
2
+  "beam_width":1,
3
+  "beam_density":0.0,
4
+  "beam_update_prob":1.0,
5
+  "cnn_maxout_pieces":3,
6
+  "nr_feature_tokens":6,
7
+  "nr_class":190,
8
+  "hidden_depth":1,
9
+  "token_vector_width":96,
10
+  "hidden_width":64,
11
+  "maxout_pieces":2,
12
+  "pretrained_vectors":null,
13
+  "bilstm_depth":0,
14
+  "self_attn_depth":0,
15
+  "conv_depth":4,
16
+  "conv_window":1,
17
+  "embed_size":2000
18
+}

BIN
Invoice_parser/p/ner/model 파일 보기


+ 1
- 0
Invoice_parser/p/ner/moves 파일 보기

@@ -0,0 +1 @@
1
+�¥movesÚ
d{"0":{},"1":{"SELLER NAME":-1,"SELLER ADDRESS":-2,"SELLER PH":-3,"SELLER EMAIL":-4,"SELLER GST":-5,"SELLER PAN":-6,"BUYER NAME":-7,"BUYER ADDRESS":-8,"BUYER GST":-9,"BUYER PAN":-10,"INVOICE NO":-11,"INVOICE DATE":-12,"PO NO":-13,"SHIP TO":-14,"SHIP TO ADDRESS":-15,"SHIP TO GST":-16,"SHIP TO CONTACT":-17,"DESTINATION":-18,"SI":-19,"HSN":-20,"PRODUCT":-21,"PACK SIZE":-22,"QUANTITY":-23,"RATE":-24,"DISC":-25,"TAX %":-26,"AMOUNT":-27,"TOTAL PACKS & QUANTITY":-28,"ASSESSABLE AMOUNT":-29,"AMOUNT IN WORDS":-30,"PAYMENT ON OR BEFORE":-31,"PAYMENT TYPE":-32,"OUTPUT SGST":-33,"OUTPUT CGST":-34,"ROUND OFF":-35,"GRAND TOTAL":-36,"OUR BANK DETAILS":-37,"BANK A/C NO":-38,"RTGS/IFSC":-39,"RTS POINTS EARNED":-40,"OVERALL RTS POINTS":-41,"NARRATION":-42,"DISPATCHED THROUGH":-43,"TOTAL PACKS AND QUANTITY":-44,"PAYMENT TYPE ":-45,"Output IGST":-46,"RTGS/IFSC ":-47},"2":{"SELLER NAME":-1,"SELLER ADDRESS":-2,"SELLER PH":-3,"SELLER EMAIL":-4,"SELLER GST":-5,"SELLER PAN":-6,"BUYER NAME":-7,"BUYER ADDRESS":-8,"BUYER GST":-9,"BUYER PAN":-10,"INVOICE NO":-11,"INVOICE DATE":-12,"PO NO":-13,"SHIP TO":-14,"SHIP TO ADDRESS":-15,"SHIP TO GST":-16,"SHIP TO CONTACT":-17,"DESTINATION":-18,"SI":-19,"HSN":-20,"PRODUCT":-21,"PACK SIZE":-22,"QUANTITY":-23,"RATE":-24,"DISC":-25,"TAX %":-26,"AMOUNT":-27,"TOTAL PACKS & QUANTITY":-28,"ASSESSABLE AMOUNT":-29,"AMOUNT IN WORDS":-30,"PAYMENT ON OR BEFORE":-31,"PAYMENT TYPE":-32,"OUTPUT SGST":-33,"OUTPUT CGST":-34,"ROUND OFF":-35,"GRAND TOTAL":-36,"OUR BANK DETAILS":-37,"BANK A/C NO":-38,"RTGS/IFSC":-39,"RTS POINTS EARNED":-40,"OVERALL RTS POINTS":-41,"NARRATION":-42,"DISPATCHED THROUGH":-43,"TOTAL PACKS AND QUANTITY":-44,"PAYMENT TYPE ":-45,"Output IGST":-46,"RTGS/IFSC ":-47},"3":{"SELLER NAME":-1,"SELLER ADDRESS":-2,"SELLER PH":-3,"SELLER EMAIL":-4,"SELLER GST":-5,"SELLER PAN":-6,"BUYER NAME":-7,"BUYER ADDRESS":-8,"BUYER GST":-9,"BUYER PAN":-10,"INVOICE NO":-11,"INVOICE DATE":-12,"PO NO":-13,"SHIP TO":-14,"SHIP TO ADDRESS":-15,"SHIP TO GST":-16,"SHIP TO 
CONTACT":-17,"DESTINATION":-18,"SI":-19,"HSN":-20,"PRODUCT":-21,"PACK SIZE":-22,"QUANTITY":-23,"RATE":-24,"DISC":-25,"TAX %":-26,"AMOUNT":-27,"TOTAL PACKS & QUANTITY":-28,"ASSESSABLE AMOUNT":-29,"AMOUNT IN WORDS":-30,"PAYMENT ON OR BEFORE":-31,"PAYMENT TYPE":-32,"OUTPUT SGST":-33,"OUTPUT CGST":-34,"ROUND OFF":-35,"GRAND TOTAL":-36,"OUR BANK DETAILS":-37,"BANK A/C NO":-38,"RTGS/IFSC":-39,"RTS POINTS EARNED":-40,"OVERALL RTS POINTS":-41,"NARRATION":-42,"DISPATCHED THROUGH":-43,"TOTAL PACKS AND QUANTITY":-44,"PAYMENT TYPE ":-45,"Output IGST":-46,"RTGS/IFSC ":-47},"4":{"":1,"SELLER NAME":-1,"SELLER ADDRESS":-2,"SELLER PH":-3,"SELLER EMAIL":-4,"SELLER GST":-5,"SELLER PAN":-6,"BUYER NAME":-7,"BUYER ADDRESS":-8,"BUYER GST":-9,"BUYER PAN":-10,"INVOICE NO":-11,"INVOICE DATE":-12,"PO NO":-13,"SHIP TO":-14,"SHIP TO ADDRESS":-15,"SHIP TO GST":-16,"SHIP TO CONTACT":-17,"DESTINATION":-18,"SI":-19,"HSN":-20,"PRODUCT":-21,"PACK SIZE":-22,"QUANTITY":-23,"RATE":-24,"DISC":-25,"TAX %":-26,"AMOUNT":-27,"TOTAL PACKS & QUANTITY":-28,"ASSESSABLE AMOUNT":-29,"AMOUNT IN WORDS":-30,"PAYMENT ON OR BEFORE":-31,"PAYMENT TYPE":-32,"OUTPUT SGST":-33,"OUTPUT CGST":-34,"ROUND OFF":-35,"GRAND TOTAL":-36,"OUR BANK DETAILS":-37,"BANK A/C NO":-38,"RTGS/IFSC":-39,"RTS POINTS EARNED":-40,"OVERALL RTS POINTS":-41,"NARRATION":-42,"DISPATCHED THROUGH":-43,"TOTAL PACKS AND QUANTITY":-44,"PAYMENT TYPE ":-45,"Output IGST":-46,"RTGS/IFSC ":-47},"5":{"":1}}

+ 4
- 0
Invoice_parser/p/tokenizer
파일 크기가 너무 크기때문에 변경 상태를 표시하지 않습니다.
파일 보기


+ 1
- 0
Invoice_parser/p/vocab/key2row 파일 보기

@@ -0,0 +1 @@
1
+�

BIN
Invoice_parser/p/vocab/lexemes.bin 파일 보기


+ 6111
- 0
Invoice_parser/p/vocab/strings.json
파일 크기가 너무 크기때문에 변경 상태를 표시하지 않습니다.
파일 보기


BIN
Invoice_parser/p/vocab/vectors 파일 보기


+ 87
- 0
Invoice_parser/requirementsinvoice.txt 파일 보기

@@ -0,0 +1,87 @@
1
+asgiref==3.5.0
2
+backports.zoneinfo==0.2.1
3
+blis==0.7.5
4
+camelot-py==0.10.1
5
+catalogue==1.0.0
6
+certifi==2021.10.8
7
+cffi==1.15.0
8
+chardet==4.0.0
9
+charset-normalizer==2.0.9
10
+ci-info==0.2.0
11
+click==8.0.3
12
+coloredlogs==15.0.1
13
+configobj==5.0.6
14
+configparser==5.2.0
15
+cryptography==36.0.1
16
+cycler==0.11.0
17
+cymem==2.0.6
18
+Django==4.0.3
19
+docx==0.2.4
20
+docx2txt==0.8
21
+et-xmlfile==1.1.0
22
+etelemetry==0.2.2
23
+filelock==3.4.2
24
+fitz==0.0.1.dev2
25
+Flask==2.0.2
26
+fonttools==4.28.5
27
+future==0.18.2
28
+ghostscript==0.7
29
+httplib2==0.20.2
30
+humanfriendly==10.0
31
+idna==3.3
32
+image==1.5.33
33
+img2pdf==0.4.3
34
+importlib-resources==5.4.0
35
+isodate==0.6.1
36
+itsdangerous==2.0.1
37
+Jinja2==3.0.3
38
+kiwisolver==1.3.2
39
+lxml==4.7.1
40
+MarkupSafe==2.0.1
41
+matplotlib==3.5.1
42
+murmurhash==1.0.6
43
+networkx==2.6.3
44
+nibabel==3.2.1
45
+nipype==1.7.0
46
+numpy==1.22.0
47
+ocrmypdf==13.4.1
48
+opencv-python==4.5.5.62
49
+openpyxl==3.0.9
50
+packaging==21.3
51
+pandas==1.3.5
52
+pathlib==1.0.1
53
+pdfminer.six==20211012
54
+pikepdf==5.1.0
55
+Pillow==9.0.0
56
+plac==1.1.3
57
+pluggy==1.0.0
58
+preshed==3.0.6
59
+prov==2.0.0
60
+pycparser==2.21
61
+pydot==1.4.2
62
+PyMuPDF==1.19.4
63
+pyparsing==3.0.6
64
+PyPDF2==1.26.0
65
+pytesseract==0.3.8
66
+python-dateutil==2.8.2
67
+python-docx==0.8.11
68
+pytz==2021.3
69
+pyxnat==1.4
70
+rdflib==6.1.1
71
+reportlab==3.6.8
72
+requests==2.26.0
73
+scipy==1.7.3
74
+simplejson==3.17.6
75
+six==1.16.0
76
+spacy==2.3.5
77
+sqlparse==0.4.2
78
+srsly==1.0.5
79
+tabulate==0.8.9
80
+thinc==7.4.5
81
+tqdm==4.62.3
82
+traits==6.3.2
83
+urllib3==1.26.7
84
+wasabi==0.9.0
85
+Werkzeug==2.0.2
86
+xlrd==1.2.0
87
+zipp==3.7.0

+ 92
- 0
Invoice_parser/templates/Error.html 파일 보기

@@ -0,0 +1,92 @@
1
+
2
+<html>
3
+<head>
4
+
5
+<style>
6
+
7
+.button {
8
+  background-color: #000000; /* Green */
9
+  border: none;
10
+  color: white;
11
+  padding: 15px 32px;
12
+  text-align: center;
13
+  text-decoration: none;
14
+  display: inline-block;
15
+  margin: 4px 2px;
16
+  cursor: pointer;
17
+}
18
+
19
+
20
+.button1 {font-size: 10px;}
21
+.button2 {font-size: 12px;}
22
+.button3 {font-size: 16px;}
23
+.button4 {font-size: 50px;}
24
+.button5 {font-size: 24px;border-radius: 12px;}
25
+
26
+ body {
27
+	width: 100%;
28
+	height:100%;
29
+	font-family: 'Helvetica';
30
+	background-color:#000000;
31
+	color: #fff;
32
+	font-size: 24px;
33
+	text-align:center;
34
+	letter-spacing:1.4px;
35
+	}
36
+.dropbtn {
37
+  background-color: #04AA6D;
38
+  color: white;
39
+  padding: 16px;
40
+  font-size: 16px;
41
+  border: none;
42
+}
43
+
44
+.dropdown {
45
+  position: relative;
46
+  display: inline-block;
47
+}
48
+
49
+.dropdown-content {
50
+  display: none;
51
+  position: absolute;
52
+  background-color: #f1f1f1;
53
+  min-width: 160px;
54
+  box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2);
55
+  z-index: 1;
56
+}
57
+
58
+.dropdown-content a {
59
+  color: black;
60
+  padding: 12px 16px;
61
+  text-decoration: none;
62
+  display: block;
63
+}
64
+
65
+.dropdown-content a:hover {background-color: #ddd;}
66
+
67
+.dropdown:hover .dropdown-content {display: block;}
68
+
69
+.dropdown:hover .dropbtn {background-color: #3e8e41;}
70
+</style>
71
+</head>
72
+<body>
73
+<div style="background-image: url('https://lh3.googleusercontent.com/p/AF1QipONWF8G50u9Bu-dklcj3kzesofOn8Z0q0LdHeU1=w1080-h608-p-no-v0');
74
+   /* Full height */
75
+  height: 100%;
76
+    / Center and scale the image nicely /
77
+  background-position: center;
78
+  background-repeat: no-repeat;
79
+  background-size: cover;">
80
+<br>
81
+<br>
82
+  <br>
83
+  <br>
84
+    <br>  <br>
85
+      <a  class="button button5"href="/">RETURN HOME</a><br><br><br>
86
+      
87
+	<h3 class="button button3">Invalid File Input </h3>
88
+  </div>
89
+</div>
90
+
91
+</body>
92
+</html>

+ 97
- 0
Invoice_parser/templates/home.html 파일 보기

@@ -0,0 +1,97 @@
1
+
2
+<html>
3
+<head>
4
+
5
+<style>
6
+
7
+.button {
8
+  background-color: #000000; /* Green */
9
+  border: none;
10
+  color: white;
11
+  padding: 15px 32px;
12
+  text-align: center;
13
+  text-decoration: none;
14
+  display: inline-block;
15
+  margin: 4px 2px;
16
+  cursor: pointer;
17
+}
18
+
19
+
20
+.button1 {font-size: 10px;}
21
+.button2 {font-size: 12px;}
22
+.button3 {font-size: 16px;}
23
+.button4 {font-size: 50px;}
24
+.button5 {font-size: 24px;border-radius: 12px;}
25
+
26
+ body {
27
+	width: 100%;
28
+	height:100%;
29
+	font-family: 'Helvetica';
30
+	background-color:#000000;
31
+	color: #fff;
32
+	font-size: 24px;
33
+	text-align:center;
34
+	letter-spacing:1.4px;
35
+	}
36
+.dropbtn {
37
+  background-color: #04AA6D;
38
+  color: white;
39
+  padding: 16px;
40
+  font-size: 16px;
41
+  border: none;
42
+}
43
+
44
+.dropdown {
45
+  position: relative;
46
+  display: inline-block;
47
+}
48
+
49
+.dropdown-content {
50
+  display: none;
51
+  position: absolute;
52
+  background-color: #f1f1f1;
53
+  min-width: 160px;
54
+  box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2);
55
+  z-index: 1;
56
+}
57
+
58
+.dropdown-content a {
59
+  color: black;
60
+  padding: 12px 16px;
61
+  text-decoration: none;
62
+  display: block;
63
+}
64
+
65
+.dropdown-content a:hover {background-color: #ddd;}
66
+
67
+.dropdown:hover .dropdown-content {display: block;}
68
+
69
+.dropdown:hover .dropbtn {background-color: #3e8e41;}
70
+</style>
71
+</head>
72
+<body>
73
+<div style="background-image: url('https://lh3.googleusercontent.com/p/AF1QipONWF8G50u9Bu-dklcj3kzesofOn8Z0q0LdHeU1=w1080-h608-p-no-v0');
74
+   /* Full height */
75
+  height: 100%;
76
+    / Center and scale the image nicely /
77
+  background-position: center;
78
+  background-repeat: no-repeat;
79
+  background-size: cover;">
80
+<br>
81
+<br>
82
+  <br>
83
+<h2 class="button button5">Bizgaze Limited</h2><br>
84
+  <br>
85
+
86
+
87
+<div class="dropdown">
88
+  <button class="button button5" >Parser</button>
89
+  <div class="dropdown-content">
90
+    <a href="/resume">  RESUME  </a>
91
+    <a href="/invoice">  INVOICE  </a>
92
+
93
+  </div>
94
+</div>
95
+
96
+</body>
97
+</html>

+ 85
- 0
Invoice_parser/templates/invoice.html 파일 보기

@@ -0,0 +1,85 @@
1
+<html>
2
+
3
+<head>
4
+    <style>
5
+.button {
6
+  background-color: #000000; /* Green */
7
+  border: none;
8
+  color: white;
9
+  padding: 15px 32px;
10
+  text-align: center;
11
+  text-decoration: none;
12
+  display: inline-block;
13
+  margin: 4px 2px;
14
+  cursor: pointer;
15
+}
16
+
17
+
18
+.button1 {font-size: 10px;}
19
+.button2 {font-size: 12px;}
20
+.button3 {font-size: 16px;}
21
+.button4 {font-size: 50px;}
22
+.button5 {font-size: 24px;border-radius: 12px;}
23
+
24
+</style>
25
+
26
+</head>
27
+
28
+<body>
29
+<div style="background-image: url('https://lh3.googleusercontent.com/p/AF1QipONWF8G50u9Bu-dklcj3kzesofOn8Z0q0LdHeU1=w1080-h608-p-no-v0');
30
+   /* Full height */
31
+  height: 100%;
32
+    / Center and scale the image nicely /
33
+  background-position: center;
34
+  background-repeat: no-repeat;
35
+  background-size: cover;">
36
+
37
+      <h1 class="button button5"> INVOICE PARSER </h1>
38
+
39
+    <br>  <br>
40
+      <a  class="button button5"href="/">HOME</a>
41
+
42
+
43
+
44
+<form action="/upload_invoice" method="POST" enctype="multipart/form-data">
45
+        <div class="form-group">
46
+
47
+
48
+          <div class="custom-file">
49
+            <input type="file" class="button button5" name="image" id="image" accept=".jpg,.png,.jpeg,.JPEG,.pdf"> <br> <br><br>
50
+<div class="bg"></div>
51
+  <input class="button button5" type="submit">
52
+
53
+          </div>
54
+        </div>
55
+    </form>
56
+   <form action="{{ url_for('Download_invoice') }}" method="GET">
57
+       <br><br>  <input type="submit" class="button button5" value="Download"></form>
58
+      <form action="{{ url_for('Table') }}" method="GET">
59
+       <br><br>  <input type="submit" class="button button5" value="Table"></form>
60
+
61
+
62
+
63
+
64
+
65
+
66
+
67
+<style>
68
+
69
+   body {
70
+	width: 100%;
71
+	height:100%;
72
+	font-family: 'Helvetica';
73
+	background-color:#000000;
74
+	color: #fff;
75
+	font-size: 24px;
76
+	text-align:center;
77
+	letter-spacing:1.4px;
78
+
79
+}
80
+  </style>
81
+
82
+
83
+
84
+</body>
85
+</html>

+ 82
- 0
Invoice_parser/templates/resume.html 파일 보기

@@ -0,0 +1,82 @@
1
+<html>
2
+
3
+<head>
4
+    <style>
5
+.button {
6
+  background-color: #000000; /* Green */
7
+  border: none;
8
+  color: white;
9
+  padding: 15px 32px;
10
+  text-align: center;
11
+  text-decoration: none;
12
+  display: inline-block;
13
+  margin: 4px 2px;
14
+  cursor: pointer;
15
+}
16
+
17
+
18
+.button1 {font-size: 10px;}
19
+.button2 {font-size: 12px;}
20
+.button3 {font-size: 16px;}
21
+.button4 {font-size: 50px;}
22
+.button5 {font-size: 24px;border-radius: 12px;}
23
+
24
+</style>
25
+
26
+</head>
27
+
28
+<body>
29
+<div style="background-image: url('https://lh3.googleusercontent.com/p/AF1QipONWF8G50u9Bu-dklcj3kzesofOn8Z0q0LdHeU1=w1080-h608-p-no-v0');
30
+   /* Full height */
31
+  height: 100%;
32
+    / Center and scale the image nicely /
33
+  background-position: center;
34
+  background-repeat: no-repeat;
35
+  background-size: cover;">
36
+
37
+      <h1 class="button button5"> RESUME PARSER </h1>
38
+    <br>  <br>
39
+    <a  class="button button5"href="/">HOME</a>
40
+
41
+
42
+
43
+<form action="/upload_resume" method="POST" enctype="multipart/form-data">
44
+        <div class="form-group">
45
+
46
+
47
+          <div class="custom-file">
48
+            <input type="file" class="button button5" name="image" id="image" accept=".doc,.docx,.pdf"> <br> <br><br>
49
+<div class="bg"></div>
50
+  <input class="button button5" type="submit">
51
+
52
+          </div>
53
+        </div>
54
+    </form>
55
+   <form action="{{ url_for('Download_resume') }}" method="GET">
56
+       <br><br>  <input type="submit" class="button button5" value="Download"></form>
57
+
58
+
59
+
60
+
61
+
62
+
63
+
64
+<style>
65
+
66
+   body {
67
+	width: 100%;
68
+	height:100%;
69
+	font-family: 'Helvetica';
70
+	background-color:#000000;
71
+	color: #fff;
72
+	font-size: 24px;
73
+	text-align:center;
74
+	letter-spacing:1.4px;
75
+
76
+}
77
+  </style>
78
+
79
+
80
+
81
+</body>
82
+</html>

BIN
Invoice_parser/upload_invoice/301.pdf 파일 보기


BIN
Invoice_parser/uploads/0.pdf 파일 보기


+ 8
- 0
Resume_parser/AD.csv 파일 보기

@@ -0,0 +1,8 @@
1
+Key,Values
2
+NAME,M.Amrutha
3
+EXPERIENCE,"Professional Experience Project Title #1 INVENTORY MANAGEMENT SYSTEM Work Place Himansu It Services PVT.LTD. Work Location Hyderabad Technologies C#.net,sqlserver-2008 Operating System Microsoft Windows 10 Role Developer Team Size 2 Project Details This window based stock system software used in any kind of shop like any grocery shop, any electrical accessory shop. We can use this software for maintain stock inventory records and sales, order, payment records on daily and monthly basis. The stock management software generate stock reports, sales reports, payment reports all types of reports which are necessary for mange our stock will generate in this system. We also provide facilities to order cancellation, returns faulty stock to company or dealer, maintain stock company bill and company payment detail. Maintain stock record like stock, sell record, order record and maintain inventory on daily and monthly basis. Responsibility Development Coding and Design. Project Title #2 Sales Invoice Work Place Himansu It Services PVT.LTD Work Location Hyderabad Technologies Windows Application C#,NET,Postgresql Operating System Microsoft Windows 8 Role Developer Team Size 2 Project Details This is a windows Forms Application the user wiil create Crystal Reports create sales invoice/receipt using crystal report in c# windows application with the stored procedure..In order to implement daily reports as sales reports, and stock reports in c# windows application”. Responsibility Development Coding and Design. Generate Crystal Reports"
4
+EMAIL,ammu.raby@gmail.com
5
+MOBILE NUMBER,Contact:9110317873
6
+EXPERIENCE,"Professional Experience Project Title #1 INVENTORY MANAGEMENT SYSTEM Work Place Himansu It Services PVT.LTD. Work Location Hyderabad Technologies C#.net,sqlserver-2008 Operating System Microsoft Windows 10 Role Developer Team Size 2 Project Details This window based stock system software used in any kind of shop like any grocery shop, any electrical accessory shop. We can use this software for maintain stock inventory records and sales, order, payment records on daily and monthly basis. The stock management software generate stock reports, sales reports, payment reports all types of reports which are necessary for mange our stock will generate in this system. We also provide facilities to order cancellation, returns faulty stock to company or dealer, maintain stock company bill and company payment detail. Maintain stock record like stock, sell record, order record and maintain inventory on daily and monthly basis. Responsibility Development Coding and Design. Project Title #2 Sales Invoice Work Place Himansu It Services PVT.LTD Work Location Hyderabad Technologies Windows Application C#,NET,Postgresql Operating System Microsoft Windows 8 Role Developer Team Size 2 Project Details This is a windows Forms Application the user wiil create Crystal Reports create sales invoice/receipt using crystal report in c# windows application with the stored procedure..In order to implement daily reports as sales reports, and stock reports in c# windows application”. Responsibility Development Coding and Design. Generate Crystal Reports"
7
+SKILLS,"Technical Skills Microsoft Technologies : ASP.NET, C#.NET, ADO.NET, MVC 4.0. Languages : C-sharp. Scripting Languages :Java Script, Jquery Database : SQL Server 2008, 2012. IDE : VisualStudio.NET 2010, 2012, 2013."
8
+SOCIAL NETWORKS,

+ 9
- 0
Resume_parser/AD11.csv 파일 보기

@@ -0,0 +1,9 @@
1
+Key
2
+NAME
3
+EXPERIENCE
4
+EMAIL
5
+MOBILE NUMBER
6
+EXPERIENCE
7
+SKILLS
8
+SOCIAL NETWORKS
9
+

+ 16
- 0
Resume_parser/Ad1.csv 파일 보기

@@ -0,0 +1,16 @@
1
+Key,Values
2
+NAME,M.Amrutha
3
+MOBILE NUMBER,Contact:9110317873
4
+EMAIL,ammu.raby@gmail.com
5
+SUMMARY,"Professional Summary Currently working as a Software Engineer in Himansu It Services PVT.LTD. Worked as Assistant professor in Ellenki group of Technology (Hyderabad)for 2 years(2015-2017) Over 1+ years of IT experience in various stages of Software Product Development involving Design, Support as well as customization, enhancements, modifications and corrections in existing software to meet client’s requirement. Experienced in development of Web based applications using Asp.Net, C #, JavaScript and SQL Server."
6
+SKILLS,"Technical Skills Microsoft Technologies : ASP.NET, C#.NET, ADO.NET, MVC 4.0. Languages : C-sharp. Scripting Languages :Java Script, Jquery Database : SQL Server 2008, 2012. IDE : VisualStudio.NET 2010, 2012, 2013."
7
+EXPERIENCE,"Professional Experience Project Title #1 INVENTORY MANAGEMENT SYSTEM Work Place Himansu It Services PVT.LTD. Work Location Hyderabad Technologies C#.net,sqlserver-2008 Operating System Microsoft Windows 10 Role Developer Team Size 2 Project Details This window based stock system software used in any kind of shop like any grocery shop, any electrical accessory shop. We can use this software for maintain stock inventory records and sales, order, payment records on daily and monthly basis. The stock management software generate stock reports, sales reports, payment reports all types of reports which are necessary for mange our stock will generate in this system. We also provide facilities to order cancellation, returns faulty stock to company or dealer, maintain stock company bill and company payment detail. Maintain stock record like stock, sell record, order record and maintain inventory on daily and monthly basis. Responsibility Development Coding and Design. Project Title #2 Sales Invoice Work Place Himansu It Services PVT.LTD Work Location Hyderabad Technologies Windows Application C#,NET,Postgresql Operating System Microsoft Windows 8 Role Developer Team Size 2 Project Details This is a windows Forms Application the user wiil create Crystal Reports create sales invoice/receipt using crystal report in c# windows application with the stored procedure..In order to implement daily reports as sales reports, and stock reports in c# windows application”. Responsibility Development Coding and Design. Generate Crystal Reports"
8
+EDUCATION,M.Tech in Software Engineering
9
+NAME OF INSTITUTE,"TRR Engineering College,"
10
+YEAR OF PASSING,2014
11
+UNIVERSITY/BOARD NAME,JNTUH
12
+SCORE,80%
13
+EDUCATION,B.Tech in Information Technology
14
+NAME OF INSTITUTE,Narayana Engineering College
15
+YEAR OF PASSING,2007
16
+UNIVERSITY/BOARD NAME,JNTUH

+ 7
- 0
Resume_parser/Ad2.csv 파일 보기

@@ -0,0 +1,7 @@
1
+Key,Values
2
+PG,M.Tech in Software Engineering
3
+PG_NAME_OF_INSTITUTE,TRR
4
+UG_YEAR_OF_PASSING,2014
5
+UG,B.Tech in Information Technology
6
+UG_NAME_OF_INSTITUTE,"Narayana Engineering College ,Gudur in"
7
+INTERMEDIATE_YEAR_OF_PASSING,2007

+ 1
- 0
Resume_parser/ME/meta.json 파일 보기

@@ -0,0 +1 @@
1
+{"lang":"en","name":"model","version":"0.0.0","spacy_version":">=2.2.4","description":"","author":"","email":"","url":"","license":"","vectors":{"width":0,"vectors":0,"keys":0,"name":"spacy_pretrained_vectors"},"pipeline":["ner"],"factories":{"ner":"ner"},"labels":{"ner":["Achievements","Address","Certifications","Class","Company","Date of Birth","Designation","Duration","Education","Email","Experience","Gender","Hobbies","Languages Known","Mobile Number","Name","Name of Institute","Score","Skills","Social networks","Summary","University/Board Name","Year of Passing"]}}

+ 18
- 0
Resume_parser/ME/ner/cfg 파일 보기

@@ -0,0 +1,18 @@
1
+{
2
+  "beam_width":1,
3
+  "beam_density":0.0,
4
+  "beam_update_prob":1.0,
5
+  "cnn_maxout_pieces":3,
6
+  "nr_feature_tokens":6,
7
+  "nr_class":94,
8
+  "hidden_depth":1,
9
+  "token_vector_width":96,
10
+  "hidden_width":64,
11
+  "maxout_pieces":2,
12
+  "pretrained_vectors":null,
13
+  "bilstm_depth":0,
14
+  "self_attn_depth":0,
15
+  "conv_depth":4,
16
+  "conv_window":1,
17
+  "embed_size":2000
18
+}

BIN
Resume_parser/ME/ner/model 파일 보기


+ 1
- 0
Resume_parser/ME/ner/moves 파일 보기

@@ -0,0 +1 @@
1
+�¥movesÚ0{"0":{},"1":{"Name":-1,"Email":-2,"Mobile Number":-3,"Experience":-4,"Designation":-5,"Education":-6,"Name of Institute":-7,"University/Board Name":-8,"Score":-9,"Year of Passing":-10,"Class":-11,"Skills":-12,"Gender":-13,"Duration":-14,"Summary":-15,"Social networks":-16,"Date of Birth":-17,"Address":-18,"Achievements":-19,"Languages Known":-20,"Hobbies":-21,"Company":-22,"Certifications":-23},"2":{"Name":-1,"Email":-2,"Mobile Number":-3,"Experience":-4,"Designation":-5,"Education":-6,"Name of Institute":-7,"University/Board Name":-8,"Score":-9,"Year of Passing":-10,"Class":-11,"Skills":-12,"Gender":-13,"Duration":-14,"Summary":-15,"Social networks":-16,"Date of Birth":-17,"Address":-18,"Achievements":-19,"Languages Known":-20,"Hobbies":-21,"Company":-22,"Certifications":-23},"3":{"Name":-1,"Email":-2,"Mobile Number":-3,"Experience":-4,"Designation":-5,"Education":-6,"Name of Institute":-7,"University/Board Name":-8,"Score":-9,"Year of Passing":-10,"Class":-11,"Skills":-12,"Gender":-13,"Duration":-14,"Summary":-15,"Social networks":-16,"Date of Birth":-17,"Address":-18,"Achievements":-19,"Languages Known":-20,"Hobbies":-21,"Company":-22,"Certifications":-23},"4":{"":1,"Name":-1,"Email":-2,"Mobile Number":-3,"Experience":-4,"Designation":-5,"Education":-6,"Name of Institute":-7,"University/Board Name":-8,"Score":-9,"Year of Passing":-10,"Class":-11,"Skills":-12,"Gender":-13,"Duration":-14,"Summary":-15,"Social networks":-16,"Date of Birth":-17,"Address":-18,"Achievements":-19,"Languages Known":-20,"Hobbies":-21,"Company":-22,"Certifications":-23},"5":{"":1}}

+ 4
- 0
Resume_parser/ME/tokenizer
파일 크기가 너무 크기때문에 변경 상태를 표시하지 않습니다.
파일 보기


+ 1
- 0
Resume_parser/ME/vocab/key2row 파일 보기

@@ -0,0 +1 @@
1
+�

BIN
Resume_parser/ME/vocab/lexemes.bin 파일 보기


+ 38152
- 0
Resume_parser/ME/vocab/strings.json
파일 크기가 너무 크기때문에 변경 상태를 표시하지 않습니다.
파일 보기


BIN
Resume_parser/ME/vocab/vectors 파일 보기


+ 4
- 0
Resume_parser/PG.csv 파일 보기

@@ -0,0 +1,4 @@
1
+Key,implement
2
+PG,QUALIFICATION
3
+PG_NAME_OF_INSTITUTE,NAMEOFINSTITUTE
4
+PG_YEAR_OF_PASSING,YEAROFPASSING

+ 4
- 0
Resume_parser/PGmerge.csv 파일 보기

@@ -0,0 +1,4 @@
1
+Key,implement,Values
2
+PG,QUALIFICATION,M.Tech in Software Engineering
3
+PG_NAME_OF_INSTITUTE,NAMEOFINSTITUTE,TRR
4
+PG_YEAR_OF_PASSING,YEAROFPASSING,

+ 5
- 0
Resume_parser/SSC.csv 파일 보기

@@ -0,0 +1,5 @@
1
+Key,implement
2
+SSC,QUALIFICATION
3
+SSC_NAME_OF_INSTITUTE,NAMEOFINSTITUTE
4
+SSC_YEAR_OF_PASSING,YEAROFPASSING
5
+SSC_SCORE,SCORE

+ 4
- 0
Resume_parser/UG.csv 파일 보기

@@ -0,0 +1,4 @@
1
+Key,implement
2
+UG,QUALIFACTION
3
+UG_NAME_OF_INSTITUTE,NAMEOFINSTITUTE
4
+UG_YEAR_OF_PASSING,YEAROFPASSING

+ 4
- 0
Resume_parser/UGmerge.csv 파일 보기

@@ -0,0 +1,4 @@
1
+Key,implement,Values
2
+UG,QUALIFACTION,B.Tech in Information Technology
3
+UG_NAME_OF_INSTITUTE,NAMEOFINSTITUTE,"Narayana Engineering College ,Gudur in"
4
+UG_YEAR_OF_PASSING,YEAROFPASSING,2014

+ 43
- 0
Resume_parser/bdeeducation_50_0.2/meta.json 파일 보기

@@ -0,0 +1,43 @@
1
+{
2
+  "lang":"en",
3
+  "name":"model",
4
+  "version":"0.0.0",
5
+  "spacy_version":">=2.3.5",
6
+  "description":"",
7
+  "author":"",
8
+  "email":"",
9
+  "url":"",
10
+  "license":"",
11
+  "spacy_git_version":"1d4b1dea2",
12
+  "vectors":{
13
+    "width":0,
14
+    "vectors":0,
15
+    "keys":0,
16
+    "name":"spacy_pretrained_vectors"
17
+  },
18
+  "pipeline":[
19
+    "ner"
20
+  ],
21
+  "factories":{
22
+    "ner":"ner"
23
+  },
24
+  "labels":{
25
+    "ner":[
26
+      "INTERMEDIATE",
27
+      "INTERMEDIATE_Name_of_Institute",
28
+      "INTERMEDIATE_Score",
29
+      "INTERMEDIATE_Year_of_Passing",
30
+      "PG",
31
+      "PG_Name_of_Institute",
32
+      "PG_Year_of_Passing",
33
+      "SSC",
34
+      "SSC_Name_of_Institute",
35
+      "SSC_Score",
36
+      "SSC_Year_of_Passing",
37
+      "UG",
38
+      "UG_Name_of_Institute",
39
+      "UG_Score",
40
+      "UG_Year_of_Passing"
41
+    ]
42
+  }
43
+}

+ 18
- 0
Resume_parser/bdeeducation_50_0.2/ner/cfg 파일 보기

@@ -0,0 +1,18 @@
1
+{
2
+  "beam_width":1,
3
+  "beam_density":0.0,
4
+  "beam_update_prob":1.0,
5
+  "cnn_maxout_pieces":3,
6
+  "nr_feature_tokens":6,
7
+  "nr_class":62,
8
+  "hidden_depth":1,
9
+  "token_vector_width":96,
10
+  "hidden_width":64,
11
+  "maxout_pieces":2,
12
+  "pretrained_vectors":null,
13
+  "bilstm_depth":0,
14
+  "self_attn_depth":0,
15
+  "conv_depth":4,
16
+  "conv_window":1,
17
+  "embed_size":2000
18
+}

BIN
Resume_parser/bdeeducation_50_0.2/ner/model 파일 보기


+ 1
- 0
Resume_parser/bdeeducation_50_0.2/ner/moves 파일 보기

@@ -0,0 +1 @@
1
+�¥movesÚ@{"0":{},"1":{"PG":-1,"PG_Name_of_Institute":-2,"PG_Year_of_Passing":-3,"UG":-4,"UG_Name_of_Institute":-5,"UG_Year_of_Passing":-6,"UG_Score":-7,"INTERMEDIATE":-8,"INTERMEDIATE_Score":-9,"INTERMEDIATE_Name_of_Institute":-10,"INTERMEDIATE_Year_of_Passing":-11,"SSC":-12,"SSC_Score":-13,"SSC_Name_of_Institute":-14,"SSC_Year_of_Passing":-15},"2":{"PG":-1,"PG_Name_of_Institute":-2,"PG_Year_of_Passing":-3,"UG":-4,"UG_Name_of_Institute":-5,"UG_Year_of_Passing":-6,"UG_Score":-7,"INTERMEDIATE":-8,"INTERMEDIATE_Score":-9,"INTERMEDIATE_Name_of_Institute":-10,"INTERMEDIATE_Year_of_Passing":-11,"SSC":-12,"SSC_Score":-13,"SSC_Name_of_Institute":-14,"SSC_Year_of_Passing":-15},"3":{"PG":-1,"PG_Name_of_Institute":-2,"PG_Year_of_Passing":-3,"UG":-4,"UG_Name_of_Institute":-5,"UG_Year_of_Passing":-6,"UG_Score":-7,"INTERMEDIATE":-8,"INTERMEDIATE_Score":-9,"INTERMEDIATE_Name_of_Institute":-10,"INTERMEDIATE_Year_of_Passing":-11,"SSC":-12,"SSC_Score":-13,"SSC_Name_of_Institute":-14,"SSC_Year_of_Passing":-15},"4":{"":1,"PG":-1,"PG_Name_of_Institute":-2,"PG_Year_of_Passing":-3,"UG":-4,"UG_Name_of_Institute":-5,"UG_Year_of_Passing":-6,"UG_Score":-7,"INTERMEDIATE":-8,"INTERMEDIATE_Score":-9,"INTERMEDIATE_Name_of_Institute":-10,"INTERMEDIATE_Year_of_Passing":-11,"SSC":-12,"SSC_Score":-13,"SSC_Name_of_Institute":-14,"SSC_Year_of_Passing":-15},"5":{"":1}}

+ 0
- 0
Resume_parser/bdeeducation_50_0.2/tokenizer 파일 보기


이 변경점에서 너무 많은 파일들이 변경되어 몇몇 파일들은 표시되지 않았습니다.

Loading…
취소
저장