Flask web front-end that serves a chat UI and proxies user messages to a Rasa server's REST webhook.

app.py

import requests
from flask import Flask, request, jsonify, render_template
from flask_cors import CORS

app = Flask(__name__)
CORS(app)
# Legacy Keras/NLTK chatbot pipeline, disabled in favour of the Rasa
# backend below and kept here for reference.
# import nltk
# nltk.download('popular')
# from nltk.stem import WordNetLemmatizer
# lemmatizer = WordNetLemmatizer()
# import pickle
# import numpy as np
# from keras.models import load_model
# model = load_model('model.h5')
# import json
# import random
# intents = json.loads(open('data.json').read())
# words = pickle.load(open('texts.pkl', 'rb'))
# classes = pickle.load(open('labels.pkl', 'rb'))
#
# def clean_up_sentence(sentence):
#     # tokenize the pattern - split words into array
#     sentence_words = nltk.word_tokenize(sentence)
#     # stem each word - create short form for word
#     sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]
#     return sentence_words
#
# # return bag of words array: 0 or 1 for each word in the bag that exists in the sentence
# def bow(sentence, words, show_details=True):
#     # tokenize the pattern
#     sentence_words = clean_up_sentence(sentence)
#     # bag of words - matrix of N words, vocabulary matrix
#     bag = [0] * len(words)
#     for s in sentence_words:
#         for i, w in enumerate(words):
#             if w == s:
#                 # assign 1 if current word is in the vocabulary position
#                 bag[i] = 1
#                 if show_details:
#                     print("found in bag: %s" % w)
#     return np.array(bag)
#
# def predict_class(sentence, model):
#     # filter out predictions below a threshold
#     p = bow(sentence, words, show_details=False)
#     res = model.predict(np.array([p]))[0]
#     ERROR_THRESHOLD = 0.25
#     results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]
#     # sort by strength of probability
#     results.sort(key=lambda x: x[1], reverse=True)
#     return_list = []
#     for r in results:
#         return_list.append({"intent": classes[r[0]], "probability": str(r[1])})
#     return return_list
#
# def getResponse(ints, intents_json):
#     tag = ints[0]['intent']
#     list_of_intents = intents_json['intents']
#     for i in list_of_intents:
#         if i['tag'] == tag:
#             result = random.choice(i['responses'])
#             break
#     return result
#
# def chatbot_response(msg):
#     ints = predict_class(msg, model)
#     res = getResponse(ints, intents)
#     return res
app.static_folder = 'static'

# Define the Rasa server URL (REST channel webhook)
rasa_server_url = "http://localhost:5005/webhooks/rest/webhook"


@app.route("/")
def home():
    return render_template("index.html")


# POST only: the handler reads the JSON body, which a GET request lacks
@app.route('/webhook', methods=['POST'])
def webhook():
    message = request.json['message']
    # Send the message to the Rasa server; the REST channel replies with a
    # list of {"recipient_id": ..., "text": ...} objects
    rasa_response = requests.post(rasa_server_url, json={"message": message}).json()
    print(rasa_response)
    # Return a canned reply when Rasa produces no response at all
    if len(rasa_response) == 0:
        return jsonify([
            {
                "recipient_id": "default",
                "text": "I'm sorry, I didn't understand that. Can you please rephrase?"
            }
        ])
    else:
        # Return the Rasa response as a JSON object
        return jsonify(rasa_response)


@app.route("/get")
def get_bot_response():
    # Previously wired to the disabled Keras chatbot_response(); forward to
    # the Rasa webhook instead so the endpoint keeps working
    userText = request.args.get('msg')
    rasa_response = requests.post(rasa_server_url, json={"message": userText}).json()
    if rasa_response:
        return rasa_response[0].get("text", "")
    return "I'm sorry, I didn't understand that. Can you please rephrase?"


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5020)
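
For quick testing, here is a minimal client-side sketch of how the /webhook proxy can be exercised. It assumes the Flask app above is running on localhost:5020 and that a Rasa server with its REST channel enabled is listening on localhost:5005; neither is guaranteed by this file alone.

# Minimal client sketch for the /webhook proxy above (assumes the Flask app
# runs on localhost:5020 and a Rasa server with the REST channel enabled is
# reachable on localhost:5005).
import requests

reply = requests.post(
    "http://localhost:5020/webhook",
    json={"message": "hello"},
).json()

# The proxy relays Rasa's REST-channel payload: a list of message objects
# such as [{"recipient_id": "default", "text": "..."}]
for msg in reply:
    print(msg.get("text", ""))

Note that the proxy passes no "sender" field, so Rasa tracks every message under its default sender ID; forwarding a per-user sender value would keep conversations separate.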