
app.py

from flask import Flask, request, render_template
# LangChain / model imports (several of these are only needed by the
# commented-out model alternatives further down)
from langchain.llms import CTransformers
from langchain.llms import GooglePalm
from langchain.llms import LlamaCpp
from transformers import pipeline
from langchain.llms import HuggingFacePipeline
from langchain.embeddings import GooglePalmEmbeddings
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.vectorstores import FAISS
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
from langchain.text_splitter import RecursiveCharacterTextSplitter
import torch

app = Flask(__name__)
# Initialize the LangChain components once at module load so every request reuses them
# api_key = 'AIzaSyCZwsYvr3ht7ctxcrOLWvfppySP33ducmE'
# llm = GooglePalm(google_api_key=api_key, temperature=0.1)
# Note: the LangChain CTransformers wrapper takes sampling parameters through its
# `config` dict (bare keywords are not forwarded to the model); ctransformers
# samples by default, so a separate do_sample flag is not needed.
llm = CTransformers(
    model=r"C:\Aiproject\mainmodel\llama-2-7b-chat.ggmlv3.q8_0.bin",
    model_type="llama",
    config={"temperature": 0.1, "gpu_layers": 50},
)
# llm = CTransformers(
#     model=r"C:\Aiproject\mistral\Mistral-7B-v0.1",
#     model_type="mistral",
#     config={"max_new_tokens": 1048, "temperature": 0.1, "gpu_layers": 50},
# )
# device = torch.device('cpu')
# from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# checkpoint = "MBZUAI/LaMini-T5-738M"
# tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# base_model = AutoModelForSeq2SeqLM.from_pretrained(
#     checkpoint,
#     device_map=device,
#     torch_dtype=torch.float32,
# )
# pipe = pipeline(
#     'text2text-generation',
#     model=base_model,
#     tokenizer=tokenizer,
#     max_length=256,
#     do_sample=True,
#     temperature=0.1,
#     top_p=0.95,
# )
# llm = HuggingFacePipeline(pipeline=pipe)
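
# Optional sanity check (not part of the original app): a minimal sketch for
# smoke-testing whichever LLM was selected above before wiring it into the chain.
# The prompt text is an arbitrary example.
def _smoke_test_llm():
    # LangChain LLMs are callable with a prompt string and return the completion
    print(llm("Say hello in one short sentence."))
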
# Initialize instructor embeddings using the local Hugging Face model
instructor_embeddings = HuggingFaceInstructEmbeddings(model_name=r"C:\Aiproject\SupportGpt\model")
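
# Optional sanity check (not part of the original app): embed a sample string to
# confirm the local instructor model loaded correctly. embed_query returns a list
# of floats; the question is an arbitrary placeholder.
def _smoke_test_embeddings():
    vector = instructor_embeddings.embed_query("How do I enroll in a course?")
    print(f"embedding dimension: {len(vector)}")
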
# Load the FAQ data from CSV, then split it into overlapping chunks for embedding
loader = CSVLoader(file_path=r"C:\Aiproject\codebasics_faqs.csv", encoding='iso-8859-1', source_column="Question")
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
texts = text_splitter.split_documents(data)
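
# Optional inspection (not part of the original app): report how many chunks the
# splitter produced and preview the first one; useful when tuning chunk_size and
# chunk_overlap above.
def _inspect_chunks():
    print(f"{len(data)} rows loaded, {len(texts)} chunks after splitting")
    if texts:
        print(texts[0].page_content[:200])
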
# Create a FAISS instance for the vector database
vectordb = FAISS.from_documents(documents=texts, embedding=instructor_embeddings)
# Create a retriever for querying the vector database; the score threshold must be
# passed through search_kwargs (as a bare keyword it is silently ignored by as_retriever)
retriever = vectordb.as_retriever(search_type="similarity_score_threshold",
                                  search_kwargs={"score_threshold": 0.7})
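
# Optional debugging helper (not part of the original app): a sketch for inspecting
# which FAQ rows the retriever returns for a query, independent of the LLM. The
# sample query is an arbitrary placeholder.
def _debug_retrieval(query="What is the refund policy?"):
    for doc in retriever.get_relevant_documents(query):
        print(doc.metadata.get("source"), "->", doc.page_content[:100])
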
# Define the prompt template
# prompt_template = """Use the following pieces of information to answer the user's question; don't give additional answers.
# If you don't know the answer, just say that you don't know; don't try to make up an answer."""
prompt_template = """Use the following pieces of information to answer the user's question, and do not give any additional or extra answers that do not exist in the given information. If the answer doesn't exist in the given information, say "I don't know". If the answer exists in the information, give the exact answer as it appears in the given information. Users may use different keywords or synonyms, so use at least 10 keywords or synonyms from the user's question to identify exactly what the user wants to know from the given information. Please refrain from making up your own answers or modifying the answers.

CONTEXT: {context}

QUESTION: {question}"""
# Create a prompt template
PROMPT = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
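
# Optional illustration (not part of the original app): PromptTemplate.format fills in
# the placeholders, which is what the "stuff" chain does with the retrieved chunks at
# run time. The context/question values here are made-up examples.
def _preview_prompt():
    print(PROMPT.format(context="<retrieved FAQ chunks go here>",
                        question="<user question goes here>"))
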
chain_type_kwargs = {"prompt": PROMPT}
# Create the LangChain RetrievalQA chain
chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever,
                                    input_key="query", return_source_documents=True,
                                    chain_type_kwargs=chain_type_kwargs)
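
# Optional usage sketch (not part of the original app): because input_key="query" and
# return_source_documents=True, calling the chain returns both the answer and the
# source documents it was grounded in. The question is an arbitrary example.
def _ask(question="Do you provide a certificate?"):
    response = chain({"query": question})
    print(response["result"])
    for doc in response["source_documents"]:
        print("source:", doc.metadata.get("source"))
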
@app.route('/', methods=['GET', 'POST'])
def index():
    if request.method == 'POST':
        # use .get so a missing form field does not raise a KeyError
        question = request.form.get('question', '')
        if question:
            result = chain(question)['result']
        else:
            result = None
        return render_template('index1.html', question=question, result=result)
    return render_template('index1.html', question='', result=None)

if __name__ == '__main__':
    app.run(host="0.0.0.0", debug=False)
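
# Example of exercising the endpoint once the server is running (assumes the default
# Flask port 5000; the question text is an arbitrary example):
#   curl -X POST -d "question=Do you provide a certificate?" http://localhost:5000/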