@@ -0,0 +1,110 @@
+from flask import Flask, request, render_template
+# LLM, embedding, and retrieval components from LangChain and Transformers.
+# Several of these (GooglePalm, LlamaCpp, pipeline, HuggingFacePipeline,
+# GooglePalmEmbeddings, torch) are only used by the commented-out alternative
+# model setups below, or are currently unused.
+from langchain.llms import GooglePalm
+from langchain.llms import LlamaCpp
+from langchain.llms import CTransformers
+from transformers import pipeline
+from langchain.llms import HuggingFacePipeline
+from langchain.embeddings import GooglePalmEmbeddings
+from langchain.document_loaders.csv_loader import CSVLoader
+from langchain.embeddings import HuggingFaceInstructEmbeddings
+from langchain.vectorstores import FAISS
+from langchain.prompts import PromptTemplate
+from langchain.chains import RetrievalQA
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+import torch
+
+
+app = Flask(__name__)
+
+# Initialize LangChain components once at startup (outside the route handler)
+
+# Alternative: hosted Google PaLM model via API (requires a valid key; never commit real keys)
+# api_key = 'YOUR_GOOGLE_API_KEY'
+# llm = GooglePalm(google_api_key=api_key, temperature=0.1)
+
+# Local Llama 2 7B chat model in GGML format, served through ctransformers
+llm = CTransformers(model=r"C:\Aiproject\mainmodel\llama-2-7b-chat.ggmlv3.q8_0.bin",
+                    model_type="llama", temperature=0.1, gpu_layers=50, do_sample=True)
+
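+# Note: gpu_layers should only take effect if ctransformers was installed with GPU
+# support (e.g. the ctransformers[cuda] build); on a CPU-only install the model is
+# expected to run entirely on the CPU.
+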
+# Alternative: local Mistral 7B through ctransformers
+# llm = CTransformers(
+#     model=r"C:\Aiproject\mistral\Mistral-7B-v0.1",
+#     model_type="mistral",
+#     max_new_tokens=1048,
+#     temperature=0.1,
+#     gpu_layers=50
+# )
+
+# Alternative: LaMini-T5 on CPU through a Transformers text2text-generation pipeline
+# device = torch.device('cpu')
+# from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+
+# checkpoint = "MBZUAI/LaMini-T5-738M"
+# tokenizer = AutoTokenizer.from_pretrained(checkpoint)
+# base_model = AutoModelForSeq2SeqLM.from_pretrained(
+#     checkpoint,
+#     device_map=device,
+#     torch_dtype=torch.float32
+# )
+
+# pipe = pipeline(
+#     'text2text-generation',
+#     model=base_model,
+#     tokenizer=tokenizer,
+#     max_length=256,
+#     do_sample=True,
+#     temperature=0.1,
+#     top_p=0.95
+# )
+# llm = HuggingFacePipeline(pipeline=pipe)
+
+
+# Initialize instructor embeddings using a locally downloaded Hugging Face model
+instructor_embeddings = HuggingFaceInstructEmbeddings(model_name=r"C:\Aiproject\SupportGpt\model")
+
+# Load the FAQ data from the CSV file and split it into overlapping chunks
+loader = CSVLoader(file_path=r"C:\Aiproject\codebasics_faqs.csv", encoding='iso-8859-1', source_column="Question")
+data = loader.load()
+text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
+texts = text_splitter.split_documents(data)
+
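+# CSVLoader yields one Document per CSV row, with the "Question" column recorded as
+# the source in each document's metadata; the splitter then breaks long rows into
+# 500-character chunks with 50 characters of overlap.
+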
+# Create a FAISS vector store over the document chunks
+vectordb = FAISS.from_documents(documents=texts, embedding=instructor_embeddings)
+
+# Create a retriever for querying the vector database; only chunks whose relevance
+# score clears the threshold are returned
+retriever = vectordb.as_retriever(search_type="similarity_score_threshold",
+                                  search_kwargs={"score_threshold": 0.7})
+
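+# Optional sanity check of the retriever (the query below is only an illustration):
+# docs = retriever.get_relevant_documents("Do you provide a certificate?")
+# print(docs)
+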
+# Define the prompt template
+# prompt_template = """Use the following pieces of information to answer the user's question, don't give additional answers.
+# If you don't know the answer, just say that you don't know, don't try to make up an answer.
+
+prompt_template = """Use the following pieces of information to answer the user's question, and do not give any additional or extra answers that do not exist in the given information.
+If the answer doesn't exist in the given information, say "I don't know".
+If the answer exists in the information, give the exact answer as it exists in the given information.
+Users may use different keywords or synonyms, so use at least 10 keywords or synonyms from the user's question to identify exactly what the user wants to know from the given information.
+Please refrain from making up your own answers or modifying the answers.
+
+CONTEXT: {context}
+
+QUESTION: {question}"""
+
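+# With the "stuff" chain created below, {context} is filled with the retrieved chunks
+# and {question} with the user's query before the prompt is sent to the LLM.
+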
+# Wrap the template in a PromptTemplate and plug it into the QA chain
+PROMPT = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
+chain_type_kwargs = {"prompt": PROMPT}
+
+# Create the LangChain RetrievalQA chain ("stuff" packs all retrieved chunks into a single prompt)
+chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever,
+                                    input_key="query", return_source_documents=True,
+                                    chain_type_kwargs=chain_type_kwargs)
+
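+# Illustrative usage: chain("some question") returns a dict whose "result" key holds
+# the generated answer and whose "source_documents" key holds the retrieved chunks.
+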
+@app.route('/', methods=['GET', 'POST'])
+def index():
+    if request.method == 'POST':
+        # .get() avoids a 400 error if the form field is missing
+        question = request.form.get('question', '')
+        if question:
+            # Run the RetrievalQA chain on the submitted question
+            result = chain(question)['result']
+        else:
+            result = None
+        return render_template('index1.html', question=question, result=result)
+    return render_template('index1.html', question='', result=None)
+
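+# Note: this route expects templates/index1.html to contain a form with a text input
+# named "question".
+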
+if __name__ == '__main__':
+    app.run(host="0.0.0.0", debug=False)