Gopikanth123 committed on
Commit
f7c3de5
·
verified ·
1 Parent(s): 0edf0f8

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +312 -129
main.py CHANGED
@@ -1,137 +1,318 @@
1
- import os
2
- import shutil
3
- from flask import Flask, render_template, request, jsonify
4
- from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
5
- from llama_index.llms.huggingface import HuggingFaceInferenceAPI
6
- from llama_index.embeddings.huggingface import HuggingFaceEmbedding
7
- from huggingface_hub import InferenceClient
8
- from transformers import AutoTokenizer, AutoModel
9
- from deep_translator import GoogleTranslator
 
10
 
 
 
 
 
 
 
 
 
 
 
11
 
12
- # Ensure HF_TOKEN is set
13
- HF_TOKEN = os.getenv("HF_TOKEN")
14
- if not HF_TOKEN:
15
- raise ValueError("HF_TOKEN environment variable not set.")
16
-
17
- repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
18
- llm_client = InferenceClient(
19
- model=repo_id,
20
- token=HF_TOKEN,
21
- )
22
-
23
- # Configure Llama index settings
24
- Settings.llm = HuggingFaceInferenceAPI(
25
- model_name=repo_id,
26
- tokenizer_name=repo_id,
27
- context_window=3000,
28
- token=HF_TOKEN,
29
- max_new_tokens=512,
30
- generate_kwargs={"temperature": 0.1},
31
- )
32
- # Settings.embed_model = HuggingFaceEmbedding(
33
- # model_name="BAAI/bge-small-en-v1.5"
34
  # )
35
- # Replace the embedding model with XLM-R
 
 
 
 
 
 
36
  # Settings.embed_model = HuggingFaceEmbedding(
37
- # model_name="xlm-roberta-base" # XLM-RoBERTa model for multilingual support
38
  # )
39
- Settings.embed_model = HuggingFaceEmbedding(
40
- model_name="sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
41
- )
42
 
43
- # Configure tokenizer and model if required
44
- tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
45
- model = AutoModel.from_pretrained("xlm-roberta-base")
46
 
47
- PERSIST_DIR = "db"
48
- PDF_DIRECTORY = 'data'
49
 
50
- # Ensure directories exist
51
- os.makedirs(PDF_DIRECTORY, exist_ok=True)
52
- os.makedirs(PERSIST_DIR, exist_ok=True)
53
- chat_history = []
54
- current_chat_history = []
55
 
56
- def data_ingestion_from_directory():
57
- # Clear previous data by removing the persist directory
58
- if os.path.exists(PERSIST_DIR):
59
- shutil.rmtree(PERSIST_DIR) # Remove the persist directory and all its contents
60
 
61
- # Recreate the persist directory after removal
62
- os.makedirs(PERSIST_DIR, exist_ok=True)
63
 
64
- # Load new documents from the directory
65
- new_documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
66
 
67
- # Create a new index with the new documents
68
- index = VectorStoreIndex.from_documents(new_documents)
69
 
70
- # Persist the new index
71
- index.storage_context.persist(persist_dir=PERSIST_DIR)
72
 
73
- # def handle_query(query):
74
- # context_str = ""
75
 
76
- # # Build context from current chat history
77
- # for past_query, response in reversed(current_chat_history):
78
- # if past_query.strip():
79
- # context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
80
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  # chat_text_qa_msgs = [
82
  # (
83
  # "user",
84
  # """
85
- # You are the Taj Hotel voice chatbot and your name is Taj hotel helper. Your goal is to provide accurate, professional, and helpful answers to user queries based on the Taj hotel data. Always ensure your responses are clear and concise. Give response within 10-15 words only. You need to give an answer in the same language used by the user.
86
  # {context_str}
87
  # Question:
88
  # {query_str}
89
  # """
90
  # )
91
  # ]
 
 
 
 
 
 
 
 
92
 
 
 
 
93
 
94
-
95
- # text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
96
-
97
- # storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
98
- # index = load_index_from_storage(storage_context)
99
- # # context_str = ""
100
-
101
- # # # Build context from current chat history
102
- # # for past_query, response in reversed(current_chat_history):
103
- # # if past_query.strip():
104
- # # context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
105
-
106
- # query_engine = index.as_query_engine(text_qa_template=text_qa_template, context_str=context_str)
107
- # print(f"Querying: {query}")
108
- # answer = query_engine.query(query)
109
-
110
- # # Extracting the response
111
- # if hasattr(answer, 'response'):
112
- # response = answer.response
113
- # elif isinstance(answer, dict) and 'response' in answer:
114
- # response = answer['response']
115
- # else:
116
- # response = "I'm sorry, I couldn't find an answer to that."
117
-
118
- # # Append to chat history
119
- # current_chat_history.append((query, response))
120
  # return response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121
  def handle_query(query):
122
  chat_text_qa_msgs = [
123
  (
124
  "user",
125
  """
126
- You are the Hotel voice chatbot and your name is hotel helper. Your goal is to provide accurate, professional, and helpful answers to user queries based on the hotel's data. Always ensure your responses are clear and concise. Give response within 10-15 words only. You need to give an answer in the same language used by the user.
127
  {context_str}
128
  Question:
129
  {query_str}
130
  """
131
  )
132
  ]
 
133
  text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
134
-
135
  storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
136
  index = load_index_from_storage(storage_context)
137
  context_str = ""
@@ -139,30 +320,32 @@ def handle_query(query):
139
  if past_query.strip():
140
  context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
141
 
142
- query_engine = index.as_query_engine(text_qa_template=text_qa_template, context_str=context_str)
143
- print(query)
144
- answer = query_engine.query(query)
 
 
 
 
 
 
 
145
 
146
- if hasattr(answer, 'response'):
147
- response = answer.response
148
- elif isinstance(answer, dict) and 'response' in answer:
149
- response = answer['response']
150
- else:
151
- response = "Sorry, I couldn't find an answer."
152
  current_chat_history.append((query, response))
153
  return response
154
 
155
- app = Flask(__name__)
156
 
157
- # Data ingestion
158
- data_ingestion_from_directory()
159
 
160
  # Generate Response
161
- def generate_response(query, language):
162
- try:
163
- # Call the handle_query function to get the response
164
  bot_response = handle_query(query)
165
-
166
  # Map of supported languages
167
  supported_languages = {
168
  "hindi": "hi",
@@ -201,10 +384,10 @@ def generate_response(query, language):
201
  "ukrainian": "uk",
202
  "turkish": "tr"
203
  }
204
-
205
  # Initialize the translated text
206
  translated_text = bot_response
207
-
208
  # Translate only if the language is supported and not English
209
  try:
210
  if language in supported_languages:
@@ -217,31 +400,31 @@ def generate_response(query, language):
217
  # Handle translation errors
218
  print(f"Translation error: {e}")
219
  translated_text = "Sorry, I couldn't translate the response."
220
-
221
  # Append to chat history
222
  chat_history.append((query, translated_text))
223
- return translated_text
224
- except Exception as e:
225
  return f"Error fetching the response: {str(e)}"
226
 
227
- # Route for the homepage
228
- @app.route('/')
229
- def index():
230
- return render_template('index.html')
231
 
232
- # Route to handle chatbot messages
233
- @app.route('/chat', methods=['POST'])
234
- def chat():
235
- try:
236
  user_message = request.json.get("message")
237
  language = request.json.get("language")
238
- if not user_message:
239
- return jsonify({"response": "Please say something!"})
240
 
241
- bot_response = generate_response(user_message,language)
242
- return jsonify({"response": bot_response})
243
- except Exception as e:
244
- return jsonify({"response": f"An error occurred: {str(e)}"})
245
 
246
- if __name__ == '__main__':
247
  app.run(debug=True)
 
1
+ # import os
2
+ # import shutil
3
+ # from flask import Flask, render_template, request, jsonify
4
+ # from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
5
+ # from llama_index.llms.huggingface import HuggingFaceInferenceAPI
6
+ # from llama_index.embeddings.huggingface import HuggingFaceEmbedding
7
+ # from huggingface_hub import InferenceClient
8
+ # from transformers import AutoTokenizer, AutoModel
9
+ # from deep_translator import GoogleTranslator
10
+
11
 
12
+ # # Ensure HF_TOKEN is set
13
+ # HF_TOKEN = os.getenv("HF_TOKEN")
14
+ # if not HF_TOKEN:
15
+ # raise ValueError("HF_TOKEN environment variable not set.")
16
+
17
+ # repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
18
+ # llm_client = InferenceClient(
19
+ # model=repo_id,
20
+ # token=HF_TOKEN,
21
+ # )
22
 
23
+ # # Configure Llama index settings
24
+ # Settings.llm = HuggingFaceInferenceAPI(
25
+ # model_name=repo_id,
26
+ # tokenizer_name=repo_id,
27
+ # context_window=3000,
28
+ # token=HF_TOKEN,
29
+ # max_new_tokens=512,
30
+ # generate_kwargs={"temperature": 0.1},
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  # )
32
+ # # Settings.embed_model = HuggingFaceEmbedding(
33
+ # # model_name="BAAI/bge-small-en-v1.5"
34
+ # # )
35
+ # # Replace the embedding model with XLM-R
36
+ # # Settings.embed_model = HuggingFaceEmbedding(
37
+ # # model_name="xlm-roberta-base" # XLM-RoBERTa model for multilingual support
38
+ # # )
39
  # Settings.embed_model = HuggingFaceEmbedding(
40
+ # model_name="sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
41
  # )
 
 
 
42
 
43
+ # # Configure tokenizer and model if required
44
+ # tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
45
+ # model = AutoModel.from_pretrained("xlm-roberta-base")
46
 
47
+ # PERSIST_DIR = "db"
48
+ # PDF_DIRECTORY = 'data'
49
 
50
+ # # Ensure directories exist
51
+ # os.makedirs(PDF_DIRECTORY, exist_ok=True)
52
+ # os.makedirs(PERSIST_DIR, exist_ok=True)
53
+ # chat_history = []
54
+ # current_chat_history = []
55
 
56
+ # def data_ingestion_from_directory():
57
+ # # Clear previous data by removing the persist directory
58
+ # if os.path.exists(PERSIST_DIR):
59
+ # shutil.rmtree(PERSIST_DIR) # Remove the persist directory and all its contents
60
 
61
+ # # Recreate the persist directory after removal
62
+ # os.makedirs(PERSIST_DIR, exist_ok=True)
63
 
64
+ # # Load new documents from the directory
65
+ # new_documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
66
 
67
+ # # Create a new index with the new documents
68
+ # index = VectorStoreIndex.from_documents(new_documents)
69
 
70
+ # # Persist the new index
71
+ # index.storage_context.persist(persist_dir=PERSIST_DIR)
72
 
73
+ # # def handle_query(query):
74
+ # # context_str = ""
75
 
76
+ # # # Build context from current chat history
77
+ # # for past_query, response in reversed(current_chat_history):
78
+ # # if past_query.strip():
79
+ # # context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
80
 
81
+ # # chat_text_qa_msgs = [
82
+ # # (
83
+ # # "user",
84
+ # # """
85
+ # # You are the Taj Hotel voice chatbot and your name is Taj hotel helper. Your goal is to provide accurate, professional, and helpful answers to user queries based on the Taj hotel data. Always ensure your responses are clear and concise. Give response within 10-15 words only. You need to give an answer in the same language used by the user.
86
+ # # {context_str}
87
+ # # Question:
88
+ # # {query_str}
89
+ # # """
90
+ # # )
91
+ # # ]
92
+
93
+
94
+
95
+ # # text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
96
+
97
+ # # storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
98
+ # # index = load_index_from_storage(storage_context)
99
+ # # # context_str = ""
100
+
101
+ # # # # Build context from current chat history
102
+ # # # for past_query, response in reversed(current_chat_history):
103
+ # # # if past_query.strip():
104
+ # # # context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
105
+
106
+ # # query_engine = index.as_query_engine(text_qa_template=text_qa_template, context_str=context_str)
107
+ # # print(f"Querying: {query}")
108
+ # # answer = query_engine.query(query)
109
+
110
+ # # # Extracting the response
111
+ # # if hasattr(answer, 'response'):
112
+ # # response = answer.response
113
+ # # elif isinstance(answer, dict) and 'response' in answer:
114
+ # # response = answer['response']
115
+ # # else:
116
+ # # response = "I'm sorry, I couldn't find an answer to that."
117
+
118
+ # # # Append to chat history
119
+ # # current_chat_history.append((query, response))
120
+ # # return response
121
+ # def handle_query(query):
122
  # chat_text_qa_msgs = [
123
  # (
124
  # "user",
125
  # """
126
+ # You are the Hotel voice chatbot and your name is hotel helper. Your goal is to provide accurate, professional, and helpful answers to user queries based on the hotel's data. Always ensure your responses are clear and concise. Give response within 10-15 words only. You need to give an answer in the same language used by the user.
127
  # {context_str}
128
  # Question:
129
  # {query_str}
130
  # """
131
  # )
132
  # ]
133
+ # text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
134
+
135
+ # storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
136
+ # index = load_index_from_storage(storage_context)
137
+ # context_str = ""
138
+ # for past_query, response in reversed(current_chat_history):
139
+ # if past_query.strip():
140
+ # context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
141
 
142
+ # query_engine = index.as_query_engine(text_qa_template=text_qa_template, context_str=context_str)
143
+ # print(query)
144
+ # answer = query_engine.query(query)
145
 
146
+ # if hasattr(answer, 'response'):
147
+ # response = answer.response
148
+ # elif isinstance(answer, dict) and 'response' in answer:
149
+ # response = answer['response']
150
+ # else:
151
+ # response = "Sorry, I couldn't find an answer."
152
+ # current_chat_history.append((query, response))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
153
  # return response
154
+
155
+ # app = Flask(__name__)
156
+
157
+ # # Data ingestion
158
+ # data_ingestion_from_directory()
159
+
160
+ # # Generate Response
161
+ # def generate_response(query, language):
162
+ # try:
163
+ # # Call the handle_query function to get the response
164
+ # bot_response = handle_query(query)
165
+
166
+ # # Map of supported languages
167
+ # supported_languages = {
168
+ # "hindi": "hi",
169
+ # "bengali": "bn",
170
+ # "telugu": "te",
171
+ # "marathi": "mr",
172
+ # "tamil": "ta",
173
+ # "gujarati": "gu",
174
+ # "kannada": "kn",
175
+ # "malayalam": "ml",
176
+ # "punjabi": "pa",
177
+ # "odia": "or",
178
+ # "urdu": "ur",
179
+ # "assamese": "as",
180
+ # "sanskrit": "sa",
181
+ # "arabic": "ar",
182
+ # "australian": "en-AU",
183
+ # "bangla-india": "bn-IN",
184
+ # "chinese": "zh-CN",
185
+ # "dutch": "nl",
186
+ # "french": "fr",
187
+ # "filipino": "tl",
188
+ # "greek": "el",
189
+ # "indonesian": "id",
190
+ # "italian": "it",
191
+ # "japanese": "ja",
192
+ # "korean": "ko",
193
+ # "latin": "la",
194
+ # "nepali": "ne",
195
+ # "portuguese": "pt",
196
+ # "romanian": "ro",
197
+ # "russian": "ru",
198
+ # "spanish": "es",
199
+ # "swedish": "sv",
200
+ # "thai": "th",
201
+ # "ukrainian": "uk",
202
+ # "turkish": "tr"
203
+ # }
204
+
205
+ # # Initialize the translated text
206
+ # translated_text = bot_response
207
+
208
+ # # Translate only if the language is supported and not English
209
+ # try:
210
+ # if language in supported_languages:
211
+ # target_lang = supported_languages[language]
212
+ # translated_text = GoogleTranslator(source='en', target=target_lang).translate(bot_response)
213
+ # print(translated_text)
214
+ # else:
215
+ # print(f"Unsupported language: {language}")
216
+ # except Exception as e:
217
+ # # Handle translation errors
218
+ # print(f"Translation error: {e}")
219
+ # translated_text = "Sorry, I couldn't translate the response."
220
+
221
+ # # Append to chat history
222
+ # chat_history.append((query, translated_text))
223
+ # return translated_text
224
+ # except Exception as e:
225
+ # return f"Error fetching the response: {str(e)}"
226
+
227
+ # # Route for the homepage
228
+ # @app.route('/')
229
+ # def index():
230
+ # return render_template('index.html')
231
+
232
+ # # Route to handle chatbot messages
233
+ # @app.route('/chat', methods=['POST'])
234
+ # def chat():
235
+ # try:
236
+ # user_message = request.json.get("message")
237
+ # language = request.json.get("language")
238
+ # if not user_message:
239
+ # return jsonify({"response": "Please say something!"})
240
+
241
+ # bot_response = generate_response(user_message,language)
242
+ # return jsonify({"response": bot_response})
243
+ # except Exception as e:
244
+ # return jsonify({"response": f"An error occurred: {str(e)}"})
245
+
246
+ # if __name__ == '__main__':
247
+ # app.run(debug=True)
248
+
249
+ import os
250
+ import shutil
251
+ from flask import Flask, render_template, request, jsonify
252
+ from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
253
+ from llama_index.embeddings.huggingface import HuggingFaceEmbedding
254
+ from deep_translator import GoogleTranslator
255
+ from vertexai.preview.generative_models import GenerativeModel
256
+
257
+ # Ensure HF_TOKEN is set (optional if you're not using HuggingFace embeddings anymore)
258
+ HF_TOKEN = os.getenv("HF_TOKEN")
259
+ if not HF_TOKEN:
260
+ raise ValueError("HF_TOKEN environment variable not set.")
261
+
262
+ # Configure Gemini API
263
+ GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
264
+ if not GOOGLE_API_KEY:
265
+ raise ValueError("GOOGLE_API_KEY environment variable not set.")
266
+
267
+ # Initialize Gemini Flash 1.0
268
+ gemini_flash_model = GenerativeModel("gemini-flash-1.0")
269
+
270
+ # Configure Llama index settings
271
+ Settings.embed_model = HuggingFaceEmbedding(
272
+ model_name="sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
273
+ )
274
+
275
+ PERSIST_DIR = "db"
276
+ PDF_DIRECTORY = 'data'
277
+
278
+ # Ensure directories exist
279
+ os.makedirs(PDF_DIRECTORY, exist_ok=True)
280
+ os.makedirs(PERSIST_DIR, exist_ok=True)
281
+ chat_history = []
282
+ current_chat_history = []
283
+
284
+ def data_ingestion_from_directory():
285
+ # Clear previous data by removing the persist directory
286
+ if os.path.exists(PERSIST_DIR):
287
+ shutil.rmtree(PERSIST_DIR) # Remove the persist directory and all its contents
288
+
289
+ # Recreate the persist directory after removal
290
+ os.makedirs(PERSIST_DIR, exist_ok=True)
291
+
292
+ # Load new documents from the directory
293
+ new_documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
294
+
295
+ # Create a new index with the new documents
296
+ index = VectorStoreIndex.from_documents(new_documents)
297
+
298
+ # Persist the new index
299
+ index.storage_context.persist(persist_dir=PERSIST_DIR)
300
+
301
  def handle_query(query):
302
  chat_text_qa_msgs = [
303
  (
304
  "user",
305
  """
306
+ You are the Hotel voice chatbot and your name is hotel helper. Your goal is to provide accurate, professional, and helpful answers to user queries based on the hotel's data. Always ensure your responses are clear and concise. Give response within 10-15 words only. You need to give an answer in the same language used by the user.
307
  {context_str}
308
  Question:
309
  {query_str}
310
  """
311
  )
312
  ]
313
+
314
  text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
315
+
316
  storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
317
  index = load_index_from_storage(storage_context)
318
  context_str = ""
 
320
  if past_query.strip():
321
  context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
322
 
323
+ # Use Gemini Flash 1.0 to generate a response
324
+ prompt = f"""
325
+ Context: {context_str}
326
+ Question: {query}
327
+ Answer:
328
+ """
329
+ gemini_response = gemini_flash_model.generate(prompt=prompt, max_output_tokens=100, temperature=0.1)
330
+
331
+ # Extract the response
332
+ response = gemini_response.candidates[0].content.parts[0].text
333
 
334
+ # Append to chat history
 
 
 
 
 
335
  current_chat_history.append((query, response))
336
  return response
337
 
338
+ app = Flask(__name__)
339
 
340
+ # Data ingestion
341
+ data_ingestion_from_directory()
342
 
343
  # Generate Response
344
+ def generate_response(query, language):
345
+ try:
346
+ # Call the handle_query function to get the response
347
  bot_response = handle_query(query)
348
+
349
  # Map of supported languages
350
  supported_languages = {
351
  "hindi": "hi",
 
384
  "ukrainian": "uk",
385
  "turkish": "tr"
386
  }
387
+
388
  # Initialize the translated text
389
  translated_text = bot_response
390
+
391
  # Translate only if the language is supported and not English
392
  try:
393
  if language in supported_languages:
 
400
  # Handle translation errors
401
  print(f"Translation error: {e}")
402
  translated_text = "Sorry, I couldn't translate the response."
403
+
404
  # Append to chat history
405
  chat_history.append((query, translated_text))
406
+ return translated_text
407
+ except Exception as e:
408
  return f"Error fetching the response: {str(e)}"
409
 
410
+ # Route for the homepage
411
+ @app.route('/')
412
+ def index():
413
+ return render_template('index.html')
414
 
415
+ # Route to handle chatbot messages
416
+ @app.route('/chat', methods=['POST'])
417
+ def chat():
418
+ try:
419
  user_message = request.json.get("message")
420
  language = request.json.get("language")
421
+ if not user_message:
422
+ return jsonify({"response": "Please say something!"})
423
 
424
+ bot_response = generate_response(user_message, language)
425
+ return jsonify({"response": bot_response})
426
+ except Exception as e:
427
+ return jsonify({"response": f"An error occurred: {str(e)}"})
428
 
429
+ if __name__ == '__main__':
430
  app.run(debug=True)