Gopikanth123 committed · verified
Commit 2064f47 · 1 Parent(s): 86bf60b

Update main.py
Files changed (1)
  1. main.py +12 -18
main.py CHANGED
@@ -252,20 +252,16 @@ from flask import Flask, render_template, request, jsonify
 from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 from deep_translator import GoogleTranslator
-from vertexai.preview.generative_models import GenerativeModel
+import google.generativeai as genai
 
-# Ensure HF_TOKEN is set (optional if you're not using HuggingFace embeddings anymore)
-HF_TOKEN = os.getenv("HF_TOKEN")
-if not HF_TOKEN:
-    raise ValueError("HF_TOKEN environment variable not set.")
-
-# Configure Gemini API
+# Ensure GOOGLE_API_KEY is set
 GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
 if not GOOGLE_API_KEY:
     raise ValueError("GOOGLE_API_KEY environment variable not set.")
 
-# Initialize Gemini Flash 1.0
-gemini_flash_model = GenerativeModel("gemini-flash-1.0")
+# Configure Gemini model
+genai.configure(api_key=GOOGLE_API_KEY)
+gemini_model = genai.GenerativeModel('gemini-flash-1.0')
 
 # Configure Llama index settings
 Settings.embed_model = HuggingFaceEmbedding(
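
Note: the switch from the Vertex AI SDK to the google-generativeai client drops the max_output_tokens and temperature settings that the removed generate() call passed. For reference, a minimal sketch of how those knobs carry over in this SDK; the model id 'gemini-flash-1.0' is kept exactly as committed, though Google's published ids follow the 'gemini-1.5-flash' pattern, so it may need adjusting:

import os
import google.generativeai as genai

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
gemini_model = genai.GenerativeModel("gemini-flash-1.0")  # model id as committed

# The removed vertexai call passed these settings explicitly; in this SDK
# they go through generation_config instead.
response = gemini_model.generate_content(
    "Say hello in one short sentence.",  # illustrative prompt
    generation_config=genai.types.GenerationConfig(
        max_output_tokens=100,
        temperature=0.1,
    ),
)
print(response.text)
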
@@ -303,14 +299,13 @@ def handle_query(query):
         (
             "user",
             """
-            You are the Hotel voice chatbot and your name is hotel helper. Your goal is to provide accurate, professional, and helpful answers to user queries based on the hotel's data. Always ensure your responses are clear and concise. Give response within 10-15 words only. You need to give an answer in the same language used by the user.
+            You are the Hotel voice chatbot and your name is hotel helper. Your goal is to provide accurate, professional, and helpful answers to user queries based on the hotel's data. Always ensure your responses are clear and concise. Give response within 10-15 words only. You need to give an answer in the same language used by the user.
            {context_str}
            Question:
            {query_str}
            """
         )
     ]
-
     text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
 
     storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
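
Note: the new code in the next hunk builds a query_engine from this template but never invokes it; the answer comes from the direct Gemini call instead. For reference, a hypothetical sketch of how the template would normally drive a LlamaIndex query, assuming index is loaded from PERSIST_DIR as above (the question string is illustrative):

# Hypothetical usage sketch for text_qa_template with LlamaIndex.
index = load_index_from_storage(storage_context)
query_engine = index.as_query_engine(text_qa_template=text_qa_template)
answer = query_engine.query("What time is check-out?")
print(str(answer))
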
@@ -320,18 +315,17 @@ def handle_query(query):
     if past_query.strip():
         context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
 
-    # Use Gemini Flash 1.0 to generate a response
+    query_engine = index.as_query_engine(text_qa_template=text_qa_template, context_str=context_str)
+    print(query)
+
+    # Use Gemini for generating the response
     prompt = f"""
     Context: {context_str}
     Question: {query}
-    Answer:
     """
-    gemini_response = gemini_flash_model.generate(prompt=prompt, max_output_tokens=100, temperature=0.1)
-
-    # Extract the response
-    response = gemini_response.candidates[0].content.parts[0].text
+    gemini_response = gemini_model.generate_content(prompt)
+    response = gemini_response.text
 
-    # Append to chat history
     current_chat_history.append((query, response))
     return response
 
 
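Taken together, the new handle_query path reads as follows. This is a condensed reconstruction, not code from the commit: the enclosing loop over current_chat_history is assumed from context, and as noted above the query_engine assignment and print(query) have no effect on the returned answer.

def handle_query(query):
    context_str = ""
    # Assumed from context: replay past turns into the context string.
    for past_query, response in current_chat_history:
        if past_query.strip():
            context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"

    # Only this direct Gemini call produces the answer in the committed code.
    prompt = f"""
    Context: {context_str}
    Question: {query}
    """
    gemini_response = gemini_model.generate_content(prompt)
    response = gemini_response.text

    current_chat_history.append((query, response))
    return response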