import json

import gradio as gr
import pandas as pd
import spaces
import torch
from sentence_transformers import SentenceTransformer, util
from transformers import (
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
)

# Select the device; the original code referenced `device` without defining it
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the CSV file with precomputed embeddings
df = pd.read_csv("RBDx10kstats.csv")
df["embedding"] = df["embedding"].apply(json.loads)  # Convert JSON string back to list

# Convert embeddings to a tensor for efficient retrieval
embeddings = torch.tensor(df["embedding"].tolist(), device=device)

# Load the Sentence Transformer model used to embed queries
model = SentenceTransformer("all-MiniLM-L6-v2", device=device)

# Load the extractive QA model for response generation
tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased-distilled-squad")
model_response = AutoModelForQuestionAnswering.from_pretrained(
    "distilbert/distilbert-base-uncased-distilled-squad"
).to(device)

# Load the NLU model for intent detection. Note: this checkpoint is an SST-2
# sentiment classifier (label 0 = negative, label 1 = positive) used here as a
# stand-in intent detector. The original referenced a non-existent
# "bert-base-uncased-finetuned-sst-2-english" checkpoint; the published one is
# the DistilBERT variant below, which shares the uncased BERT vocabulary, so
# reusing the QA tokenizer above still works.
nlu_model = AutoModelForSequenceClassification.from_pretrained(
    "distilbert/distilbert-base-uncased-finetuned-sst-2-english"
).to(device)


def retrieve_relevant_doc(query):
    """Find the most relevant document via cosine similarity over the embeddings."""
    query_embedding = model.encode(query, convert_to_tensor=True, device=device)
    similarities = util.pytorch_cos_sim(query_embedding, embeddings)[0]
    best_match_idx = torch.argmax(similarities).item()
    return df.iloc[best_match_idx]["Abstract"]


def detect_intent(query):
    """Classify the query with the NLU model and return the predicted label id."""
    inputs = tokenizer(query, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = nlu_model(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])
    intent = torch.argmax(outputs.logits).item()
    return intent


# Only the top-level entry point requests a GPU; the helpers above run inside
# the same allocation, so they do not need their own @spaces.GPU decorators.
@spaces.GPU(duration=120)
def generate_response(query):
    relevant_doc = retrieve_relevant_doc(query)
    intent = detect_intent(query)
    if intent == 0:
        # Handle intent 0 (e.g., informational query) with extractive QA.
        # DistilBERT QA models have no .generate(); they predict the start and
        # end of an answer span inside the context, which is decoded below.
        inputs = tokenizer(
            query, relevant_doc, return_tensors="pt", truncation=True, max_length=512
        ).to(device)
        with torch.no_grad():
            outputs = model_response(**inputs)
        start_idx = torch.argmax(outputs.start_logits)
        end_idx = torch.argmax(outputs.end_logits) + 1
        response = tokenizer.decode(
            inputs["input_ids"][0][start_idx:end_idx], skip_special_tokens=True
        )
    else:
        # Handle intent 1 (e.g., opinion-based query) and any other label:
        # ask the user to rephrase (the original returned the same string for both).
        response = "I'm not sure I understand your question. Can you please rephrase?"
    return response


# Create a Gradio interface
iface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=2, placeholder="Enter your query here..."),
    outputs="text",
    title="RAG Chatbot",
    description=(
        "This chatbot retrieves relevant documents based on your query and "
        "generates responses using AI models."
    ),
)

# Launch the Gradio interface
iface.launch()
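
# ---------------------------------------------------------------------------
# Sketch (assumption, not part of the original app): how a CSV like
# 'RBDx10kstats.csv' could be prepared. The app above expects an 'Abstract'
# text column plus an 'embedding' column of JSON-encoded lists produced by the
# same 'all-MiniLM-L6-v2' model used for queries, so that query and document
# vectors live in the same space. Uncomment and adapt to regenerate the file.
#
# prep_df = pd.read_csv("abstracts.csv")  # hypothetical source file with an 'Abstract' column
# prep_model = SentenceTransformer("all-MiniLM-L6-v2")
# prep_df["embedding"] = prep_df["Abstract"].apply(
#     lambda text: json.dumps(prep_model.encode(text).tolist())
# )
# prep_df.to_csv("RBDx10kstats.csv", index=False)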