import os
import json
from langchain.document_loaders import TextLoader, DirectoryLoader
from sentence_transformers import SentenceTransformer
import faiss
import torch
import numpy as np
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, BitsAndBytesConfig
from datetime import datetime
import gradio as gr
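
# Retrieval-augmented generation pipeline: embed the query with a SentenceTransformer,
# retrieve the top-k document chunks from a GPU FAISS index, and generate an answer
# with a 4-bit quantized Mistral-7B-Instruct model, served through a Gradio UI.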
class DocumentRetrievalAndGeneration:
    def __init__(self, embedding_model_name, lm_model_id, data_folder, faiss_index_path):
        self.documents = self.load_documents(data_folder)
        self.embeddings = SentenceTransformer(embedding_model_name)
        self.gpu_index = self.load_faiss_index(faiss_index_path)
        self.llm = self.initialize_llm(lm_model_id)
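
    # Load every text file under folder_path with LangChain's DirectoryLoader.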
    def load_documents(self, folder_path):
        loader = DirectoryLoader(folder_path, loader_cls=TextLoader)
        documents = loader.load()
        print('Length of documents:', len(documents))
        return documents
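
    # Read a prebuilt FAISS index from disk and move it onto GPU 0 for faster search.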
    def load_faiss_index(self, faiss_index_path):
        cpu_index = faiss.read_index(faiss_index_path)
        gpu_resource = faiss.StandardGpuResources()
        gpu_index = faiss.index_cpu_to_gpu(gpu_resource, 0, cpu_index)
        return gpu_index
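
    # Note: this app assumes faiss_index_path already exists on disk. A minimal
    # sketch of how such an index could be built with the same embedding model
    # (hypothetical helper, not part of this app):
    #
    #   embedder = SentenceTransformer(embedding_model_name)
    #   vectors = embedder.encode([d.page_content for d in documents])
    #   index = faiss.IndexFlatL2(vectors.shape[1])
    #   index.add(np.asarray(vectors, dtype='float32'))
    #   faiss.write_index(index, faiss_index_path)

    # Load the causal LM with 4-bit NF4 quantization (bitsandbytes) so a 7B model
    # fits on a single GPU; bfloat16 is used for the de-quantized compute.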
    def initialize_llm(self, model_id):
        bnb_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16
        )
        model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        generate_text = pipeline(
            model=model,
            tokenizer=tokenizer,
            return_full_text=True,
            task='text-generation',
            temperature=0.6,
            max_new_tokens=2048,
        )
        return generate_text
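
    # Retrieve the top-5 nearest chunks for the query and build the LLM prompt.
    # Note: this assumes FAISS ids line up with the order of self.documents.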
    def query_and_generate_response(self, query):
        query_embedding = self.embeddings.encode(query, convert_to_tensor=True).cpu().numpy()
        distances, indices = self.gpu_index.search(np.array([query_embedding]), k=5)
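        # Concatenate the retrieved chunks into one context string, separated by rules.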
content = ""
for idx in indices[0]:
content += "-" * 50 + "\n"
content += self.documents[idx].page_content + "\n"
print(self.documents[idx].page_content)
print("############################")
prompt=f"""
You are a knowledgeable assistant with access to a comprehensive database.
I need you to answer my question and provide related information in a specific format.
I have provided five relatable json files {content}, choose the most suitable chunks for answering the query
Here's what I need:
Include a final answer without additional comments, sign-offs, or extra phrases. Be direct and to the point.
content
Here's my question:
Query:{query}
Solution==>
Example1
Query: "How to use IPU1_0 instead of A15_0 to process NDK in TDA2x-EVM",
Solution: "To use IPU1_0 instead of A15_0 to process NDK in TDA2x-EVM, you need to modify the configuration file of the NDK application. Specifically, change the processor reference from 'A15_0' to 'IPU1_0'.",
Example2
Query: "Can BQ25896 support I2C interface?",
Solution: "Yes, the BQ25896 charger supports the I2C interface for communication.",
"""
# prompt = f"Query: {query}\nSolution: {content}\n"
# Encode and prepare inputs
messages = [{"role": "user", "content": prompt}]
encodeds = self.llm.tokenizer.apply_chat_template(messages, return_tensors="pt")
model_inputs = encodeds.to(self.llm.device)
        # Perform inference and measure time
        start_time = datetime.now()
        generated_ids = self.llm.model.generate(model_inputs, max_new_tokens=1000, do_sample=True)
        elapsed_time = datetime.now() - start_time
        # Decode and return output
        decoded = self.llm.tokenizer.batch_decode(generated_ids)
        generated_response = decoded[0]
        print("Generated response:", generated_response)
        print("Time elapsed:", elapsed_time)
        print("Device in use:", self.llm.device)
        return generated_response, content
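
    # Gradio callback: returns (generated answer, retrieved context), matching
    # the two output textboxes defined in the interface below.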
    def qa_infer_gradio(self, query):
        response = self.query_and_generate_response(query)
        return response
if __name__ == "__main__":
    # Example usage
    embedding_model_name = 'flax-sentence-embeddings/all_datasets_v3_MiniLM-L12'
    lm_model_id = "mistralai/Mistral-7B-Instruct-v0.2"
    data_folder = 'sample_embedding_folder'
    faiss_index_path = 'faiss_index_new_model3.index'

    doc_retrieval_gen = DocumentRetrievalAndGeneration(embedding_model_name, lm_model_id, data_folder, faiss_index_path)
    # Define Gradio interface function
    def launch_interface():
        css_code = """
        .gradio-container {
            background-color: #daccdb;
        }
        /* Button styling for all buttons */
        button {
            background-color: #927fc7; /* Default color for all other buttons */
            color: black;
            border: 1px solid black;
            padding: 10px;
            margin-right: 10px;
            font-size: 16px; /* Increase font size */
            font-weight: bold; /* Make text bold */
        }
        """
EXAMPLES = ["Does the VIP modules & CSI2 module could work simultaneously? ",
"I'm using Code Composer Studio 5.4.0.00091 and enabled FPv4SPD16 floating point support for CortexM4 in TDA2. However, after building the project, the .asm file shows --float_support=vfplib instead of FPv4SPD16. Why is this happening?",
"Could you clarify the maximum number of cameras that can be connected simultaneously to the video input ports on the TDA2x SoC, considering it supports up to 10 multiplexed input ports and includes 3 dedicated video input modules?"]
file_path = "ticketNames.txt"
# Read the file content
with open(file_path, "r") as file:
content = file.read()
ticket_names = json.loads(content)
dropdown = gr.Dropdown(label="Sample queries", choices=ticket_names)
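        # Note: the dropdown above is built but never passed to gr.Interface, so it
        # does not appear in the UI; only the Textbox input is used.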
        # Define Gradio interface
        interface = gr.Interface(
            fn=doc_retrieval_gen.qa_infer_gradio,
            inputs=[gr.Textbox(label="QUERY", placeholder="Enter your query here")],
            outputs=[gr.Textbox(label="SOLUTION"), gr.Textbox(label="RELATED QUERIES")],
            examples=EXAMPLES,
            cache_examples=False,
            allow_flagging='never',
            css=css_code
        )

        # Launch Gradio interface
        interface.launch(debug=True)
    # Launch the interface
    launch_interface()