import gradio as gr
from datasets import load_dataset
import os
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, BitsAndBytesConfig
import torch
from threading import Thread
from sentence_transformers import SentenceTransformer
import numpy as np
token = os.environ["HF_TOKEN"]
ST = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
dataset = load_dataset("Yoxas/statistical_literacyv2")
data = dataset["train"]
# Convert the string embeddings to numerical arrays and ensure they are 2D
def convert_and_ensure_2d_embeddings(example):
    # Clean the embedding string
    embedding_str = example['embedding']
    embedding_str = embedding_str.replace('\n', ' ').replace('...', '')
    embedding_list = list(map(float, embedding_str.strip("[]").split()))
    embeddings = np.array(embedding_list, dtype=np.float32)
    # Ensure the embeddings are 2-dimensional
    if embeddings.ndim == 1:
        embeddings = embeddings.reshape(1, -1)
    return {'embedding': embeddings}
# Apply the function to ensure embeddings are 2-dimensional and of type float32
data = data.map(convert_and_ensure_2d_embeddings)
# Flatten embeddings if they are nested 2D arrays
def flatten_embeddings(example):
    # `map` stores columns back as plain Python lists, so convert to an array before checking shape
    embedding = np.asarray(example['embedding'], dtype=np.float32)
    if embedding.ndim == 2 and embedding.shape[0] == 1:
        embedding = embedding.flatten()
    return {'embedding': embedding}
data = data.map(flatten_embeddings)
# Ensure embeddings are in the correct shape for FAISS
embeddings = np.array(data['embedding'], dtype=np.float32)
# Add FAISS index
data.add_faiss_index_from_external_arrays(embeddings, "embedding")
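# Optional sanity check (illustrative only; the query string below is made up): querying the
# freshly built "embedding" index with a vector from the same encoder should return the
# closest passages from the dataset.
# q = ST.encode("What is statistical literacy?")
# scores, examples = data.get_nearest_examples("embedding", q, k=3)
# print(scores, examples["text"][0][:200])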
model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
# use quantization to lower GPU usage
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    quantization_config=bnb_config,
    token=token,
)
# Llama-3 marks the end of a turn with <|eot_id|>, so stop on it as well as on the EOS token
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]
SYS_PROMPT = """You are an assistant for answering questions.
You are given the extracted parts of a long document and a question. Provide a conversational answer.
If you don't know the answer, just say "I do not know." Don't make up an answer."""
def search(query: str, k: int = 3):
    """Embed a new query and return the k most similar documents from the dataset."""
    embedded_query = ST.encode(query)  # embed the new query
    scores, retrieved_examples = data.get_nearest_examples(  # retrieve results
        "embedding",
        embedded_query,  # compare the embedded query against the dataset embeddings
        k=k,  # keep only the top k results
    )
    return scores, retrieved_examples
def format_prompt(prompt, retrieved_documents, k):
    """Build the user prompt from the question and the retrieved documents."""
    PROMPT = f"Question:{prompt}\nContext:"
    for idx in range(k):
        PROMPT += f"{retrieved_documents['text'][idx]}\n"
    return PROMPT
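# Illustrative composition of the two helpers above (the example question is made up):
# scores, docs = search("How should a p-value be interpreted?", k=1)
# prompt = format_prompt("How should a p-value be interpreted?", docs, k=1)
# -> "Question:How should a p-value be interpreted?\nContext:<top retrieved passage>\n"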
@spaces.GPU(duration=150)
def talk(prompt, history):
    k = 1  # number of retrieved documents
    scores, retrieved_documents = search(prompt, k)
    formatted_prompt = format_prompt(prompt, retrieved_documents, k)
    formatted_prompt = formatted_prompt[:2000]  # truncate the context to avoid GPU OOM
    messages = [{"role": "system", "content": SYS_PROMPT}, {"role": "user", "content": formatted_prompt}]
    # tell the model to generate
    input_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)
    # Stream the generation so tokens appear in the chat as they are produced
    streamer = TextIteratorStreamer(
        tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
    )
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=1024,
        do_sample=True,
        top_p=0.95,
        temperature=0.75,
        eos_token_id=terminators,
    )
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()
    outputs = []
    for text in streamer:
        outputs.append(text)
        print(outputs)
        yield "".join(outputs)
TITLE = "# RAG"
DESCRIPTION = """
A RAG pipeline with a chatbot interface.

Resources used to build this project:
* Embedding model: https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1
* Dataset: https://huggingface.co/datasets/not-lain/wikipedia
* FAISS docs: https://huggingface.co/docs/datasets/v2.18.0/en/package_reference/main_classes#datasets.Dataset.add_faiss_index
* Chatbot: https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct
"""
demo = gr.ChatInterface(
    fn=talk,
    chatbot=gr.Chatbot(
        show_label=True,
        show_share_button=True,
        show_copy_button=True,
        likeable=True,
        layout="bubble",
        bubble_full_width=False,
    ),
    theme="soft",
    examples=[["what's anarchy?"]],
    title=TITLE,
    description=DESCRIPTION,
)
demo.launch(debug=True)