import os
import gradio as gr
import faiss
import numpy as np
import pickle
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModelForCausalLM

HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("HF_TOKEN environment variable not set. Please configure it in Space settings.")
# Load precomputed chunks and FAISS index
with open("chunks.pkl", "rb") as f:
    chunks = pickle.load(f)
index = faiss.read_index("index.faiss")

# Load embedding model (same as used in preprocessing)
embedding_model = SentenceTransformer("sentence-transformers/paraphrase-multilingual-mpnet-base-v2")
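
# A minimal sketch of the offline preprocessing assumed by chunks.pkl and index.faiss above.
# The function name, the character-based chunking, and the flat L2 index are assumptions for
# illustration, not the actual preprocessing script; it is defined here for reference only and
# is never called by this app.
def build_index(documents, out_chunks="chunks.pkl", out_index="index.faiss", chunk_size=500):
    # Split each document into fixed-size character chunks (the real chunking strategy is unknown)
    doc_chunks = [doc[i:i + chunk_size] for doc in documents for i in range(0, len(doc), chunk_size)]
    # Embed with the same multilingual model loaded above so query and chunk vectors are comparable
    vectors = embedding_model.encode(doc_chunks)
    # Flat (exact) L2 index; index.search() used below works the same for any FAISS index type
    idx = faiss.IndexFlatL2(vectors.shape[1])
    idx.add(np.asarray(vectors, dtype="float32"))
    faiss.write_index(idx, out_index)
    with open(out_chunks, "wb") as f:
        pickle.dump(doc_chunks, f)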
# Load the generator model and tokenizer (AraGPT2)
model_name = "aubmindlab/aragpt2-base"
tokenizer = AutoTokenizer.from_pretrained(model_name, token=HF_TOKEN, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, token=HF_TOKEN, trust_remote_code=True)
# RAG function: retrieve the top-k most similar chunks and generate an answer
def get_response(query, k=3):
    query_embedding = embedding_model.encode([query])
    distances, indices = index.search(np.array(query_embedding), k)
    retrieved_chunks = [chunks[i] for i in indices[0]]
    context = " ".join(retrieved_chunks)
    prompt = f"Based on the following documents: {context}, answer the question: {query}"
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens so the prompt is not echoed back in the answer
    # (return_full_text is a pipeline argument and is not accepted by model.generate())
    generated = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(generated, skip_special_tokens=True).strip()
# Gradio interface (custom CSS gives the chat area right-to-left alignment)
custom_css = """
#chatbot {
    direction: rtl;
    text-align: right;
    width: 100%;
}
"""

with gr.Blocks(title="المتحدث الآلي للتشريعات المحلية لإمارة دبي", css=custom_css) as demo:
    gr.Markdown("# Dubai Legislation Chatbot\nاسأل أي سؤال حول تشريعات دبي - نسخة تجريبية (تصميم وتنفيذ م. أسامة الخطيب)")
    chatbot = gr.Chatbot(elem_id="chatbot")  # elem_id lets the CSS above target this component
    msg = gr.Textbox(placeholder="اكتب سؤالك هنا...", rtl=True)
    clear = gr.Button("مسح")
    def user(user_message, history):
        # Append the user's turn to the history and clear the textbox
        return "", history + [[user_message, None]]

    def bot(history):
        user_message = history[-1][0]
        bot_message = get_response(user_message)  # RAG retrieval + generation defined above
        history[-1][1] = bot_message
        return history

    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    clear.click(lambda: None, None, chatbot, queue=False)
# Launch the app (the custom CSS is passed to gr.Blocks above; launch() does not accept a css argument)
demo.launch(share=True)