import gradio as gr
from transformers import AutoModel, AutoTokenizer
import torch
import torch.nn.functional as F

# Load embedding model and tokenizer
model_name = "Supabase/gte-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)
model.eval()


def get_embedding(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        output = model(**inputs)

    # Mean pooling over token embeddings
    embeddings = output.last_hidden_state                    # (batch_size, seq_len, hidden_dim)
    attention_mask = inputs["attention_mask"].unsqueeze(-1)  # (batch_size, seq_len, 1)
    # Mean pooling: sum(token_embeddings * mask) / sum(mask)
    pooled_embedding = (embeddings * attention_mask).sum(dim=1) / attention_mask.sum(dim=1)

    # L2-normalize and drop the batch dimension
    return F.normalize(pooled_embedding, p=2, dim=1).squeeze()


def get_similarity_and_excerpt(query, paragraph1, paragraph2, paragraph3, threshold_weight):
    paragraphs = [p for p in [paragraph1, paragraph2, paragraph3] if p.strip()]
    if not query.strip() or not paragraphs:
        return "Please provide both a query and at least one document paragraph."

    query_embedding = get_embedding(query)

    ranked_paragraphs = []
    for paragraph in paragraphs:
        para_embedding = get_embedding(paragraph)
        similarity = F.cosine_similarity(query_embedding, para_embedding, dim=0).item()

        # Highlight the paragraph's tokens when its similarity to the query
        # exceeds the threshold (floored at 0.02)
        tokens = tokenizer.tokenize(paragraph)
        threshold = max(0.02, threshold_weight)
        highlighted_text = " ".join(
            f"<mark>{token}</mark>" if similarity > threshold else token for token in tokens
        )
        highlighted_text = tokenizer.convert_tokens_to_string(highlighted_text.split())

        ranked_paragraphs.append({"similarity": similarity, "highlighted_text": highlighted_text})

    # Rank paragraphs from most to least similar
    ranked_paragraphs.sort(key=lambda x: x["similarity"], reverse=True)
    # Render the ranked results as an HTML table
    output_html = "<table><tr><th>Cosine Similarity</th><th>Highlighted Paragraph</th></tr>"
    for item in ranked_paragraphs:
        output_html += (
            f"<tr><td>{round(item['similarity'], 4)}</td>"
            f"<td>{item['highlighted_text']}</td></tr>"
        )
    output_html += "</table>"

    return output_html
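
# The script imports gradio but stops after building the HTML table, so the UI wiring
# below is a minimal sketch rather than the original interface: the component labels,
# slider range, and title are illustrative assumptions, not taken from the source.
# It assumes one query box, three paragraph boxes, and a threshold slider, matching
# the signature of get_similarity_and_excerpt, with the HTML table as output.
demo = gr.Interface(
    fn=get_similarity_and_excerpt,
    inputs=[
        gr.Textbox(label="Query"),
        gr.Textbox(label="Paragraph 1", lines=4),
        gr.Textbox(label="Paragraph 2", lines=4),
        gr.Textbox(label="Paragraph 3", lines=4),
        gr.Slider(0.0, 1.0, value=0.02, step=0.01, label="Highlight threshold"),
    ],
    outputs=gr.HTML(label="Ranked results"),
    title="Semantic search with gte-small",
)

if __name__ == "__main__":
    demo.launch()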