import gradio as gr
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Cross-encoder re-ranker fine-tuned on MS MARCO passage ranking: given a
# (query, paragraph) pair it produces a single relevance logit.
model_name = "cross-encoder/ms-marco-MiniLM-L-12-v2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()


def get_relevance_score_and_excerpt(query, paragraph, threshold_weight):
    if not query.strip() or not paragraph.strip():
        return "Please provide both a query and a document paragraph.", ""

    # The pair is encoded as one sequence: [CLS] query [SEP] paragraph [SEP]
    inputs = tokenizer(query, paragraph, return_tensors="pt", truncation=True, padding=True)

    with torch.no_grad():
        output = model(**inputs, output_attentions=True)

    # The cross-encoder emits a single logit per pair; higher means more relevant.
    base_relevance_score = output.logits.squeeze().item()

    # Clamp the slider value to a small floor so the threshold never reaches zero.
    dynamic_threshold = max(0.02, threshold_weight)

    # Attention from the last encoder layer: (batch, heads, seq_len, seq_len).
    # Average over heads, then drop the batch dimension -> (seq_len, seq_len).
    attention = output.attentions[-1]
    attention_scores = attention.mean(dim=1).mean(dim=0)

    query_tokens = tokenizer.tokenize(query)
    paragraph_tokens = tokenizer.tokenize(paragraph)

    # With the [CLS] query [SEP] paragraph [SEP] layout, the paragraph starts
    # after the query tokens plus the two special tokens and ends just before
    # the final [SEP].
    query_len = len(query_tokens) + 2
    para_start_idx = query_len
    para_end_idx = len(inputs["input_ids"][0]) - 1

    if para_end_idx <= para_start_idx:
        return round(base_relevance_score, 4), "No relevant tokens extracted."

    # Mean attention each paragraph token receives from the other paragraph tokens.
    para_attention_scores = attention_scores[para_start_idx:para_end_idx, para_start_idx:para_end_idx].mean(dim=0)

    if para_attention_scores.numel() == 0:
        return round(base_relevance_score, 4), "No relevant tokens extracted."

    # Paragraph-relative indices of tokens whose mean attention exceeds the threshold.
    # These line up with paragraph_tokens as long as the input was not truncated.
    relevant_indices = (para_attention_scores > dynamic_threshold).nonzero(as_tuple=True)[0].tolist()

    # Wrap high-attention tokens in <b> tags. Highlighting happens at the WordPiece
    # level, so a highlighted subword piece may keep its leading "##" in the output.
    highlighted_text = ""
    for idx, token in enumerate(paragraph_tokens):
        if idx in relevant_indices:
            highlighted_text += f"<b>{token}</b> "
        else:
            highlighted_text += f"{token} "

    highlighted_text = tokenizer.convert_tokens_to_string(highlighted_text.split())

    return round(base_relevance_score, 4), highlighted_text
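
# Example (hypothetical inputs) of calling the scorer directly, without the UI:
#   score, excerpt = get_relevance_score_and_excerpt(
#       "how do solar panels work",
#       "Solar panels convert sunlight into electricity using photovoltaic cells.",
#       threshold_weight=0.1,
#   )
#   score is the raw relevance logit (higher = more relevant); excerpt is the
#   paragraph with high-attention tokens wrapped in <b> tags.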

interface = gr.Interface(
    fn=get_relevance_score_and_excerpt,
    inputs=[
        gr.Textbox(label="Query", placeholder="Enter your search query..."),
        gr.Textbox(label="Document Paragraph", placeholder="Enter a paragraph to match..."),
        gr.Slider(minimum=0.02, maximum=0.5, value=0.1, step=0.01, label="Attention Threshold"),
    ],
    outputs=[
        gr.Textbox(label="Relevance Score (Logits)"),
        gr.HTML(label="Highlighted Document Paragraph"),
    ],
    title="Cross-Encoder Attention Highlighting",
    description="Adjust the attention threshold to control token highlighting sensitivity.",
    allow_flagging="never",
    live=True,
)

if __name__ == "__main__":
    interface.launch()
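    # launch() serves on localhost by default; it also accepts options such as
    # server_name and share for other setups, e.g.:
    #   interface.launch(server_name="0.0.0.0", share=False)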