from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# 1. Load the reranker model and tokenizer
reranker_tokenizer = AutoTokenizer.from_pretrained("BAAI/bge-reranker-base")
reranker_model = AutoModelForSequenceClassification.from_pretrained("BAAI/bge-reranker-base")
reranker_model.eval()  # inference only: disable dropout

def rerank_documents(query: str, docs: list, top_k: int = 5) -> list:
    """

    κ²€μƒ‰λœ λ¬Έμ„œ 리슀트λ₯Ό Query와 λΉ„κ΅ν•΄μ„œ relevance μˆœμ„œλ‘œ μž¬μ •λ ¬ν•œλ‹€.

    """
    pairs = [(query, doc) for doc in docs]
    
    inputs = reranker_tokenizer(
        pairs,
        padding=True,
        truncation=True,
        max_length=512,
        return_tensors="pt"
    )
    
    with torch.no_grad():
        scores = reranker_model(**inputs).logits.squeeze(-1)  # (batch_size,)
    
    scores = scores.tolist()
    
    # Sort documents by score in descending order
    sorted_docs = [doc for _, doc in sorted(zip(scores, docs), key=lambda x: x[0], reverse=True)]
    
    return sorted_docs[:top_k]
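
# Example usage: a minimal sketch showing how rerank_documents might be called.
# The query and candidate documents below are hypothetical illustrations, not
# part of the original file.
if __name__ == "__main__":
    query = "What is retrieval-augmented generation?"
    candidate_docs = [
        "Retrieval-augmented generation (RAG) augments an LLM with retrieved documents.",
        "The weather in Seoul is sunny today.",
        "A reranker scores query-document pairs to refine the initial retrieval order.",
    ]
    top_docs = rerank_documents(query, candidate_docs, top_k=2)
    for rank, doc in enumerate(top_docs, start=1):
        print(f"{rank}. {doc}")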