File size: 1,849 Bytes
06696b5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
import os
import json
import faiss
import numpy as np
from sentence_transformers import SentenceTransformer
from tqdm import tqdm

# 1. Source data paths: one JSONL file of past exam questions per subject
#    (Korean real-estate agent licensing exam corpus).
source_paths = [
    r"data/real_estate_agent/raw/past_papers/brokerage_law.jsonl",
    r"data/real_estate_agent/raw/past_papers/civil_law.jsonl",
    r"data/real_estate_agent/raw/past_papers/disclosure_taxation.jsonl",
    r"data/real_estate_agent/raw/past_papers/introduction.jsonl",
    r"data/real_estate_agent/raw/past_papers/public_law.jsonl",
]

INDEX_PATH = "data/index/index.faiss"  # FAISS index output file
DOCS_PATH = "data/index/docs.npy"      # question texts saved alongside the index

# 2. Load the embedding model once at import time (downloads on first use).
embedding_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")

def init_faiss():
    """Build a FAISS L2 index over exam questions and persist it to disk.

    Reads every JSONL file listed in the module-level ``source_paths``,
    embeds each non-empty ``"question"`` field with ``embedding_model``,
    then writes the FAISS index to ``INDEX_PATH`` and the raw question
    texts to ``DOCS_PATH``.

    Raises:
        ValueError: if no questions were found in any source file
            (an empty matrix cannot be indexed).
        FileNotFoundError / json.JSONDecodeError: propagated from the
            source files if a path is missing or a line is malformed.
    """
    questions = []

    # 3. Read the JSONL files, keeping only non-empty question texts.
    for path in source_paths:
        with open(path, "r", encoding="utf-8") as f:
            for line in f:
                data = json.loads(line)
                question_text = data.get("question", "")
                if question_text:
                    questions.append(question_text)

    print(f"βœ… 총 {len(questions)}개 질문 λ‘œλ”© μ™„λ£Œ")

    # Guard: on an empty corpus, embeddings.shape[1] below would raise an
    # opaque IndexError — fail early with a clear message instead.
    if not questions:
        raise ValueError("No questions found in source_paths; nothing to index.")

    # 4. Generate embeddings; FAISS requires a contiguous float32 matrix.
    embeddings = embedding_model.encode(
        questions,
        batch_size=32,
        show_progress_bar=True,
    )
    # asarray with dtype avoids the extra copy of np.array(...).astype(...)
    embeddings = np.asarray(embeddings, dtype="float32")

    # 5. Build an exact (brute-force) L2-distance index sized to the
    #    embedding dimension.
    dimension = embeddings.shape[1]
    index = faiss.IndexFlatL2(dimension)
    index.add(embeddings)

    # 6. Persist both artifacts. Ensure BOTH target directories exist —
    #    INDEX_PATH and DOCS_PATH are not required to share a directory.
    os.makedirs(os.path.dirname(INDEX_PATH), exist_ok=True)
    os.makedirs(os.path.dirname(DOCS_PATH), exist_ok=True)
    faiss.write_index(index, INDEX_PATH)
    np.save(DOCS_PATH, questions)  # list of str -> unicode ndarray on disk

    print("βœ… FAISS μΈλ±μŠ€μ™€ λ¬Έμ„œ μ €μž₯ μ™„λ£Œ!")

# Script entry point: build and persist the index when run directly.
if __name__ == "__main__":
    init_faiss()