"""
Text chunking module for the Norwegian RAG chatbot.
Splits documents into manageable chunks for embedding and retrieval.
"""

import re
import unicodedata
from typing import List

from ..api.config import CHUNK_SIZE, CHUNK_OVERLAP

class TextChunker:
    """
    Splits documents into manageable chunks for embedding and retrieval.
    Supports different chunking strategies optimized for Norwegian text.
    """
    
    @staticmethod
    def chunk_text(
        text: str,
        chunk_size: int = CHUNK_SIZE,
        chunk_overlap: int = CHUNK_OVERLAP,
        strategy: str = "paragraph"
    ) -> List[str]:
        """
        Split text into chunks using the specified strategy.
        
        Args:
            text: Text to split into chunks
            chunk_size: Maximum size of each chunk, in characters
            chunk_overlap: Overlap between consecutive chunks, in characters
            strategy: Chunking strategy ('fixed', 'paragraph', or 'sentence')
            
        Returns:
            List of text chunks
        """
        if not text:
            return []
        
        if strategy == "fixed":
            return TextChunker.fixed_size_chunks(text, chunk_size, chunk_overlap)
        elif strategy == "paragraph":
            return TextChunker.paragraph_chunks(text, chunk_size, chunk_overlap)
        elif strategy == "sentence":
            return TextChunker.sentence_chunks(text, chunk_size, chunk_overlap)
        else:
            raise ValueError(f"Unknown chunking strategy: {strategy}")
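    
    # Illustrative call (a sketch; the explicit sizes here are assumptions,
    # not the configured CHUNK_SIZE / CHUNK_OVERLAP defaults):
    #
    #     TextChunker.chunk_text(document_text, chunk_size=1000,
    #                            chunk_overlap=100, strategy="sentence")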
    
    @staticmethod
    def fixed_size_chunks(
        text: str,
        chunk_size: int = CHUNK_SIZE,
        chunk_overlap: int = CHUNK_OVERLAP
    ) -> List[str]:
        """
        Split text into fixed-size chunks with overlap.
        
        Args:
            text: Text to split into chunks
            chunk_size: Maximum size of each chunk, in characters
            chunk_overlap: Overlap between consecutive chunks, in characters
            
        Returns:
            List of text chunks
        """
        if not text:
            return []
        
        chunks = []
        start = 0
        text_length = len(text)
        
        while start < text_length:
            end = min(start + chunk_size, text_length)
            
            # If this is not the first chunk and we're not at the end,
            # try to find a good breaking point (whitespace)
            if start > 0 and end < text_length:
                # Look for the last whitespace within the chunk
                last_whitespace = text.rfind(' ', start, end)
                if last_whitespace != -1:
                    end = last_whitespace + 1  # Include the space
            
            # Add the chunk
            chunks.append(text[start:end].strip())
            
            # Move the start position for the next chunk, considering overlap.
            # Guard against a non-positive step (possible if chunk_overlap is
            # at least as large as the effective chunk), which would otherwise
            # loop forever.
            if end >= text_length:
                start = text_length
            else:
                start = max(end - chunk_overlap, start + 1)
        
        return chunks
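    
    # Illustrative behaviour (sizes chosen for the example, not the defaults):
    #
    #     TextChunker.fixed_size_chunks("abcdefghij", chunk_size=4, chunk_overlap=1)
    #     -> ['abcd', 'defg', 'ghij']
    #
    # Each chunk restarts one character before the previous chunk ended, so
    # consecutive chunks share a one-character overlap.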
    
    @staticmethod
    def paragraph_chunks(
        text: str,
        max_chunk_size: int = CHUNK_SIZE,
        chunk_overlap: int = CHUNK_OVERLAP
    ) -> List[str]:
        """
        Split text into chunks based on paragraphs.
        
        Args:
            text: Text to split into chunks
            max_chunk_size: Maximum size of each chunk, in characters
            chunk_overlap: Approximate overlap (in characters) carried over as whole paragraphs
            
        Returns:
            List of text chunks
        """
        if not text:
            return []
        
        # Split text into paragraphs
        paragraphs = re.split(r'\n\s*\n', text)
        paragraphs = [p.strip() for p in paragraphs if p.strip()]
        
        chunks = []
        current_chunk = []
        current_size = 0
        
        for paragraph in paragraphs:
            paragraph_size = len(paragraph)
            
            # If adding this paragraph would exceed the max chunk size and we already have content,
            # save the current chunk and start a new one
            if current_size + paragraph_size > max_chunk_size and current_chunk:
                chunks.append('\n\n'.join(current_chunk))
                
                # For overlap, keep some paragraphs from the previous chunk
                overlap_size = 0
                overlap_paragraphs = []
                
                # Add paragraphs from the end until we reach the desired overlap
                for p in reversed(current_chunk):
                    if overlap_size + len(p) <= chunk_overlap:
                        overlap_paragraphs.insert(0, p)
                        overlap_size += len(p)
                    else:
                        break
                
                current_chunk = overlap_paragraphs
                current_size = overlap_size
            
            # If the paragraph itself is larger than the max chunk size, split it further
            if paragraph_size > max_chunk_size:
                # First, add the current chunk if it's not empty
                if current_chunk:
                    chunks.append('\n\n'.join(current_chunk))
                    current_chunk = []
                    current_size = 0
                
                # Then split the large paragraph into fixed-size chunks
                sub_chunks = TextChunker.fixed_size_chunks(paragraph, max_chunk_size, chunk_overlap)
                chunks.extend(sub_chunks)
            else:
                # Add the paragraph to the current chunk
                current_chunk.append(paragraph)
                current_size += paragraph_size
        
        # Add the last chunk if it's not empty
        if current_chunk:
            chunks.append('\n\n'.join(current_chunk))
        
        return chunks
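    
    # Illustrative behaviour (assumed sizes): with max_chunk_size=20 and
    # chunk_overlap=0, "First paragraph.\n\nSecond paragraph." becomes
    # ['First paragraph.', 'Second paragraph.'], because keeping both
    # paragraphs in one chunk would exceed 20 characters.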
    
    @staticmethod
    def sentence_chunks(
        text: str,
        max_chunk_size: int = CHUNK_SIZE,
        chunk_overlap: int = CHUNK_OVERLAP
    ) -> List[str]:
        """
        Split text into chunks based on sentences.
        
        Args:
            text: Text to split into chunks
            max_chunk_size: Maximum size of each chunk, in characters
            chunk_overlap: Approximate overlap (in characters) carried over as whole sentences
            
        Returns:
            List of text chunks
        """
        if not text:
            return []
        
        # Sentence splitting: break on '.', '!' or '?' followed by whitespace
        # and an uppercase letter, including the Norwegian letters Æ, Ø and Å.
        # (Abbreviations followed by a capitalized word will still be split.)
        sentence_pattern = r'(?<=[.!?])\s+(?=[A-ZÆØÅ])'
        sentences = re.split(sentence_pattern, text)
        sentences = [s.strip() for s in sentences if s.strip()]
        
        chunks = []
        current_chunk = []
        current_size = 0
        
        for sentence in sentences:
            sentence_size = len(sentence)
            
            # If adding this sentence would exceed the max chunk size and we already have content,
            # save the current chunk and start a new one
            if current_size + sentence_size > max_chunk_size and current_chunk:
                chunks.append(' '.join(current_chunk))
                
                # For overlap, keep some sentences from the previous chunk
                overlap_size = 0
                overlap_sentences = []
                
                # Add sentences from the end until we reach the desired overlap
                for s in reversed(current_chunk):
                    if overlap_size + len(s) <= chunk_overlap:
                        overlap_sentences.insert(0, s)
                        overlap_size += len(s)
                    else:
                        break
                
                current_chunk = overlap_sentences
                current_size = overlap_size
            
            # If the sentence itself is larger than the max chunk size, split it further
            if sentence_size > max_chunk_size:
                # First, add the current chunk if it's not empty
                if current_chunk:
                    chunks.append(' '.join(current_chunk))
                    current_chunk = []
                    current_size = 0
                
                # Then split the large sentence into fixed-size chunks
                sub_chunks = TextChunker.fixed_size_chunks(sentence, max_chunk_size, chunk_overlap)
                chunks.extend(sub_chunks)
            else:
                # Add the sentence to the current chunk
                current_chunk.append(sentence)
                current_size += sentence_size
        
        # Add the last chunk if it's not empty
        if current_chunk:
            chunks.append(' '.join(current_chunk))
        
        return chunks
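    
    # Illustrative behaviour (assumed sizes):
    #
    #     TextChunker.sentence_chunks(
    #         "Hei på deg. Dette er en test. Vi deler tekst.",
    #         max_chunk_size=30, chunk_overlap=0)
    #     -> ['Hei på deg. Dette er en test.', 'Vi deler tekst.']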
    
    @staticmethod
    def clean_chunk(chunk: str) -> str:
        """
        Clean a text chunk by removing excessive whitespace and normalizing.
        
        Args:
            chunk: Text chunk to clean
            
        Returns:
            Cleaned text chunk
        """
        if not chunk:
            return ""
        
        # Replace multiple whitespace with a single space
        cleaned = re.sub(r'\s+', ' ', chunk)
        
        # Normalize to Unicode NFC so Norwegian characters (æ, ø, å) that may
        # arrive in decomposed form (e.g. 'a' + combining ring above) become
        # single code points and compare consistently.
        cleaned = unicodedata.normalize('NFC', cleaned)
        
        return cleaned.strip()
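

if __name__ == "__main__":
    # Minimal usage sketch. Assumptions: the module is executed with
    # "python -m" from the package root so the relative import of api.config
    # resolves; the sample text and sizes below are illustrative, not the
    # configured CHUNK_SIZE / CHUNK_OVERLAP defaults.
    sample = (
        "Dette er første avsnitt. Det handler om tekstdeling for RAG.\n\n"
        "Dette er andre avsnitt. Det er litt lengre og viser hvordan "
        "avsnittsbasert og setningsbasert deling fungerer i praksis."
    )
    for strategy in ("fixed", "paragraph", "sentence"):
        chunks = TextChunker.chunk_text(
            sample, chunk_size=80, chunk_overlap=20, strategy=strategy
        )
        print(f"{strategy}: {len(chunks)} chunk(s)")
        for chunk in chunks:
            print("  -", TextChunker.clean_chunk(chunk))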