Update app.py
app.py CHANGED
@@ -15,6 +15,8 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_groq import ChatGroq
 from dotenv import load_dotenv
 from langchain.docstore.document import Document
+from langchain.schema import Document
+from chunk_python_code import chunk_python_code_with_metadata
 
 # Load environment variables from .env file
 load_dotenv()
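The second `Document` import shadows the one from `langchain.docstore.document` two lines above; in current LangChain releases both names resolve to the same class, so downstream code is unaffected. For reference, a minimal illustration of the Document shape the newly imported chunker is presumably expected to return (the content and path here are hypothetical stand-ins):

from langchain.schema import Document

# Hypothetical example: source text as page_content, originating file in metadata.
doc = Document(
    page_content="def add(a, b):\n    return a + b\n",
    metadata={"source": "app.py"},  # stand-in path
)
print(doc.metadata["source"])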
@@ -165,7 +167,6 @@ def get_file(temp_dir, file_path):
     return full_path
 
 
-#getFilesFromRepo
 def process_directory(directory, folder_paths, file_paths):
     all_texts = []
     file_references = []
@@ -198,22 +199,15 @@ def process_directory(directory, folder_paths, file_paths):
 
     print(f"Total number of files: {len(files)}")
     for file_path in files:
-
+
         file_ext = os.path.splitext(file_path)[1]
-
         if os.path.getsize(file_path) == 0:
             print(f"Skipping an empty file: {file_path}")
             continue
 
         with open(file_path, 'rb') as f:
-            if file_ext in ['.rst', '.md', '.txt', '.html', '.json', '.yaml', '.py']:
+            if file_ext in ['.rst', '.py']:
                 text = f.read().decode('utf-8')
-
-            elif file_ext in ['.svg']:
-                text = f"SVG file content from {file_path}"
-            elif file_ext in ['.png', '.ico']:
-                text = f"Image metadata from {file_path}"
-            else:
                 continue
 
         all_texts.append(text)
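With the `elif` branches removed, only `.rst` and `.py` files are read, and the surviving `continue` now ends the `if` branch instead of an `else`. A minimal sketch of the same filter written with early continues, assuming the intent is simply to skip empty files and every other extension (function and constant names are stand-ins):

import os

TEXT_EXTENSIONS = {'.rst', '.py'}  # hypothetical constant

def read_text_files(file_paths):
    texts, references = [], []
    for file_path in file_paths:
        # Skip unsupported extensions and empty files.
        if os.path.splitext(file_path)[1] not in TEXT_EXTENSIONS:
            continue
        if os.path.getsize(file_path) == 0:
            print(f"Skipping an empty file: {file_path}")
            continue
        with open(file_path, 'rb') as f:
            texts.append(f.read().decode('utf-8'))
        references.append(file_path)
    return texts, references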
@@ -223,177 +217,16 @@ def process_directory(directory, folder_paths, file_paths):
         file_references.append(relative_path)
         print("AAAAAAAAAAAAAAAAAAAAAAAAAAAAA: ", relative_path)
 
-
-    # Add this snippet after the 'all_texts.append(text)' and 'file_references.append(file_path)' lines.
-
-
     return all_texts, file_references
 
-# with tempfile.TemporaryDirectory() as tmpdirname:
-    # Unzip the file into the temporary directory
-    # with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
-        # zip_ref.extractall(tmpdirname)
-
 
-# unzipped_root = os.listdir(tmpdirname)
 
-
-def process_directory5(directory, partial_paths=None, file_paths=None):
-    all_texts = []
-    file_references = []
-
-    zip_files = [file for file in os.listdir(directory) if file.endswith('.zip')]
-
-    if not zip_files:
-        print("No zip file found in the directory.")
-        return all_texts, file_references
-
-    if len(zip_files) > 1:
-        print("More than one zip file found.")
-        return all_texts, file_references
-    else:
-        zip_file_path = os.path.join(directory, zip_files[0])
-
-    # Create a temporary directory for the zip file
-    with tempfile.TemporaryDirectory() as tmpdirname:
-        # Unzip the file into the temporary directory
-        with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
-            zip_ref.extractall(tmpdirname)
-
-        files = []
-        print("tmpdirname: " , tmpdirname)
-        unzipped_root = os.listdir(tmpdirname)
-        print("unzipped_root ", unzipped_root)
-        if len(unzipped_root) == 1 and os.path.isdir(os.path.join(tmpdirname, unzipped_root[0])):
-            tmpsubdirpath= os.path.join(tmpdirname, unzipped_root[0])
-            print("AYYYYYYY 11111")
-        else:
-            tmpsubdirpath = tmpdirname
-            print("AYYYYYYY 22222")
-
-        if not partial_paths and not file_paths:
-            for root, _, files_list in os.walk(tmpdirname):
-                for file in files_list:
-                    files.append(os.path.join(root, file))
-        else:
-            if partial_paths:
-                for partial_path in partial_paths:
-                    files += get_all_files_in_folder(tmpsubdirpath, partial_path)
-            if file_paths:
-                files += [get_file(tmpsubdirpath, file_path) for file_path in file_paths]
-
-        print(f"Total number of files: {len(files)}")
-        for file_path in files:
-            #print(f"Paths of files: {iles}")
-            file_ext = os.path.splitext(file_path)[1]
-
-            if os.path.getsize(file_path) == 0:
-                print(f"Skipping an empty file: {file_path}")
-                continue
-
-            with open(file_path, 'rb') as f:
-                if file_ext in ['.rst', '.md', '.txt', '.html', '.json', '.yaml', '.py']:
-                    text = f.read().decode('utf-8')
-                elif file_ext in ['.svg']:
-                    text = f"SVG file content from {file_path}"
-                elif file_ext in ['.png', '.ico']:
-                    text = f"Image metadata from {file_path}"
-                else:
-                    continue
-
-            all_texts.append(text)
-            file_references.append(file_path)
 
-
-
-
-
-
-    start_line, start_col = node.lineno - 1, node.col_offset
-    end_line = node.end_lineno - 1 if hasattr(node, 'end_lineno') else node.lineno - 1
-    end_col = node.end_col_offset if hasattr(node, 'end_col_offset') else len(source_lines[end_line])
-
-    lines = source_lines[start_line:end_line + 1]
-    lines[0] = lines[0][start_col:]
-    lines[-1] = lines[-1][:end_col]
-
-    return ''.join(lines)
-
-from langchain.schema import Document
-
-def chunk_python_file_content(content, char_limit=1572):
-    source_lines = content.splitlines(keepends=True)
-
-    # Parse the content into an abstract syntax tree (AST)
-    tree = ast.parse(content)
-
-
-    chunks = []
-    current_chunk = ""
-    current_chunk_size = 0
-
-    # Find all class definitions and top-level functions in the AST
-    class_nodes = [node for node in ast.walk(tree) if isinstance(node, ast.ClassDef)]
-
-    for class_node in class_nodes:
-        method_nodes = [node for node in class_node.body if isinstance(node, ast.FunctionDef)]
-
-        if method_nodes:
-            first_method_start_line = method_nodes[0].lineno - 1
-            class_def_lines = source_lines[class_node.lineno - 1:first_method_start_line]
-        else:
-            class_def_lines = source_lines[class_node.lineno - 1:class_node.end_lineno]
-
-        class_def = ''.join(class_def_lines)
-        class_def_size = len(class_def)
-
-        # Add class definition to the current chunk if it fits
-        if current_chunk_size + class_def_size <= char_limit:
-            current_chunk += f"{class_def.strip()}\n"
-            current_chunk_size += class_def_size
-        else:
-            # Start a new chunk if the class definition exceeds the limit
-            if current_chunk:
-                chunks.append(current_chunk.strip())
-                current_chunk = ""
-                current_chunk_size = 0
-            current_chunk += f"{class_def.strip()}\n"
-            current_chunk_size = class_def_size
-
-        for method_node in method_nodes:
-            method_def = get_source_segment(source_lines, method_node)
-            method_def_size = len(method_def)
-
-            # Add method definition to the current chunk if it fits
-            if current_chunk_size + method_def_size <= char_limit:
-                current_chunk += f"# This is a class method of class: {class_node.name}\n{method_def.strip()}\n"
-                current_chunk_size += method_def_size
-            else:
-                # Start a new chunk if the method definition exceeds the limit
-                if current_chunk:
-                    chunks.append(current_chunk.strip())
-                    current_chunk = ""
-                    current_chunk_size = 0
-                current_chunk += f"# This is a class method of class: {class_node.name}\n{method_def.strip()}\n"
-                current_chunk_size = method_def_size
-
-    if current_chunk:
-        chunks.append(current_chunk.strip())
-
-    return chunks
-
-
-
-# Split python code into chunks
-def split_pythoncode_into_chunks(texts, references, chunk_size, chunk_overlap):
-    chunks = []
-
-    for text, reference in zip(texts, references):
-        file_chunks = chunk_python_file_content(text, char_limit=chunk_size)
-
-        for chunk in file_chunks:
-            document = Document(page_content=chunk, metadata={"source": reference})
-            chunks.append(document)
+def split_python_code_into_chunks(texts, file_paths):
+    chunks = []
+    for text, file_path in zip(texts, file_paths):
+        document_chunks = chunk_python_code_with_metadata(text, file_path)
+        chunks.extend(document_chunks)
     return chunks
 
 
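The deleted AST-walking chunker is replaced by a call to `chunk_python_code_with_metadata` from the new `chunk_python_code` module, which is not part of this diff. A hypothetical sketch of such a helper, assuming it emits one `Document` per top-level class or function and records the originating path in the metadata:

# chunk_python_code.py -- sketch only; the real module is not shown in this diff.
import ast
from langchain.schema import Document

def chunk_python_code_with_metadata(source: str, file_path: str) -> list:
    tree = ast.parse(source)
    lines = source.splitlines(keepends=True)
    documents = []
    for node in tree.body:
        # One chunk per top-level definition, keeping the source reference.
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
            segment = ''.join(lines[node.lineno - 1:node.end_lineno])
            documents.append(Document(page_content=segment,
                                      metadata={"source": file_path}))
    return documents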
@@ -445,7 +278,7 @@ def rag_workflow(query):
 
     doc_context = "\n\n".join([doc_chunk for doc_chunk, _ in retrieved_doc_chunks])
     code_context = "\n\n".join([code_chunk for code_chunk, _ in retrieved_code_chunks])
-
+
     doc_references = "\n".join([f"[{i+1}] {ref}" for i, (_, ref) in enumerate(retrieved_doc_chunks)])
     code_references = "\n".join([f"[{i+1}] {ref}" for i, (_, ref) in enumerate(retrieved_code_chunks)])
 
@@ -505,15 +338,15 @@ def initialize():
     doc_partial_paths = ['docs/source/setup/']
    doc_file_paths = ['docs/source/usage/lib.rst']
 
-
+    code_texts, code_text_references = process_directory(DATA_DIR, code_partial_paths, code_file_paths)
     print("LEEEEEEEEEEEENGTH of code_files: ", len(code_files))
 
 
-    doc_files, doc_file_references = process_directory(DATA_DIR,doc_partial_paths, doc_file_paths)
+    doc_files, doc_file_references = process_directory(DATA_DIR, doc_partial_paths, doc_file_paths)
     print("LEEEEEEEEEEEENGTH of doc_files: ", len(doc_files))
 
-    code_chunks = split_pythoncode_into_chunks(code_files, code_file_references, CHUNK_SIZE, CHUNK_OVERLAP)
-    doc_chunks = split_into_chunks(doc_files, doc_file_references, CHUNK_SIZE, CHUNK_OVERLAP)
+    code_chunks = split_python_code_into_chunks(code_texts, code_text_references)
+    doc_chunks = split_into_chunks(doc_file_texts, doc_file_references, CHUNK_SIZE, CHUNK_OVERLAP)
 
     print(f"Total number of code_chunks: {len(code_chunks)}")
     print(f"Total number of doc_chunks: {len(doc_chunks)}")