import glob
import os
import re

import pandas as pd
import pyarrow.parquet as pq
from datasets import load_dataset
from tqdm import tqdm

print("Downloading the RedStone-Code-python dataset...")
dataset = load_dataset("zjsd/RedStone-Code-python")
print("Dataset downloaded successfully!")

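
# The detector below is a line-based heuristic, not an AST parse: a regex spots
# `def name(...):` headers, then indented and blank lines are collected as the
# body until a non-indented line ends it. Decorated or nested functions may
# therefore be split imperfectly.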
def detect_python_functions(text):
    """Return blocks of text that look like Python function definitions."""
    if not isinstance(text, str):
        return []

    function_pattern = r'def\s+\w+\s*\([^)]*\)\s*:'
    lines = text.split('\n')
    function_blocks = []
    current_block = []
    in_function = False

    for line in lines:
        if re.search(function_pattern, line):
            # A new `def` header starts: flush the block collected so far.
            if current_block and in_function:
                function_blocks.append('\n'.join(current_block))
            in_function = True
            current_block = [line]
            continue

        if in_function:
            if line.startswith((' ', '\t')) or not line.strip():
                # Indented or blank lines belong to the current function body.
                current_block.append(line)
            else:
                # A non-indented, non-blank line ends the function.
                function_blocks.append('\n'.join(current_block))
                current_block = []
                in_function = False

    if current_block and in_function:
        function_blocks.append('\n'.join(current_block))

    def is_valid_function(block):
        # Require at least one common Python keyword and an indented body.
        has_python_keywords = any(keyword in block for keyword in
                                  ['return', 'print', 'if', 'for', 'while', '='])
        has_proper_indentation = any(line.startswith((' ', '\t')) for line in block.split('\n')[1:])
        return has_python_keywords and has_proper_indentation

    return [block for block in function_blocks if is_valid_function(block)]
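
# Illustrative behavior on a hypothetical snippet (reference only):
#   detect_python_functions("def add(a, b):\n    return a + b\nprint(add(1, 2))")
#   -> ['def add(a, b):\n    return a + b']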


def has_python_functions(row):
    """Return True if the row's text contains at least one detected function."""
    functions = detect_python_functions(row['text'])
    return len(functions) > 0
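

# pandas.read_parquet has no offset/rows parameters, so batching below streams
# record batches with pyarrow instead; memory use stays bounded by batch_size
# rather than by the size of the whole file.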
def process_in_batches(parquet_file, batch_size=10000):
    filtered_dfs = []
    parquet = pq.ParquetFile(parquet_file)
    total_rows = parquet.metadata.num_rows

    print(f"\nStarting to process file with {total_rows} rows")

    pbar_desc = f"Processing {os.path.basename(parquet_file)}"
    with tqdm(total=total_rows, desc=pbar_desc, position=1, leave=False) as pbar:
        for batch in parquet.iter_batches(batch_size=batch_size):
            batch_df = batch.to_pandas()

            print(f"\nProcessing batch of size: {len(batch_df)}")
            if len(batch_df) > 0:
                print(f"Sample text from batch:\n{batch_df['text'].iloc[0][:200]}...")

            filtered_batch = batch_df[batch_df.apply(has_python_functions, axis=1)].copy()
            print(f"Found {len(filtered_batch)} rows with functions in this batch")

            if len(filtered_batch) > 0:
                filtered_batch['python_functions'] = filtered_batch['text'].apply(detect_python_functions)
                filtered_dfs.append(filtered_batch)

            pbar.update(len(batch_df))
            pbar.set_postfix({'Found': sum(len(df) for df in filtered_dfs)})

    # Concatenate once at the end rather than per batch to avoid recopying.
    return pd.concat(filtered_dfs) if filtered_dfs else pd.DataFrame()
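

# Driver: convert each dataset split to parquet, then filter every file in
# batches and write the rows that contain functions to new parquet files.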
output_dir = "filtered_python_functions"
os.makedirs(output_dir, exist_ok=True)

print("Converting dataset to parquet files...")
for split in dataset.keys():
    dataset[split].to_parquet(f"{output_dir}/redstone_{split}.parquet")
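# Each split becomes one redstone_<split>.parquet file, which the glob below
# picks up; the filtered output files use a different prefix, so reruns do not
# feed earlier results back into the input set.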

parq_list = glob.glob(f"{output_dir}/redstone_*.parquet")

if not parq_list:
    print("No parquet files found! Something went wrong with the dataset conversion.")
    raise SystemExit(1)

print(f"Found {len(parq_list)} parquet files to process")

total_functions_found = 0
with tqdm(total=len(parq_list), desc="Overall Progress", position=0) as pbar_files:
    for idx, parquet_file in enumerate(parq_list):
        try:
            filtered_df = process_in_batches(parquet_file, batch_size=10000)

            if len(filtered_df) > 0:
                output_file = os.path.join(output_dir, f"python_functions_{idx}.parquet")
                filtered_df.to_parquet(output_file)
                # Count the extracted functions themselves, not just the rows
                # that contain them, so the total matches its label below.
                total_functions_found += int(filtered_df['python_functions'].apply(len).sum())

            pbar_files.set_postfix({
                'Current File': f"{idx + 1}/{len(parq_list)}",
                'Total Found': total_functions_found
            })
        except Exception as e:
            print(f"\nError processing {parquet_file}: {e}")
        finally:
            # Advance the file-level bar even when a file fails.
            pbar_files.update(1)

print(f"\nProcessing complete! Total Python functions found: {total_functions_found}")