import re
import pandas as pd
import glob
import os
import pyarrow.parquet as pq
from tqdm import tqdm
from datasets import load_dataset
# Download the dataset first
print("Downloading the RedStone-Code-python dataset...")
dataset = load_dataset("zjsd/RedStone-Code-python")
print("Dataset downloaded successfully!")
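# Each record is assumed to expose a free-text `text` column (this is how the
# rows are accessed below); the filtering pass scans that column for embedded
# Python function definitions.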
def detect_python_functions(text):
    """Return a list of Python function definition blocks found in `text`."""
    if not isinstance(text, str):
        return []

    # Match a function signature like `def name(args):`
    function_pattern = r'def\s+\w+\s*\([^)]*\)\s*:'
    lines = text.split('\n')
    function_blocks = []
    current_block = []
    in_function = False

    for line in lines:
        if re.search(function_pattern, line):
            # A new `def` closes any block we were still collecting
            if current_block and in_function:
                function_blocks.append('\n'.join(current_block))
                current_block = []
            in_function = True
            current_block = [line]
            continue

        if in_function:
            if line.startswith(' ') or '\t' in line or not line.strip():
                # Indented or blank lines belong to the current function body
                current_block.append(line)
            else:
                # A non-indented, non-empty line ends the function
                if current_block:
                    function_blocks.append('\n'.join(current_block))
                current_block = []
                in_function = False

    # Flush a function that runs to the end of the text
    if current_block and in_function:
        function_blocks.append('\n'.join(current_block))

    def is_valid_function(block):
        # Heuristic filter: the block should contain common Python keywords
        # and at least one indented body line
        has_python_keywords = any(keyword in block for keyword in
                                  ['return', 'print', 'if', 'for', 'while', '='])
        has_proper_indentation = any(line.startswith(' ') for line in block.split('\n')[1:])
        return has_python_keywords and has_proper_indentation

    return [block for block in function_blocks if is_valid_function(block)]
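# A minimal sanity check (illustrative only; the sample string is made up):
#
#   sample = "def add(a, b):\n    return a + b\nx = 1"
#   detect_python_functions(sample)
#   # -> ['def add(a, b):\n    return a + b']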
def has_python_functions(row):
    functions = detect_python_functions(row['text'])
    return len(functions) > 0
def process_in_batches(parquet_file, batch_size=10000):
    """Stream a parquet file in batches and keep rows that contain Python functions."""
    filtered_dfs = []

    # pandas.read_parquet has no offset/rows arguments, so stream record
    # batches with pyarrow instead of re-reading the file for every batch
    pf = pq.ParquetFile(parquet_file)
    total_rows = pf.metadata.num_rows
    print(f"\nStarting to process file with {total_rows} rows")  # Debug line

    pbar_desc = f"Processing {os.path.basename(parquet_file)}"
    with tqdm(total=total_rows, desc=pbar_desc, position=1, leave=False) as pbar:
        for record_batch in pf.iter_batches(batch_size=batch_size):
            batch_df = record_batch.to_pandas()

            # Debug lines
            print(f"\nProcessing batch of size: {len(batch_df)}")
            if len(batch_df) > 0:
                print(f"Sample text from batch:\n{batch_df['text'].iloc[0][:200]}...")

            # Filter and process batch
            filtered_batch = batch_df[batch_df.apply(has_python_functions, axis=1)].copy()
            print(f"Found {len(filtered_batch)} rows with functions in this batch")  # Debug line

            if len(filtered_batch) > 0:
                filtered_batch['python_functions'] = filtered_batch['text'].apply(detect_python_functions)
                filtered_dfs.append(filtered_batch)

            pbar.update(len(batch_df))
            pbar.set_postfix({'Found': sum(len(df) for df in filtered_dfs)})

    return pd.concat(filtered_dfs) if filtered_dfs else pd.DataFrame()
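# Example usage on a single file (illustrative; the path follows the naming
# convention used below but is hypothetical here):
#
#   df = process_in_batches("filtered_python_functions/redstone_train.parquet")
#   df['python_functions'].head()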
# Create output directory
output_dir = "filtered_python_functions"
os.makedirs(output_dir, exist_ok=True)
# Convert dataset to parquet files if needed
print("Converting dataset to parquet files...")
for split in dataset.keys():
    dataset[split].to_parquet(f"{output_dir}/redstone_{split}.parquet")
# Get list of parquet files
parq_list = glob.glob(f"{output_dir}/redstone_*.parquet")
if not parq_list:
    print("No parquet files found! Something went wrong with the dataset conversion.")
    exit(1)
print(f"Found {len(parq_list)} parquet files to process")
# Process all files with overall progress bar
total_functions_found = 0
with tqdm(total=len(parq_list), desc="Overall Progress", position=0) as pbar_files:
    for idx, parquet_file in enumerate(parq_list):
        try:
            # Process the file in batches
            filtered_df = process_in_batches(parquet_file, batch_size=10000)

            # Save if we found any matches
            if len(filtered_df) > 0:
                output_file = os.path.join(output_dir, f"python_functions_{idx}.parquet")
                filtered_df.to_parquet(output_file)
                total_functions_found += len(filtered_df)

            # Update progress bar with statistics
            pbar_files.set_postfix({
                'Current File': f"{idx + 1}/{len(parq_list)}",
                'Total Found': total_functions_found
            })
        except Exception as e:
            print(f"\nError processing {parquet_file}: {str(e)}")
        # Advance the file-level bar even when a file fails (a `continue` in
        # the except block would skip this update and the bar would never
        # reach 100%)
        pbar_files.update(1)
print(f"\nProcessing complete! Total rows with Python functions found: {total_functions_found}")
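# The filtered rows can be read back later for inspection (illustrative;
# the index suffix depends on how many input files matched):
#
#   pd.read_parquet("filtered_python_functions/python_functions_0.parquet")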