Create filter.py
filter.py
ADDED
@@ -0,0 +1,134 @@
import re
import glob
import os

import pandas as pd
import pyarrow.parquet as pq  # used for batched parquet reads below
from tqdm import tqdm
from datasets import load_dataset

# Download the dataset first
print("Downloading the RedStone-Code-python dataset...")
dataset = load_dataset("zjsd/RedStone-Code-python")
print("Dataset downloaded successfully!")

def detect_python_functions(text):
    """Heuristically extract Python function definitions from raw text."""
    if not isinstance(text, str):
        return []

    function_pattern = r'def\s+\w+\s*\([^)]*\)\s*:'
    lines = text.split('\n')
    function_blocks = []
    current_block = []
    in_function = False

    for line in lines:
        # A new `def` line closes any open block and starts a new one
        if re.search(function_pattern, line):
            if current_block and in_function:
                function_blocks.append('\n'.join(current_block))
            in_function = True
            current_block = [line]
            continue

        if in_function:
            # Space- or tab-indented lines and blank lines belong to the
            # current function body
            if line.startswith(' ') or line.startswith('\t') or not line.strip():
                current_block.append(line)
            else:
                # Any other non-indented, non-blank line ends the function
                if current_block:
                    function_blocks.append('\n'.join(current_block))
                current_block = []
                in_function = False

    if current_block and in_function:
        function_blocks.append('\n'.join(current_block))

    def is_valid_function(block):
        # Heuristic: require a common Python keyword and an indented body
        has_python_keywords = any(keyword in block for keyword in
                                  ['return', 'print', 'if', 'for', 'while', '='])
        has_proper_indentation = any(line.startswith(' ')
                                     for line in block.split('\n')[1:])
        return has_python_keywords and has_proper_indentation

    return [block for block in function_blocks if is_valid_function(block)]

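# A quick illustration of what the detector returns (hypothetical input,
# not part of the dataset):
#   detect_python_functions("def add(a, b):\n    return a + b\nx = 1")
#   -> ['def add(a, b):\n    return a + b']
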
def has_python_functions(row):
    functions = detect_python_functions(row['text'])
    return len(functions) > 0

def process_in_batches(parquet_file, batch_size=10000):
    filtered_dfs = []
    # pandas.read_parquet has no offset/rows arguments, so stream record
    # batches through pyarrow instead of trying to slice with pandas
    pf = pq.ParquetFile(parquet_file)
    total_rows = pf.metadata.num_rows

    print(f"\nStarting to process file with {total_rows} rows")  # Debug line

    pbar_desc = f"Processing {os.path.basename(parquet_file)}"
    with tqdm(total=total_rows, desc=pbar_desc, position=1, leave=False) as pbar:
        for batch in pf.iter_batches(batch_size=batch_size):
            # Read batch
            batch_df = batch.to_pandas()

            # Debug lines
            print(f"\nProcessing batch of size: {len(batch_df)}")
            if len(batch_df) > 0:
                print(f"Sample text from batch:\n{batch_df['text'].iloc[0][:200]}...")

            # Filter and process batch
            filtered_batch = batch_df[batch_df.apply(has_python_functions, axis=1)].copy()
            print(f"Found {len(filtered_batch)} rows with functions in this batch")  # Debug line

            if len(filtered_batch) > 0:
                filtered_batch['python_functions'] = filtered_batch['text'].apply(detect_python_functions)
                filtered_dfs.append(filtered_batch)

            pbar.update(len(batch_df))
            pbar.set_postfix({'Found': sum(len(df) for df in filtered_dfs)})

    return pd.concat(filtered_dfs) if filtered_dfs else pd.DataFrame()

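# Standalone usage sketch (hypothetical split name; the driver below runs
# this for every converted split):
#   df = process_in_batches("filtered_python_functions/redstone_train.parquet")
#   df.to_parquet("sample_output.parquet")
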
# Create output directory
output_dir = "filtered_python_functions"
os.makedirs(output_dir, exist_ok=True)

# Convert dataset to parquet files if needed
print("Converting dataset to parquet files...")
for split in dataset.keys():
    dataset[split].to_parquet(f"{output_dir}/redstone_{split}.parquet")

# Get list of parquet files
parq_list = glob.glob(f"{output_dir}/redstone_*.parquet")

if not parq_list:
    print("No parquet files found! Something went wrong with the dataset conversion.")
    exit(1)

print(f"Found {len(parq_list)} parquet files to process")

# Process all files with overall progress bar
total_functions_found = 0
with tqdm(total=len(parq_list), desc="Overall Progress", position=0) as pbar_files:
    for idx, parquet_file in enumerate(parq_list):
        try:
            # Process the file in batches
            filtered_df = process_in_batches(parquet_file, batch_size=10000)

            # Save if we found any matches
            if len(filtered_df) > 0:
                output_file = os.path.join(output_dir, f"python_functions_{idx}.parquet")
                filtered_df.to_parquet(output_file)
                total_functions_found += len(filtered_df)

            # Update progress bar with statistics
            pbar_files.set_postfix({
                'Current File': f"{idx + 1}/{len(parq_list)}",
                'Total Found': total_functions_found
            })

        except Exception as e:
            print(f"\nError processing {parquet_file}: {str(e)}")
        finally:
            # Advance the file-level bar even when a file fails
            pbar_files.update(1)

print(f"\nProcessing complete! Total Python functions found: {total_functions_found}")
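# Optional follow-up check (assumes at least one output file was written;
# the file name is illustrative):
#   sample = pd.read_parquet(f"{output_dir}/python_functions_0.parquet")
#   print(sample['python_functions'].iloc[0][0])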