import argparse
import json
import os
import traceback

import numpy as np
import pandas as pd
from tqdm import tqdm

from utils.utils import get_question_pairs
from utils.metrics import evaluate_gene_selection
from tools.statistics import get_gene_regressors

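# Expected on-disk layout, inferred from the path handling in this script (an
# orientation sketch only; actual contents depend on the upstream pipeline):
#
#   <root>/preprocess/<trait>/cohort_info.json           # per-cohort metadata
#   <root>/preprocess/<trait>/<cohort_id>.csv            # linked data
#   <root>/preprocess/<trait>/gene_data/<file>.csv       # gene expression data
#   <root>/preprocess/<trait>/clinical_data/<file>.csv   # clinical data
#   <root>/regress/<trait>/significant_genes_*.json      # statistical analysis output
#
# where <root> is either the prediction directory or the reference directory.
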
def average_metrics(metrics_list):
    """Average a list of metric dictionaries."""
    if not metrics_list:
        return {}

    avg_metrics = {}
    for metric in metrics_list[0]:
        if isinstance(metrics_list[0][metric], (int, float)):
            avg_metrics[metric] = float(np.round(np.nanmean([p[metric] for p in metrics_list]), 2))

    return avg_metrics

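# Illustrative use of average_metrics (not executed at import time): numeric
# fields are averaged with np.nanmean and rounded to 2 decimals, while string
# fields such as 'trait' are skipped.
#
#     >>> average_metrics([{'f1': 80.0, 'trait': 'A'}, {'f1': 90.0, 'trait': 'B'}])
#     {'f1': 85.0}
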
def evaluate_dataset_selection(pred_dir, ref_dir):
    """
    Evaluate dataset filtering and selection by comparing predicted and reference cohort info files.

    This function evaluates two aspects:
    1. Dataset Filtering (DF): binary classification of dataset availability (is_available).
    2. Dataset Selection (DS): accuracy in selecting the best dataset(s) for each problem.

    Args:
        pred_dir: Path to prediction directory
        ref_dir: Path to reference directory

    Returns:
        Dictionary of evaluation metrics for dataset filtering and selection
    """
    filtering_metrics_list = []
    selection_metrics_list = []

    # Filtering is evaluated once per trait, so track traits already processed.
    seen_traits = set()

    task_info_file = './metadata/task_info.json'
    all_pairs = get_question_pairs(task_info_file)

    with tqdm(total=len(all_pairs), desc="Evaluating dataset filtering and selection") as pbar:
        for i, (trait, condition) in enumerate(all_pairs):
            trait_filtering_metrics = {'tp': 0, 'fp': 0, 'tn': 0, 'fn': 0}
            problem_selection_metrics = {'accuracy': 0.0}

            ref_trait_dir = os.path.join(ref_dir, 'preprocess', trait)
            pred_trait_dir = os.path.join(pred_dir, 'preprocess', trait)
            ref_trait_info_path = os.path.join(ref_trait_dir, 'cohort_info.json')
            pred_trait_info_path = os.path.join(pred_trait_dir, 'cohort_info.json')

            if not os.path.exists(ref_trait_info_path):
                print(f"Warning: Reference cohort info not found at '{ref_trait_info_path}'")
                pbar.update(1)
                continue

            if not os.path.exists(pred_trait_info_path):
                print(f"Warning: Prediction cohort info not found at '{pred_trait_info_path}'")
                pbar.update(1)
                continue

            try:
                with open(ref_trait_info_path, 'r') as f:
                    ref_trait_info = json.load(f)
                with open(pred_trait_info_path, 'r') as f:
                    pred_trait_info = json.load(f)

                if trait not in seen_traits:
                    # Compare availability labels over the union of cohorts seen
                    # in either file; a cohort missing from one side counts as
                    # unavailable on that side.
                    for cohort_id in set(ref_trait_info.keys()).union(set(pred_trait_info.keys())):
                        ref_available = ref_trait_info.get(cohort_id, {}).get('is_available', False)
                        pred_available = pred_trait_info.get(cohort_id, {}).get('is_available', False)

                        if ref_available and pred_available:
                            trait_filtering_metrics['tp'] += 1
                        elif ref_available and not pred_available:
                            trait_filtering_metrics['fn'] += 1
                        elif not ref_available and pred_available:
                            trait_filtering_metrics['fp'] += 1
                        else:
                            trait_filtering_metrics['tn'] += 1

                    filtering_result = calculate_metrics_from_confusion(
                        trait_filtering_metrics['tp'],
                        trait_filtering_metrics['fp'],
                        trait_filtering_metrics['tn'],
                        trait_filtering_metrics['fn']
                    )
                    filtering_result['trait'] = trait
                    filtering_metrics_list.append(filtering_result)
                    seen_traits.add(trait)

                ref_selection = select_cohorts(
                    root_dir=ref_dir,
                    trait=trait,
                    condition=condition
                )
                pred_selection = select_cohorts(
                    root_dir=pred_dir,
                    trait=trait,
                    condition=condition
                )

                if ref_selection == pred_selection:
                    problem_selection_metrics['accuracy'] = 100.0

                problem_selection_metrics['trait'] = trait
                problem_selection_metrics['condition'] = condition
                selection_metrics_list.append(problem_selection_metrics)

                if (i + 1) % 5 == 0 or i == 0 or i == len(all_pairs) - 1:
                    display_running_average(
                        pbar,
                        filtering_metrics_list,
                        "Dataset filtering",
                        ['precision', 'recall', 'f1', 'accuracy'],
                        selection_metrics_list,
                        "Dataset selection",
                        ['accuracy']
                    )

            except Exception as e:
                print(f"Error evaluating {trait}-{condition}: {str(e)}")
                print(traceback.format_exc())

            pbar.update(1)

    avg_filtering_metrics = average_metrics(filtering_metrics_list)
    avg_selection_metrics = average_metrics(selection_metrics_list)

    return {
        'filtering_metrics': {
            'per_trait': filtering_metrics_list,
            'average': avg_filtering_metrics
        },
        'selection_metrics': {
            'per_problem': selection_metrics_list,
            'average': avg_selection_metrics
        }
    }

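# Sketch of a cohort_info.json record as read by select_cohorts below. The
# field names are taken from the .get() calls in this file; the cohort ID and
# values are made up for illustration:
#
#     {
#         "GSE12345": {
#             "is_available": true,
#             "is_usable": true,
#             "has_age": false,
#             "has_gender": true,
#             "sample_size": 120
#         }
#     }
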
def select_cohorts(root_dir, trait, condition=None, gene_info_path='./metadata/task_info.json'):
    """
    Select the best cohort or cohort pair for analysis.
    Unified function that handles both one-step and two-step dataset selection.

    Args:
        root_dir: Base directory containing output data
        trait: Name of the trait
        condition: Name of the condition (optional)
        gene_info_path: Path to gene info metadata file (default: './metadata/task_info.json')

    Returns:
        For one-step: the selected cohort ID, or None if no suitable cohort is found.
        For two-step: a tuple (trait_cohort_id, condition_cohort_id), or None if no suitable pair is found.
    """
    trait_dir = os.path.join(root_dir, 'preprocess', trait)
    trait_info_path = os.path.join(trait_dir, 'cohort_info.json')

    if not os.path.exists(trait_info_path):
        print(f"Warning: Trait cohort info not found for '{trait}'")
        return None

    with open(trait_info_path, 'r') as f:
        trait_info = json.load(f)

    if condition is None or condition.lower() in ['age', 'gender', 'none']:
        # One-step selection: choose the largest usable trait cohort, requiring
        # age/gender information when the condition demands it.
        usable_cohorts = {}
        for cohort_id, info in trait_info.items():
            if info.get('is_usable', False):
                if condition == 'Age' and not info.get('has_age', False):
                    continue
                elif condition == 'Gender' and not info.get('has_gender', False):
                    continue
                usable_cohorts[cohort_id] = info

        if not usable_cohorts:
            return None

        return max(usable_cohorts.items(), key=lambda x: x[1].get('sample_size', 0))[0]

    else:
        # Two-step selection: pair a usable trait cohort with a usable condition cohort.
        condition_dir = os.path.join(root_dir, 'preprocess', condition)
        condition_info_path = os.path.join(condition_dir, 'cohort_info.json')

        if not os.path.exists(condition_info_path):
            print(f"Warning: Condition cohort info not found for '{condition}'")
            return None

        with open(condition_info_path, 'r') as f:
            condition_info = json.load(f)

        usable_trait_cohorts = {k: v for k, v in trait_info.items() if v.get('is_usable', False)}
        usable_condition_cohorts = {k: v for k, v in condition_info.items() if v.get('is_usable', False)}

        if not usable_trait_cohorts or not usable_condition_cohorts:
            return None

        # Rank candidate pairs by the product of their sample sizes.
        pairs = []
        for trait_id, trait_info_item in usable_trait_cohorts.items():
            for cond_id, cond_info_item in usable_condition_cohorts.items():
                trait_size = trait_info_item.get('sample_size', 0)
                cond_size = cond_info_item.get('sample_size', 0)
                pairs.append((trait_id, cond_id, trait_size * cond_size))

        pairs.sort(key=lambda x: x[2], reverse=True)

        # Return the first pair whose data files exist and yield usable gene regressors.
        for trait_id, cond_id, _ in pairs:
            trait_data_path = os.path.join(trait_dir, f"{trait_id}.csv")
            condition_data_path = os.path.join(condition_dir, f"{cond_id}.csv")

            if os.path.exists(trait_data_path) and os.path.exists(condition_data_path):
                try:
                    trait_data = pd.read_csv(trait_data_path, index_col=0).astype('float')
                    condition_data = pd.read_csv(condition_data_path, index_col=0).astype('float')

                    gene_regressors = get_gene_regressors(trait, condition, trait_data, condition_data, gene_info_path)
                    if gene_regressors:
                        return trait_id, cond_id
                except Exception as e:
                    print(f"Error processing pair ({trait_id}, {cond_id}): {str(e)}")
                    continue

        return None

def calculate_metrics_from_confusion(tp, fp, tn, fn):
    """
    Calculate precision, recall, F1, and accuracy from confusion matrix values.

    Args:
        tp: True positives
        fp: False positives
        tn: True negatives
        fn: False negatives

    Returns:
        Dictionary of metrics, expressed as percentages
    """
    precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
    recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0.0
    accuracy = (tp + tn) / (tp + tn + fp + fn) if (tp + tn + fp + fn) > 0 else 0.0

    return {
        'precision': precision * 100,
        'recall': recall * 100,
        'f1': f1 * 100,
        'accuracy': accuracy * 100
    }

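# Worked example for calculate_metrics_from_confusion (values approximate):
#
#     >>> calculate_metrics_from_confusion(tp=8, fp=2, tn=5, fn=1)
#     {'precision': 80.0, 'recall': 88.88..., 'f1': 84.21..., 'accuracy': 81.25}
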
def calculate_jaccard(set1, set2):
    """Calculate Jaccard similarity between two sets."""
    intersection = len(set1.intersection(set2))
    union = len(set1.union(set2))
    return 0.0 if union == 0 else intersection / union

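# Example: calculate_jaccard({'a', 'b', 'c'}, {'b', 'c', 'd'}) == 0.5
# (an intersection of 2 elements over a union of 4).
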
def calculate_pearson_correlation(df1, df2):
    """Calculate Pearson correlation between common features in two dataframes.

    Optimized for large datasets using numpy vectorization.
    """
    common_samples = df1.index.intersection(df2.index)
    common_features = df1.columns.intersection(df2.columns)

    if len(common_samples) == 0 or len(common_features) == 0:
        return 0.0

    aligned_df1 = df1.loc[common_samples, common_features]
    aligned_df2 = df2.loc[common_samples, common_features]

    # Impute missing values with the column mean, then fall back to 0.0 for
    # columns that are entirely NaN.
    aligned_df1 = aligned_df1.fillna(aligned_df1.mean())
    aligned_df2 = aligned_df2.fillna(aligned_df2.mean())
    aligned_df1 = aligned_df1.fillna(0.0)
    aligned_df2 = aligned_df2.fillna(0.0)

    try:
        X = aligned_df1.values
        Y = aligned_df2.values
        n_samples = X.shape[0]

        X_centered = X - np.mean(X, axis=0)
        Y_centered = Y - np.mean(Y, axis=0)

        X_std = np.std(X, axis=0)
        Y_std = np.std(Y, axis=0)

        # Skip constant columns, which have zero standard deviation.
        valid_cols = (X_std != 0) & (Y_std != 0)
        if not np.any(valid_cols):
            return 0.0

        # Column-wise Pearson correlation computed in a single vectorized pass.
        numerator = np.sum(X_centered[:, valid_cols] * Y_centered[:, valid_cols], axis=0)
        denominator = n_samples * X_std[valid_cols] * Y_std[valid_cols]
        correlations = numerator / denominator

        correlations = np.nan_to_num(correlations, nan=0.0)

        return float(np.mean(correlations))
    except Exception as e:
        print(f"Error calculating Pearson correlation: {str(e)}")
        return 0.0

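# The per-column statistic above is the standard Pearson correlation,
#     r_j = sum_i (X_ij - mean(X_j)) * (Y_ij - mean(Y_j)) / (n * std(X_j) * std(Y_j)),
# and it matches np.corrcoef because np.std defaults to the population standard
# deviation (ddof=0), whose normalization cancels in the ratio. A sanity-check
# sketch with made-up data (not executed at import time):
#
#     >>> a = pd.DataFrame({'g1': [1.0, 2.0, 3.0]}, index=['s1', 's2', 's3'])
#     >>> b = pd.DataFrame({'g1': [1.0, 2.0, 4.0]}, index=['s1', 's2', 's3'])
#     >>> round(calculate_pearson_correlation(a, b), 4) == round(float(np.corrcoef([1, 2, 3], [1, 2, 4])[0, 1]), 4)
#     True
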
def evaluate_csv(pred_file_path, ref_file_path, subtask="linked"):
    """
    Evaluate preprocessing by comparing prediction and reference CSV files.

    Args:
        pred_file_path: Path to the prediction CSV file
        ref_file_path: Path to the reference CSV file
        subtask: The preprocessing subtask ('gene', 'clinical', 'linked')

    Returns:
        Dictionary of evaluation metrics
    """
    default_metrics = {
        'attributes_jaccard': 0.0,
        'samples_jaccard': 0.0,
        'feature_correlation': 0.0,
        'composite_similarity_correlation': 0.0
    }

    if not os.path.isfile(pred_file_path):
        return default_metrics

    try:
        df1 = pd.read_csv(pred_file_path, index_col=0)
        df2 = pd.read_csv(ref_file_path, index_col=0)

        df1.index.name = None
        df1.columns.name = None
        df2.index.name = None
        df2.columns.name = None

        # Gene and clinical matrices are transposed before comparison; linked
        # data is compared as-is.
        if subtask != "linked":
            df1 = df1.T
            df2 = df2.T

        if df1.empty or df2.empty:
            return default_metrics

        attributes_jaccard = calculate_jaccard(set(df1.columns), set(df2.columns))
        samples_jaccard = calculate_jaccard(set(df1.index), set(df2.index))
        feature_correlation = calculate_pearson_correlation(df1, df2)
        composite_similarity_correlation = attributes_jaccard * samples_jaccard * feature_correlation

        return {
            'attributes_jaccard': attributes_jaccard,
            'samples_jaccard': samples_jaccard,
            'feature_correlation': feature_correlation,
            'composite_similarity_correlation': composite_similarity_correlation
        }
    except Exception as e:
        print(f"Error processing {pred_file_path} and {ref_file_path}")
        print(f"Error details: {str(e)}")
        print(traceback.format_exc())
        return default_metrics

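# The composite score above is the product of the three components, so it is
# 1.0 only when all three are 1.0. For example, attribute and sample Jaccard
# scores of 0.9 and 0.8 with a feature correlation of 0.5 give
# 0.9 * 0.8 * 0.5 = 0.36.
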
def display_running_average(pbar, metrics_list, task_name, metrics_to_show=None,
                            second_metrics_list=None, second_task_name=None, second_metrics_to_show=None):
    """
    Display running average of metrics in the progress bar.

    Args:
        pbar: tqdm progress bar
        metrics_list: List of metric dictionaries
        task_name: Name of the task for display
        metrics_to_show: List of metrics to display (if None, show all numeric metrics)
        second_metrics_list: Optional second list of metrics to display (e.g., selection metrics)
        second_task_name: Name for the second task
        second_metrics_to_show: Metrics to show for the second task
    """
    if not metrics_list:
        pbar.set_description(f"{task_name}: No metrics yet")
        return

    avg_metrics = average_metrics(metrics_list)

    if metrics_to_show is None:
        metrics_to_show = [k for k, v in avg_metrics.items() if isinstance(v, (int, float))]
    metrics_to_show = [m for m in metrics_to_show if m not in ['trait', 'file', 'condition', 'category']]

    desc_parts = []
    for metric in metrics_to_show:
        if metric in avg_metrics:
            desc_parts.append(f"{metric[:3]}={avg_metrics[metric]:.2f}")

    second_desc_parts = []
    if second_metrics_list and second_task_name:
        second_avg_metrics = average_metrics(second_metrics_list)

        if second_metrics_to_show is None:
            second_metrics_to_show = [k for k, v in second_avg_metrics.items()
                                      if isinstance(v, (int, float))]
        second_metrics_to_show = [m for m in second_metrics_to_show
                                  if m not in ['trait', 'file', 'condition', 'category']]

        for metric in second_metrics_to_show:
            if metric in second_avg_metrics:
                second_desc_parts.append(f"{metric[:3]}={second_avg_metrics[metric]:.2f}")

    description = (f"{task_name}: " + " ".join(desc_parts)) if desc_parts else f"{task_name}: No metrics yet"

    if second_desc_parts and second_task_name:
        description += f" | {second_task_name}: " + " ".join(second_desc_parts)

    pbar.set_description(description)

def evaluate_dataset_preprocessing(pred_dir, ref_dir, subtasks=None):
    """
    Evaluate preprocessing by comparing predicted and reference datasets.

    Args:
        pred_dir: Path to prediction directory
        ref_dir: Path to reference directory
        subtasks: List of subtasks to evaluate ('gene', 'clinical', 'linked'),
            or None to evaluate all

    Returns:
        Dictionary of evaluation metrics for each subtask
    """
    results = {}
    if subtasks is None:
        subtasks = ["gene", "clinical", "linked"]

    pred_preprocess_dir = os.path.join(pred_dir, "preprocess")
    ref_preprocess_dir = os.path.join(ref_dir, "preprocess")

    if not os.path.exists(pred_preprocess_dir):
        print(f"Warning: Preprocessing prediction directory '{pred_preprocess_dir}' does not exist.")
        return {subtask: {} for subtask in subtasks}

    for subtask in subtasks:
        metrics_list = []
        processed_count = 0

        trait_dirs = []
        for t in os.listdir(ref_preprocess_dir):
            ref_trait_dir = os.path.join(ref_preprocess_dir, t)
            if os.path.isdir(ref_trait_dir):
                trait_dirs.append(t)

        # Count reference CSV files up front so progress messages can report totals.
        total_files = 0
        for trait in trait_dirs:
            ref_trait_dir = os.path.join(ref_preprocess_dir, trait)

            if subtask in ["gene", "clinical"]:
                sub_dir = os.path.join(ref_trait_dir, f"{subtask}_data")
            else:
                sub_dir = ref_trait_dir

            if os.path.isdir(sub_dir):
                csv_files = [f for f in os.listdir(sub_dir) if f.endswith(".csv")]
                total_files += len(csv_files)

        with tqdm(total=len(trait_dirs), desc=f"Evaluating {subtask} data preprocessing") as pbar:
            for trait_idx, trait in enumerate(trait_dirs):
                ref_trait_dir = os.path.join(ref_preprocess_dir, trait)

                if subtask in ["gene", "clinical"]:
                    sub_dir = os.path.join(ref_trait_dir, f"{subtask}_data")
                else:
                    sub_dir = ref_trait_dir

                if not os.path.isdir(sub_dir):
                    pbar.update(1)
                    continue

                csv_files = [f for f in sorted(os.listdir(sub_dir)) if f.endswith(".csv")]
                for file_idx, file in enumerate(csv_files):
                    ref_file_path = os.path.join(sub_dir, file)

                    if subtask in ["gene", "clinical"]:
                        pred_file_path = os.path.join(pred_preprocess_dir, trait, f"{subtask}_data", file)
                    else:
                        pred_file_path = os.path.join(pred_preprocess_dir, trait, file)

                    if not os.path.exists(pred_file_path):
                        continue

                    try:
                        file_metrics = evaluate_csv(pred_file_path, ref_file_path, subtask)
                        file_metrics['trait'] = trait
                        file_metrics['file'] = file

                        metrics_list.append(file_metrics)
                        processed_count += 1

                        # Periodically report progress and running averages.
                        if (processed_count % 5 == 0 or
                                processed_count == 1 or
                                file_idx == len(csv_files) - 1 or
                                trait_idx == len(trait_dirs) - 1):
                            pbar.write(f"\nProcessed {processed_count}/{total_files} files")
                            display_running_average(
                                pbar,
                                metrics_list,
                                f"{subtask.capitalize()} preprocessing",
                                ['feature_correlation', 'composite_similarity_correlation']
                            )
                    except Exception as e:
                        print(f"Error evaluating {trait}/{file}: {str(e)}")

                pbar.update(1)

        results[subtask] = {
            'per_file': metrics_list,
            'average': average_metrics(metrics_list)
        }

    return results

def evaluate_statistical_analysis(pred_dir, ref_dir):
    """Evaluate the statistical analysis (gene selection) task."""
    results = {}
    pred_regress_dir = os.path.join(pred_dir, 'regress')
    ref_regress_dir = os.path.join(ref_dir, 'regress')

    if not os.path.exists(pred_regress_dir):
        print(f"Warning: Statistical analysis prediction directory '{pred_regress_dir}' does not exist.")
        return {}, {}

    trait_dirs = [t for t in sorted(os.listdir(ref_regress_dir))
                  if os.path.isdir(os.path.join(ref_regress_dir, t))]

    # Collect all (trait, condition, reference file, prediction file) tuples.
    all_files = []
    for trait in trait_dirs:
        ref_trait_path = os.path.join(ref_regress_dir, trait)
        json_files = [f for f in sorted(os.listdir(ref_trait_path))
                      if f.startswith('significant_genes') and f.endswith('.json')]

        for filename in json_files:
            # The condition name is everything after the third underscore,
            # with the '.json' suffix stripped.
            parts = filename.split('_')
            condition = '_'.join(parts[3:])[:-5]
            ref_file = os.path.join(ref_trait_path, filename)
            pred_file = os.path.join(pred_regress_dir, trait, filename)
            all_files.append((trait, condition, ref_file, pred_file))

    metrics_for_display = []
    with tqdm(total=len(all_files), desc="Evaluating statistical analysis") as pbar:
        for i, (trait, condition, ref_file, pred_file) in enumerate(all_files):
            try:
                metrics = evaluate_problem_result(ref_file, pred_file)
                results[(trait, condition)] = metrics

                metrics_copy = metrics.copy()
                metrics_copy['trait'] = trait
                metrics_copy['condition'] = condition
                metrics_for_display.append(metrics_copy)

                if i == 0 or (i + 1) % 5 == 0 or i == len(all_files) - 1:
                    display_running_average(
                        pbar,
                        metrics_for_display,
                        "Statistical analysis",
                        ['precision', 'recall', 'f1', 'jaccard']
                    )
            except Exception as e:
                print(f"Error evaluating {pred_file}: {str(e)}")

            pbar.update(1)

    categorized_avg_metrics = categorize_and_aggregate(results)
    return results, categorized_avg_metrics

def evaluate_problem_result(ref_file, pred_file):
    """Calculate metrics for gene selection evaluation."""
    assert os.path.exists(ref_file), "Reference file does not exist"
    with open(ref_file, 'r') as rfile:
        ref = json.load(rfile)
    ref_genes = ref["significant_genes"]["Variable"]

    # Metrics default to NaN so that np.nanmean in average_metrics ignores
    # problems whose prediction file is missing.
    metrics = {'success': 0.0,
               'precision': np.nan,
               'recall': np.nan,
               'f1': np.nan,
               'auroc': np.nan,
               'gsea_es': np.nan,
               'trait_pred_accuracy': np.nan,
               'trait_pred_f1': np.nan}

    if os.path.exists(pred_file):
        with open(pred_file, 'r') as file:
            result = json.load(file)
        pred_genes = result["significant_genes"]["Variable"]
        metrics.update(evaluate_gene_selection(pred_genes, ref_genes))

        # Cross-validation performance is optional in the prediction output.
        try:
            metrics['trait_pred_accuracy'] = result["cv_performance"]["prediction"]["accuracy"]
        except KeyError:
            pass
        try:
            metrics['trait_pred_f1'] = result["cv_performance"]["prediction"]["f1"]
        except KeyError:
            pass

        metrics['success'] = 100.0

    return metrics

def categorize_and_aggregate(results):
    """Categorize and aggregate metrics by condition type."""
    categorized_results = {'Unconditional one-step': [], 'Conditional one-step': [], 'Two-step': []}
    for pair, metrics in results.items():
        condition = pair[1]
        if condition is None or condition.lower() == "none":
            category = 'Unconditional one-step'
        elif condition.lower() in ["age", "gender"]:
            category = 'Conditional one-step'
        else:
            category = 'Two-step'
        categorized_results[category].append(metrics)

    aggregated_metrics = {}
    for category, metrics_list in categorized_results.items():
        aggregated_metrics[category] = average_metrics(metrics_list)
    aggregated_metrics['Overall'] = average_metrics(
        [metric for sublist in categorized_results.values() for metric in sublist])
    return aggregated_metrics

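# How problems are categorized above (trait/condition names are illustrative):
#   ('Hypertension', 'None')     -> 'Unconditional one-step'
#   ('Hypertension', 'Age')      -> 'Conditional one-step'
#   ('Hypertension', 'Diabetes') -> 'Two-step'
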
def main(pred_dir, ref_dir, tasks=None, preprocess_subtasks=None):
    """
    Main evaluation function that can evaluate different tasks.

    Args:
        pred_dir: Path to prediction directory
        ref_dir: Path to reference directory
        tasks: List of tasks to evaluate ('selection', 'preprocessing', 'analysis'),
            or None to evaluate all
        preprocess_subtasks: List of preprocessing subtasks to evaluate
            ('gene', 'clinical', 'linked'), or None to evaluate all

    Returns:
        Dictionary of evaluation results for each task
    """
    if tasks is None:
        tasks = ["selection", "preprocessing", "analysis"]

    results = {}

    if "selection" in tasks:
        print("\n=== Evaluating Dataset Selection ===")
        results["selection"] = evaluate_dataset_selection(pred_dir, ref_dir)

        print("\nDataset Selection Results:")
        if "filtering_metrics" in results["selection"]:
            filtering_avg = results["selection"]["filtering_metrics"]["average"]
            print("\nFiltering Average Metrics:")
            for metric, value in filtering_avg.items():
                if isinstance(value, (int, float)):
                    print(f" {metric}: {value:.4f}")

        if "selection_metrics" in results["selection"]:
            selection_avg = results["selection"]["selection_metrics"]["average"]
            print("\nSelection Average Metrics:")
            for metric, value in selection_avg.items():
                if isinstance(value, (int, float)):
                    print(f" {metric}: {value:.4f}")

    if "preprocessing" in tasks:
        print("\n=== Evaluating Dataset Preprocessing ===")
        results["preprocessing"] = evaluate_dataset_preprocessing(pred_dir, ref_dir, preprocess_subtasks)

        print("\nDataset Preprocessing Results:")
        for subtask, subtask_results in results["preprocessing"].items():
            if "average" in subtask_results:
                avg_metrics = subtask_results["average"]
                print(f"\n{subtask.capitalize()} Average Metrics:")
                for metric, value in avg_metrics.items():
                    if isinstance(value, (int, float)):
                        print(f" {metric}: {value:.4f}")
            else:
                print(f" No results available for {subtask}")

    if "analysis" in tasks:
        print("\n=== Evaluating Statistical Analysis ===")
        problem_results, categorized_metrics = evaluate_statistical_analysis(pred_dir, ref_dir)
        results["analysis"] = {
            "problem_results": problem_results,
            "categorized": categorized_metrics
        }

        print("\nStatistical Analysis Results:")
        for category, metrics in categorized_metrics.items():
            print(f"\n{category} Metrics:")
            for metric, value in metrics.items():
                if isinstance(value, (int, float)):
                    print(f" {metric}: {value:.4f}")

    return results

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Evaluation script for GeneTex")
    parser.add_argument("-p", "--pred-dir", type=str, default="./pred",
                        help="Path to the prediction directory")
    parser.add_argument("-r", "--ref-dir", type=str, default="./output",
                        help="Path to the reference directory")
    parser.add_argument("-t", "--tasks", type=str, nargs="+",
                        choices=["selection", "preprocessing", "analysis"], default=None,
                        help="Tasks to evaluate (default: all)")
    parser.add_argument("-s", "--preprocess-subtasks", type=str, nargs="+",
                        choices=["gene", "clinical", "linked"], default=None,
                        help="Preprocessing subtasks to evaluate (default: all)")

    args = parser.parse_args()

    try:
        results = main(args.pred_dir, args.ref_dir, args.tasks, args.preprocess_subtasks)
    except Exception as e:
        print(f"Error in evaluation process: {str(e)}")
        print(traceback.format_exc())
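
# Example invocation (the script name depends on how this file is saved):
#
#     python evaluate.py -p ./pred -r ./output -t selection analysis -s gene linked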