# This is a script for Molecule3D dataset preprocessing

# 1. Load modules
import pandas as pd
import numpy as np
import urllib.request
import tqdm
import rdkit
from rdkit import Chem
import os
import molvs
import csv
import json

standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()

# 2. Download the original dataset
# Original data:
# Molecule3D: A Benchmark for Predicting 3D Geometries from Molecular Graphs
# Zhao Xu, Youzhi Luo, Xuan Zhang, Xinyi Xu, Yaochen Xie, Meng Liu, Kaleb Dickerson, Cheng Deng, Maho Nakata, Shuiwang Ji
# Please download the files from the link below:
# https://drive.google.com/drive/u/2/folders/1y-EyoDYMvWZwClc2uvXrM4_hQBtM85BI
# Suppose the files have been downloaded and unzipped

# 3. This part adds SMILES in addition to SDF and saves CSV files
# List of file ranges and corresponding SDF/CSV filenames
file_ranges = [
    (0, 1000000),
    (1000001, 2000000),
    (2000001, 3000000),
    (3000001, 3899647)
]

# Base directory for input and output files
base_dir = '/YOUR LOCAL DIRECTORY/'  # Please change this part

for start, end in file_ranges:
    sdf_file = os.path.join(base_dir, f'combined_mols_{start}_to_{end}.sdf')
    output_csv = os.path.join(base_dir, f'smiles_sdf_{start}_{end}.csv')

    # Read the SDF file
    suppl = Chem.SDMolSupplier(sdf_file)

    # Write a CSV file holding the index, the canonical SMILES, and the original molblock ('sdf')
    with open(output_csv, mode='w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(['index', 'SMILES', 'sdf'])
        for idx, mol in enumerate(suppl):
            if mol is None:
                continue
            smiles = Chem.MolToSmiles(mol)
            writer.writerow([f'{idx + start + 1}', smiles, Chem.MolToMolBlock(mol)])

'''
These files are expected to be stored:
smiles_sdf_0_1000000.csv
smiles_sdf_1000001_2000000.csv
smiles_sdf_2000001_3000000.csv
smiles_sdf_3000001_3899647.csv
'''

# 4. Check if there are any missing SMILES or sdf entries
# Suppose that you have already changed the 'base_dir' above
df1 = pd.read_csv(os.path.join(base_dir, 'smiles_sdf_0_1000000.csv'))
df2 = pd.read_csv(os.path.join(base_dir, 'smiles_sdf_1000001_2000000.csv'))
df3 = pd.read_csv(os.path.join(base_dir, 'smiles_sdf_2000001_3000000.csv'))
df4 = pd.read_csv(os.path.join(base_dir, 'smiles_sdf_3000001_3899647.csv'))

missing_1 = df1[df1.isna().any(axis=1)]
missing_2 = df2[df2.isna().any(axis=1)]
missing_3 = df3[df3.isna().any(axis=1)]
missing_4 = df4[df4.isna().any(axis=1)]

print('For smiles_sdf_0_1000000.csv file : ', missing_1)
print('For smiles_sdf_1000001_2000000.csv file : ', missing_2)
print('For smiles_sdf_2000001_3000000.csv file : ', missing_3)
print('For smiles_sdf_3000001_3899647.csv file : ', missing_4)
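# Before running the full sanitization in step 5 (which takes hours), it can help to see
# what the MolVS pipeline does to a single molecule. This is an optional, minimal sketch;
# the example SMILES (sodium acetate, a salt) is illustrative only and not taken from the dataset.
example_smiles = 'CC(=O)[O-].[Na+]'
example_clean = Chem.MolToSmiles(
    fragment_remover.remove(
        standardizer.standardize(
            Chem.MolFromSmiles(example_smiles))))
print(example_smiles, '->', example_clean)    # the counter-ion fragment should be stripped
print(molvs.validate_smiles(example_smiles))  # validation messages for the raw salt form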
# 5. Sanitize the molecules with MolVS
# This part would take a few hours
sanitize_targets = [
    (df1, 'smiles_sdf_0_1000000_sanitized.csv'),
    (df2, 'smiles_sdf_1000001_2000000_sanitized.csv'),
    (df3, 'smiles_sdf_2000001_3000000_sanitized.csv'),
    (df4, 'smiles_sdf_3000001_3899647_sanitized.csv'),
]

for df, sanitized_csv in sanitize_targets:
    # Standardize each molecule, strip salt/solvent fragments, and write back the canonical SMILES
    df['X'] = [
        rdkit.Chem.MolToSmiles(
            fragment_remover.remove(
                standardizer.standardize(
                    rdkit.Chem.MolFromSmiles(smiles))))
        for smiles in df['SMILES']]

    # Collect the molecules that MolVS still flags after sanitization
    problems = []
    for index, row in tqdm.tqdm(df.iterrows()):
        result = molvs.validate_smiles(row['X'])
        if len(result) == 0:
            continue
        problems.append((row['X'], result))

    # Most problems occur because the molecule is a salt form and/or is not neutralized
    for result, alert in problems:
        print(f"SMILES: {result}, problem: {alert[0]}")

    # Keep the default index when saving; it reappears as the 'Unnamed: 0' column used in step 8
    df.to_csv(sanitized_csv)

# 6. Concatenate the four sanitized files into one long file
sanitized1 = pd.read_csv('smiles_sdf_0_1000000_sanitized.csv')
sanitized2 = pd.read_csv('smiles_sdf_1000001_2000000_sanitized.csv')
sanitized3 = pd.read_csv('smiles_sdf_2000001_3000000_sanitized.csv')
sanitized4 = pd.read_csv('smiles_sdf_3000001_3899647_sanitized.csv')

smiles_sdf_concatenated = pd.concat([sanitized1, sanitized2, sanitized3, sanitized4], ignore_index=True)
smiles_sdf_concatenated.to_csv('smiles_sdf_concatenated.csv', index=False)

# 7. Combine the properties file with smiles_sdf_concatenated.csv
smiles_sdf_concatenated = pd.read_csv('smiles_sdf_concatenated.csv')
properties = pd.read_csv('properties.csv')  # This file is also from the link provided above

smiles_sdf_properties_concatenated = pd.concat([smiles_sdf_concatenated, properties], axis=1)
smiles_sdf_properties_concatenated.to_csv('smiles_sdf_properties.csv', index=False)

# 8. Select and rename the columns
columns_selected = smiles_sdf_properties_concatenated[
    ['Unnamed: 0', 'X', 'sdf', 'cid', 'dipole x', 'dipole y', 'dipole z',
     'homo', 'lumo', 'homolumogap', 'scf energy']].copy()
columns_selected = columns_selected.rename(columns={'Unnamed: 0': 'index', 'X': 'SMILES', 'homolumogap': 'Y'})
columns_selected.to_csv('Molecule3D_final.csv', index=False)
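# Optional sanity check before splitting (a minimal sketch; the expected row count follows
# from the file ranges in step 3 and may differ slightly if any SDF entries failed to parse).
final_check = pd.read_csv('Molecule3D_final.csv')
print(final_check.shape)                          # roughly 3.9 M rows expected
print(final_check.columns.tolist())               # should include 'index', 'SMILES', 'Y'
print(final_check[['SMILES', 'Y']].isna().sum())  # remaining missing values, ideally 0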
# 9. Split the dataset by using random split and scaffold split
Molecule3D_final = pd.read_csv('Molecule3D_final.csv')

# Random split
with open('random_split_inds.json', 'r') as f:
    split_data = json.load(f)

random_train = Molecule3D_final[Molecule3D_final['index'].isin(split_data['train'])]
random_test = Molecule3D_final[Molecule3D_final['index'].isin(split_data['test'])]
random_valid = Molecule3D_final[Molecule3D_final['index'].isin(split_data['valid'])]

random_train.to_parquet('Molecule3D_random_train.parquet', index=False)
random_test.to_parquet('Molecule3D_random_test.parquet', index=False)
random_valid.to_parquet('Molecule3D_random_validation.parquet', index=False)

# Scaffold split
with open('scaffold_split_inds.json', 'r') as f:
    split_scaffold = json.load(f)

scaffold_train = Molecule3D_final[Molecule3D_final['index'].isin(split_scaffold['train'])]
scaffold_test = Molecule3D_final[Molecule3D_final['index'].isin(split_scaffold['test'])]
scaffold_valid = Molecule3D_final[Molecule3D_final['index'].isin(split_scaffold['valid'])]

scaffold_train.to_parquet('Molecule3D_scaffold_train.parquet', index=False)
scaffold_test.to_parquet('Molecule3D_scaffold_test.parquet', index=False)
scaffold_valid.to_parquet('Molecule3D_scaffold_validation.parquet', index=False)
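# Optional verification (a minimal sketch): each split should partition Molecule3D_final into
# disjoint train/test/validation sets with no rows left over. This assumes the JSON index files
# list every 'index' value exactly once across the three subsets.
n_random = len(random_train) + len(random_test) + len(random_valid)
print(n_random, len(Molecule3D_final))                               # the two counts should match
print(len(set(random_train['index']) & set(random_test['index'])))   # train/test overlap, ideally 0
n_scaffold = len(scaffold_train) + len(scaffold_test) + len(scaffold_valid)
print(n_scaffold, len(Molecule3D_final))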