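# Preprocessing pipeline for the Molecule3D dataset: convert the SDF shards to
# SMILES, sanitize them with MolVS, attach the quantum-chemical properties, and
# write the random and scaffold train/test/validation splits as Parquet files.
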
import pandas as pd
import numpy as np
import urllib.request
import tqdm
import rdkit
from rdkit import Chem
import os
import molvs
import csv
import json
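
# MolVS tools used in the sanitization step below: the standardizer normalizes
# molecule representations and the fragment remover strips common salt/solvent fragments.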
standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()
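
# Step 1: convert each Molecule3D SDF shard into a CSV of canonical SMILES.
# Adjust base_dir to the directory holding the combined_mols_*.sdf shards.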
file_ranges = [
    (0, 1000000),
    (1000001, 2000000),
    (2000001, 3000000),
    (3000001, 3899647)
]

base_dir = '/YOUR LOCAL DIRECTORY/'

for start, end in file_ranges:
    sdf_file = os.path.join(base_dir, f'combined_mols_{start}_to_{end}.sdf')
    output_csv = os.path.join(base_dir, f'smiles_sdf_{start}_{end}.csv')

    suppl = Chem.SDMolSupplier(sdf_file)

    with open(output_csv, mode='w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(['index', 'SMILES'])

        for idx, mol in enumerate(suppl):
            # Skip records RDKit fails to parse
            if mol is None:
                continue

            smiles = Chem.MolToSmiles(mol)
            # Index is offset by the shard's start position
            writer.writerow([f'{idx + start + 1}', smiles])

'''The step above writes these per-shard files:
smiles_sdf_0_1000000.csv
smiles_sdf_1000001_2000000.csv
smiles_sdf_2000001_3000000.csv
smiles_sdf_3000001_3899647.csv'''

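# Step 2: load the per-shard SMILES CSVs and report any rows with missing values.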
df1 = pd.read_csv(os.path.join(base_dir, 'smiles_sdf_0_1000000.csv'))
df2 = pd.read_csv(os.path.join(base_dir, 'smiles_sdf_1000001_2000000.csv'))
df3 = pd.read_csv(os.path.join(base_dir, 'smiles_sdf_2000001_3000000.csv'))
df4 = pd.read_csv(os.path.join(base_dir, 'smiles_sdf_3000001_3899647.csv'))

missing_1 = df1[df1.isna().any(axis=1)]
missing_2 = df2[df2.isna().any(axis=1)]
missing_3 = df3[df3.isna().any(axis=1)]
missing_4 = df4[df4.isna().any(axis=1)]

print('Missing rows in smiles_sdf_0_1000000.csv:', missing_1)
print('Missing rows in smiles_sdf_1000001_2000000.csv:', missing_2)
print('Missing rows in smiles_sdf_2000001_3000000.csv:', missing_3)
print('Missing rows in smiles_sdf_3000001_3899647.csv:', missing_4)

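# Step 3: sanitize each shard. Every SMILES is parsed with RDKit, standardized
# with MolVS, stripped of salt/solvent fragments, and written back as a new
# canonical SMILES in column 'X'; MolVS validation alerts are printed.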
# Sanitize shard 1 (0-1000000)
df1['X'] = [
    rdkit.Chem.MolToSmiles(
        fragment_remover.remove(
            standardizer.standardize(
                rdkit.Chem.MolFromSmiles(smiles))))
    for smiles in df1['SMILES']]

problems = []
for index, row in tqdm.tqdm(df1.iterrows(), total=len(df1)):
    result = molvs.validate_smiles(row['X'])
    if len(result) == 0:
        continue
    problems.append((row['X'], result))

for smi, alerts in problems:
    print(f"SMILES: {smi}, problem: {alerts[0]}")

# The default index is also written; it is read back later as 'Unnamed: 0'.
df1.to_csv('smiles_sdf_0_1000000_sanitized.csv')

# Sanitize shard 2 (1000001-2000000)
df2['X'] = [
    rdkit.Chem.MolToSmiles(
        fragment_remover.remove(
            standardizer.standardize(
                rdkit.Chem.MolFromSmiles(smiles))))
    for smiles in df2['SMILES']]

problems = []
for index, row in tqdm.tqdm(df2.iterrows(), total=len(df2)):
    result = molvs.validate_smiles(row['X'])
    if len(result) == 0:
        continue
    problems.append((row['X'], result))

for smi, alerts in problems:
    print(f"SMILES: {smi}, problem: {alerts[0]}")

df2.to_csv('smiles_sdf_1000001_2000000_sanitized.csv')

# Sanitize shard 3 (2000001-3000000)
df3['X'] = [
    rdkit.Chem.MolToSmiles(
        fragment_remover.remove(
            standardizer.standardize(
                rdkit.Chem.MolFromSmiles(smiles))))
    for smiles in df3['SMILES']]

problems = []
for index, row in tqdm.tqdm(df3.iterrows(), total=len(df3)):
    result = molvs.validate_smiles(row['X'])
    if len(result) == 0:
        continue
    problems.append((row['X'], result))

for smi, alerts in problems:
    print(f"SMILES: {smi}, problem: {alerts[0]}")

df3.to_csv('smiles_sdf_2000001_3000000_sanitized.csv')

# Sanitize shard 4 (3000001-3899647)
df4['X'] = [
    rdkit.Chem.MolToSmiles(
        fragment_remover.remove(
            standardizer.standardize(
                rdkit.Chem.MolFromSmiles(smiles))))
    for smiles in df4['SMILES']]

problems = []
for index, row in tqdm.tqdm(df4.iterrows(), total=len(df4)):
    result = molvs.validate_smiles(row['X'])
    if len(result) == 0:
        continue
    problems.append((row['X'], result))

for smi, alerts in problems:
    print(f"SMILES: {smi}, problem: {alerts[0]}")

df4.to_csv('smiles_sdf_3000001_3899647_sanitized.csv')

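# Step 4: concatenate the four sanitized shards into a single table.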
sanitized1 = pd.read_csv('smiles_sdf_0_1000000_sanitized.csv')
sanitized2 = pd.read_csv('smiles_sdf_1000001_2000000_sanitized.csv')
sanitized3 = pd.read_csv('smiles_sdf_2000001_3000000_sanitized.csv')
sanitized4 = pd.read_csv('smiles_sdf_3000001_3899647_sanitized.csv')

smiles_sdf_concatenated = pd.concat([sanitized1, sanitized2, sanitized3, sanitized4], ignore_index=True)

smiles_sdf_concatenated.to_csv('smiles_sdf_concatenated.csv', index=False)

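# Step 5: attach the property table (dipole components, HOMO, LUMO, HOMO-LUMO gap,
# SCF energy) from properties.csv.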
smiles_sdf_concatenated = pd.read_csv('smiles_sdf_concatenated.csv')

properties = pd.read_csv('properties.csv')

# Column-wise concatenation assumes properties.csv is row-aligned with the SMILES table
smiles_sdf_properties_concatenated = pd.concat([smiles_sdf_concatenated, properties], axis=1)

smiles_sdf_properties_concatenated.to_csv('smiles_sdf_properties.csv', index=False)

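# Select the final columns and rename them: 'Unnamed: 0' becomes 'index', the
# sanitized SMILES column 'X' becomes 'SMILES', and the HOMO-LUMO gap
# 'homolumogap' becomes the prediction target 'Y'.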
columns_selected = smiles_sdf_properties_concatenated[
    ['Unnamed: 0', 'X', 'sdf', 'cid', 'dipole x', 'dipole y', 'dipole z',
     'homo', 'lumo', 'homolumogap', 'scf energy']
].rename(columns={'Unnamed: 0': 'index', 'X': 'SMILES', 'homolumogap': 'Y'})

columns_selected.to_csv('Molecule3D_final.csv', index=False)

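# Step 6: split the final table with the random and scaffold split index files
# and save each subset as Parquet.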
Molecule3D_final = pd.read_csv('Molecule3D_final.csv')

with open('random_split_inds.json', 'r') as f:
    split_data = json.load(f)

random_train = Molecule3D_final[Molecule3D_final['index'].isin(split_data['train'])]
random_test = Molecule3D_final[Molecule3D_final['index'].isin(split_data['test'])]
random_valid = Molecule3D_final[Molecule3D_final['index'].isin(split_data['valid'])]

random_train.to_parquet('Molecule3D_random_train.parquet', index=False)
random_test.to_parquet('Molecule3D_random_test.parquet', index=False)
random_valid.to_parquet('Molecule3D_random_validation.parquet', index=False)

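# Same procedure for the scaffold split.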
with open('scaffold_split_inds.json', 'r') as f:
    split_scaffold = json.load(f)

scaffold_train = Molecule3D_final[Molecule3D_final['index'].isin(split_scaffold['train'])]
scaffold_test = Molecule3D_final[Molecule3D_final['index'].isin(split_scaffold['test'])]
scaffold_valid = Molecule3D_final[Molecule3D_final['index'].isin(split_scaffold['valid'])]

scaffold_train.to_parquet('Molecule3D_scaffold_train.parquet', index=False)
scaffold_test.to_parquet('Molecule3D_scaffold_test.parquet', index=False)
scaffold_valid.to_parquet('Molecule3D_scaffold_validation.parquet', index=False)