# This is a script for Molecule3D dataset preprocessing

# 1. Load modules
import pandas as pd
import numpy as np
import urllib.request
import tqdm
import rdkit
from rdkit import Chem
import os
import molvs
import csv
import json

standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()
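
# Quick illustration (not part of the pipeline) of what these two objects do,
# using a made-up sodium acetate salt: the standardizer normalizes the molecule
# and the fragment remover strips the counter-ion. The exact output SMILES may
# vary with the installed MolVS version.
_example = Chem.MolFromSmiles('CC(=O)[O-].[Na+]')
_example = fragment_remover.remove(standardizer.standardize(_example))
print(Chem.MolToSmiles(_example))  # the acetate fragment only, without [Na+]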


# 2. Download the original dataset
# Original data
#  Molecule3D: A Benchmark for Predicting 3D Geometries from Molecular Graphs
#  Zhao Xu, Youzhi Luo, Xuan Zhang, Xinyi Xu, Yaochen Xie, Meng Liu, Kaleb Dickerson, Cheng Deng, Maho Nakata, Shuiwang Ji

# Please download the files from the link below:
#  https://drive.google.com/drive/u/2/folders/1y-EyoDYMvWZwClc2uvXrM4_hQBtM85BI
# The rest of this script assumes the files have already been downloaded and unzipped
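
#  One possible way to fetch the folder programmatically (a sketch, not part of
#  the original instructions): the gdown package can download public Google Drive
#  folders. 'molecule3d_raw' below is just a hypothetical local folder name.
# import gdown
# gdown.download_folder(
#     url='https://drive.google.com/drive/u/2/folders/1y-EyoDYMvWZwClc2uvXrM4_hQBtM85BI',
#     output='molecule3d_raw',
#     quiet=False)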



# 3. This part writes each molecule's SMILES alongside its SDF block and saves CSV files
#  List of file ranges and corresponding SDF/CSV filenames
file_ranges = [
    (0, 1000000),
    (1000001, 2000000),
    (2000001, 3000000),
    (3000001, 3899647)
]

#  Base directory for input and output files
base_dir = '/YOUR LOCAL DIRECTORY/' # Please change this part

for start, end in file_ranges:
    sdf_file = os.path.join(base_dir, f'combined_mols_{start}_to_{end}.sdf')
    output_csv = os.path.join(base_dir, f'smiles_sdf_{start}_{end}.csv')

    # Read the SDF file
    suppl = Chem.SDMolSupplier(sdf_file)

    # Write to CSV file with SMILES
    with open(output_csv, mode='w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(['index', 'SMILES', 'sdf'])

        for idx, mol in enumerate(suppl):
            if mol is None:
                continue

            smiles = Chem.MolToSmiles(mol)
            # The molblock text is kept as the 'sdf' column selected later in step 8
            # (MolToMolBlock output is an assumption about that column's format)
            sdf_block = Chem.MolToMolBlock(mol)
            writer.writerow([f'{idx + start + 1}', smiles, sdf_block])
            
          
#  After this step the following files should exist:
#   smiles_sdf_0_1000000.csv
#   smiles_sdf_1000001_2000000.csv
#   smiles_sdf_2000001_3000000.csv
#   smiles_sdf_3000001_3899647.csv



# 4. Check if there are any missing SMILES or sdf

df1 = pd.read_csv(os.path.join(base_dir, 'smiles_sdf_0_1000000.csv'))  # Assumes 'base_dir' has already been set above
df2 = pd.read_csv(os.path.join(base_dir, 'smiles_sdf_1000001_2000000.csv'))
df3 = pd.read_csv(os.path.join(base_dir, 'smiles_sdf_2000001_3000000.csv'))
df4 = pd.read_csv(os.path.join(base_dir, 'smiles_sdf_3000001_3899647.csv'))

missing_1 = df1[df1.isna().any(axis = 1)]
missing_2 = df2[df2.isna().any(axis = 1)]
missing_3 = df3[df3.isna().any(axis = 1)]
missing_4 = df4[df4.isna().any(axis = 1)]

print('For smiles_sdf_0_1000000.csv file : ', missing_1)
print('For smiles_sdf_1000001_2000000.csv file : ', missing_2)
print('For smiles_sdf_2000001_3000000.csv file : ', missing_3)
print('For smiles_sdf_3000001_3899647.csv file : ', missing_4)
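
#  The NaN check above only catches empty cells. Molecules that RDKit failed to
#  parse in step 3 were skipped entirely, so they would show up as gaps in the
#  'index' column instead. An optional gap check (a sketch, assuming the index
#  is meant to be consecutive within each file):
for name, df in [('df1', df1), ('df2', df2), ('df3', df3), ('df4', df4)]:
    steps = df['index'].diff().dropna()
    print(f"{name}: {int((steps != 1).sum())} gap(s) in the index column")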



# 5. Sanitize the molecules with MolVS

# This part would take a few hours
sanitize_jobs = [
    (df1, 'smiles_sdf_0_1000000_sanitized.csv'),
    (df2, 'smiles_sdf_1000001_2000000_sanitized.csv'),
    (df3, 'smiles_sdf_2000001_3000000_sanitized.csv'),
    (df4, 'smiles_sdf_3000001_3899647_sanitized.csv'),
]

for df, sanitized_csv in sanitize_jobs:
    # Standardize each molecule, strip salt/solvent fragments, and store the canonical SMILES
    df['X'] = [
        rdkit.Chem.MolToSmiles(
            fragment_remover.remove(
                standardizer.standardize(
                    rdkit.Chem.MolFromSmiles(smiles))))
        for smiles in df['SMILES']]

    # Collect molecules that still raise MolVS validation alerts
    problems = []
    for index, row in tqdm.tqdm(df.iterrows(), total=len(df)):
        result = molvs.validate_smiles(row['X'])
        if len(result) == 0:
            continue
        problems.append((row['X'], result))

    #  Most alerts are raised because the molecule is a salt form and/or is not neutralized
    for smi, alert in problems:
        print(f"SMILES: {smi}, problem: {alert[0]}")

    df.to_csv(sanitized_csv)



# 6. Concatenate four sanitized files to one long file
sanitized1 = pd.read_csv('smiles_sdf_0_1000000_sanitized.csv')
sanitized2 = pd.read_csv('smiles_sdf_1000001_2000000_sanitized.csv')
sanitized3 = pd.read_csv('smiles_sdf_2000001_3000000_sanitized.csv')
sanitized4 = pd.read_csv('smiles_sdf_3000001_3899647_sanitized.csv')

smiles_sdf_concatenated = pd.concat([sanitized1, sanitized2, sanitized3, sanitized4], ignore_index=True)

smiles_sdf_concatenated.to_csv('smiles_sdf_concatenated.csv', index = False) 



# 7. Combine the properties file with smiles_sdf_concatenated.csv
smiles_sdf_concatenated = pd.read_csv('smiles_sdf_concatenated.csv')

properties = pd.read_csv('properties.csv')  # This file is also from the link provided above

smiles_sdf_properties_concatenated = pd.concat([smiles_sdf_concatenated, properties], axis=1)
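
#  pd.concat(..., axis=1) silently aligns the two frames by row position. A
#  minimal sanity check (this assumes properties.csv lists the molecules in the
#  same order as the SMILES table):
assert len(smiles_sdf_concatenated) == len(properties), \
    'Row counts differ between the SMILES table and properties.csv'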

smiles_sdf_properties_concatenated.to_csv('smiles_sdf_properties.csv', index = False)



# 8. Select and rename the columns
columns_selected = smiles_sdf_properties_concatenated[['Unnamed: 0', 'X', 'sdf', 'cid', 'dipole x', 'dipole y', 'dipole z', 'homo', 'lumo', 'homolumogap', 'scf energy']]
columns_selected = columns_selected.rename(columns={'Unnamed: 0': 'index', 'X': 'SMILES', 'homolumogap': 'Y'})

columns_selected.to_csv('Molecule3D_final.csv', index=False)



# 9. Split the dataset using the random split and the scaffold split
Molecule3D_final = pd.read_csv('Molecule3D_final.csv')

#  Random split
with open('random_split_inds.json', 'r') as f:
    split_data = json.load(f)

random_train = Molecule3D_final[Molecule3D_final['index'].isin(split_data['train'])]
random_test = Molecule3D_final[Molecule3D_final['index'].isin(split_data['test'])]
random_valid = Molecule3D_final[Molecule3D_final['index'].isin(split_data['valid'])]

random_train.to_parquet('Molecule3D_random_train.parquet', index=False)
random_test.to_parquet('Molecule3D_random_test.parquet', index=False)
random_valid.to_parquet('Molecule3D_random_validation.parquet', index=False)


# Scaffold split
with open('scaffold_split_inds.json', 'r') as f:
    split_scaffold = json.load(f)

scaffold_train = Molecule3D_final[Molecule3D_final['index'].isin(split_scaffold['train'])]
scaffold_test = Molecule3D_final[Molecule3D_final['index'].isin(split_scaffold['test'])]
scaffold_valid = Molecule3D_final[Molecule3D_final['index'].isin(split_scaffold['valid'])]

scaffold_train.to_parquet('Molecule3D_scaffold_train.parquet', index=False)
scaffold_test.to_parquet('Molecule3D_scaffold_test.parquet', index=False)
scaffold_valid.to_parquet('Molecule3D_scaffold_validation.parquet', index=False)
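
#  Optional sanity check (a sketch): report how many indices each split file
#  lists compared to the number of rows in the final table.
for name, splits in [('random', split_data), ('scaffold', split_scaffold)]:
    listed = sum(len(splits[k]) for k in ('train', 'valid', 'test'))
    print(f'{name} split lists {listed} indices; Molecule3D_final has {len(Molecule3D_final)} rows')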