audrey06100 committed
Commit 4734edd · 1 Parent(s): d91102e
Files changed (2)
  1. app.py +44 -61
  2. app_utils.py +7 -9
app.py CHANGED
@@ -5,30 +5,29 @@ import os
5
  import random
6
 
7
  readme = """
8
-
9
- This tool serves two main purposes:
10
  1. **Channel Mapping**: Align your EEG channels with our template channels to ensure compatibility with our models.
11
  2. **EEG Data Denoising**: Use our pre-trained models—**ART**, **IC-U-Net**, **IC-U-Net++**, and **IC-U-Net-Attn**—to denoise your EEG data.
12
 
13
  ## File Requirements and Preparation
14
  - **Channel locations**: If you don't have the channel location file, we recommend downloading the standard montage <a href="">here</a>. If the channels in those files don't match yours, you can use **EEGLAB** to adjust them to your required montage.
15
  - **Raw data**: The data needs to be a two-dimensional array (channel, timepoint).
16
- - **Channel requirements**: Your data must include some channels that correspond to our template channels, including: ``Fp1, Fp2, F7, F3, Fz, F4, F8, FT7, FC3, FCz, FC4, FT8, T7, C3, Cz, C4, T8, TP7, CP3, CPz, CP4, TP8, P7, P3, Pz, P4, P8, O1, Oz, O2``. At least some of them need to be present for successful mapping.
17
- - **Channel removal**: Before uploading your files, please remove any reference, ECG, EOG, EMG... channels.
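Before uploading, you can sanity-check your channel-location file and see how many of the 30 template channels it covers. A minimal sketch, assuming MNE-Python is installed; the file name is hypothetical:

```python
# Sketch: inspect a channel-location file and compare it with the 30 template channels.
# Assumes MNE-Python; "my_montage.loc" is a hypothetical file name.
import mne

TEMPLATE = ["Fp1", "Fp2", "F7", "F3", "Fz", "F4", "F8", "FT7", "FC3", "FCz",
            "FC4", "FT8", "T7", "C3", "Cz", "C4", "T8", "TP7", "CP3", "CPz",
            "CP4", "TP8", "P7", "P3", "Pz", "P4", "P8", "O1", "Oz", "O2"]

montage = mne.channels.read_custom_montage("my_montage.loc")  # also reads .locs/.xyz/.sfp/.txt
matched = [ch for ch in montage.ch_names if ch in TEMPLATE]
unmatched = [ch for ch in montage.ch_names if ch not in TEMPLATE]
print(f"{len(matched)}/30 template channels matched")
print("channels without a template match:", unmatched)
```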
18
 
19
  ## 1. Channel Mapping
20
  The following steps will guide you through the process of mapping your EEG channels to our template channels.
21
 
22
  ### Step1: Initial Matching and Rescaling
23
  After clicking the ``Mapping`` button, we will first match your channels to our template channels by their names. Using the matched channels as reference points, we will apply a Thin Plate Spline (TPS) transformation to rescale your montage so that it aligns with our template's scale. The template montage and your rescaled montage will be displayed side by side for comparison. Channels that do not have a match in our template will be **highlighted in red**.
24
- - If your data includes all the 30 template channels, you will be directed to **Mapping Results**.
25
  - If your data doesn't include all the 30 template channels and you have some channels that do not match the template, you will be directed to **Step2**.
26
  - If all your channels are included in our template but you have fewer than 30 channels, you will be directed to **Step3**.
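For intuition, the TPS rescaling can be reproduced with SciPy's radial basis function interpolator and a thin-plate-spline kernel: the name-matched channels define a 2D warp from your coordinates to the template's, which is then applied to every input channel. A minimal sketch under that assumption (not the app's exact implementation; the coordinates are made up):

```python
# Sketch: thin-plate-spline warp from input montage coordinates to template coordinates.
# Assumes SciPy >= 1.7; the coordinates below are illustrative only.
import numpy as np
from scipy.interpolate import RBFInterpolator

# 2D positions of the channels that matched by name (same channel order in both arrays)
in_matched  = np.array([[0.00, 0.90], [-0.35, 0.75], [0.35, 0.75], [0.00, 0.00]])
tpl_matched = np.array([[0.00, 1.00], [-0.40, 0.80], [0.40, 0.80], [0.00, 0.00]])

# Fit the warp on the matched pairs, then rescale every input channel position
tps = RBFInterpolator(in_matched, tpl_matched, kernel="thin_plate_spline")
all_inputs = np.array([[0.00, 0.90], [0.20, 0.40], [-0.20, 0.40]])
rescaled = tps(all_inputs)          # positions aligned to the template's scale
print(rescaled)
```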
27
 
28
  ### Step2: Forwarding Unmatched Channels
29
  In this step, you will handle the channels that didn't have a direct match with our template, by manually assigning them to the template channels that are still empty, ensuring the most efficient use of your data.
30
  Your unmatched channels, previously highlighted in red, will be shown on your montage with a radio button displayed above each. You can choose to forward the data from these unmatched channels to the empty template channels. The interface will display each empty template channel in sequence, allowing you to select which of your unmatched channels to forward.
31
- - If all empty template channels are filled by your selections, you will be directed to **Mapping Results**.
32
  - If there are still empty template channels remaining, you will be directed to **Step3**.
33
 
34
  ### Step3: Filling Remaining Template Channels
@@ -36,7 +35,7 @@ To run the models successfully, we need to ensure that all 30 template channels
36
  - **Mean** method: Each empty template channel is filled with the average of the data from the nearest input channels. By default, the 4 closest input channels (determined after aligning your montage to the template's scale using TPS) are selected for this averaging process. On the interface, you will see checkboxes displayed above each of your channels. The 4 nearest channels are pre-selected by default for each empty template channel, but you can modify these selections as needed. If you uncheck all the checkboxes for a particular template channel, it will be filled with zeros.
37
  - **Zero** method: All empty template channels are filled with zeros.
38
  Choose the method that best suits your needs, considering that the model's performance may vary depending on the method used.
39
- Once all template channels are filled, you will be directed to **Mapping Results**.
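The default selection in the **Mean** method amounts to a k-nearest-neighbour lookup on the rescaled 2D positions: for each empty template channel, take the 4 closest input channels and average their signals. A minimal sketch assuming scikit-learn (illustrative arrays, not the app's code):

```python
# Sketch: fill one empty template channel with the mean of its 4 nearest input channels.
# Assumes scikit-learn; positions and signals are illustrative.
import numpy as np
from sklearn.neighbors import NearestNeighbors

in_positions = np.random.rand(20, 2)        # rescaled 2D positions of 20 input channels
in_signals   = np.random.rand(20, 1000)     # (channel, timepoint) raw data
empty_tpl_pos = np.array([[0.1, 0.8]])      # position of one unfilled template channel

nn = NearestNeighbors(n_neighbors=4).fit(in_positions)
_, idx = nn.kneighbors(empty_tpl_pos)       # indices of the 4 closest input channels
filled = in_signals[idx[0]].mean(axis=0)    # averaged signal used to fill the template channel
print(idx[0], filled.shape)
```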
40
 
41
  ### Mapping Results
42
  After completing the previous steps, your channels will be aligned with the template channels required by our models. If some of your channels still haven't been mapped, we will automatically batch them and optimally assign them to the template.
@@ -58,7 +57,7 @@ init_js = """
58
  channel_info = JSON.parse(JSON.stringify(channel_info));
59
  stage1_info = app_info.stage1
60
 
61
- let selector, classname, attribute;
62
  let channel, left, bottom;
63
 
64
  if(stage1_info.state == "step2-selecting"){
@@ -81,7 +80,6 @@ init_js = """
81
  //height: 560px;
82
  background: url("file=${stage1_info.fileNames.input_montage}");
83
  background-size: contain;
84
-
85
  `;
86
 
87
  // move the radios/checkboxes
@@ -96,7 +94,6 @@ init_js = """
96
  item.querySelector(":scope > span").innerText = "";
97
  });
98
 
99
-
100
  // add indication for the missing channels
101
  channel = stage1_info.missingTemplates[0];
102
  left = channel_info.templateDict[channel].css_position[0];
@@ -170,7 +167,7 @@ update_js = """
170
  selector = "#chkbox-group > div:nth-of-type(2)";
171
  }else return;
172
 
173
- // update indication
174
  channel = stage1_info.missingTemplates[stage1_info["fillingCount"]-1];
175
  left = channel_info.templateDict[channel].css_position[0];
176
  bottom = channel_info.templateDict[channel].css_position[1];
@@ -216,7 +213,6 @@ update_js = """
216
  }
217
  """
218
 
219
-
220
  with gr.Blocks() as demo:
221
 
222
  app_info_json = gr.JSON(visible=False)
@@ -231,51 +227,50 @@ with gr.Blocks() as demo:
231
  with gr.Row():
232
 
233
  with gr.Column():
 
234
  with gr.Row(variant='panel'):
235
  with gr.Column():
236
  gr.Markdown("# 1.Channel Mapping")
237
- # ------------------------input--------------------------
238
  in_loc_file = gr.File(label="Channel locations (.loc, .locs, .xyz, .sfp, .txt)",
239
  file_types=[".loc", "locs", ".xyz", ".sfp", ".txt"])
240
  with gr.Row():
241
  in_samplerate = gr.Textbox(label="Sampling rate (Hz)", scale=2)
242
  map_btn = gr.Button("Mapping", scale=1)
243
-
244
- # ------------------------mapping------------------------
245
- desc_md = gr.Markdown(visible=False)
246
- # step1 : initial matching and rescaling
247
- with gr.Row():
248
- tpl_img = gr.Image("./template_montage.png", label="Template channels", visible=False)
249
- mapped_img = gr.Image(label="Input channels", visible=False)
250
- # step2 : forward unmatched input channels to empty template channels
251
- radio_group = gr.Radio(elem_id="radio-group", visible=False)
252
- # step3 : fill the remaining template channels
253
- with gr.Row():
254
- in_fillmode = gr.Dropdown(choices=["mean", "zero"],
255
  value="mean",
256
  label="Filling method",
257
  visible=False,
258
  scale=2)
259
- fillmode_btn = gr.Button("OK", visible=False, scale=1)
260
- chkbox_group = gr.CheckboxGroup(elem_id="chkbox-group", visible=False)
261
- # step4 : mapping result
262
- out_json_file = gr.File(label="Mapping result", visible=False)
263
- res_md = gr.Markdown("""
264
- (Download this file if you plan to run the models using the source code.)
265
- """, visible=False)
266
-
267
- with gr.Row():
268
- clear_btn = gr.Button("Clear", visible=False)
269
- step2_btn = gr.Button("Next", visible=False)
270
- step3_btn = gr.Button("Next", visible=False)
271
- next_btn = gr.Button("Next step", visible=False)
272
- # -------------------------------------------------------
273
 
274
  with gr.Column():
 
275
  with gr.Row(variant='panel'):
276
  with gr.Column():
277
  gr.Markdown("# 2.Decode Data")
278
- # ------------------------input--------------------------
279
  in_data_file = gr.File(label="Raw data (.csv)", file_types=[".csv"])
280
  with gr.Row():
281
  in_modelname = gr.Dropdown(choices=[
@@ -283,17 +278,15 @@ with gr.Blocks() as demo:
283
  ("IC-U-Net", "ICUNet"),
284
  ("IC-U-Net++", "UNetpp"),
285
  ("IC-U-Net-Attn", "AttUnet")],
286
- #"(mapped data)",
287
- #"(denoised data)"],
288
  value="EEGART",
289
  label="Model",
290
  scale=2)
291
  run_btn = gr.Button(interactive=False, scale=1)
292
-
293
- # ------------------------output-------------------------
294
- batch_md = gr.Markdown(visible=False)
295
- out_data_file = gr.File(label="Denoised data", visible=False)
296
- # -------------------------------------------------------
297
 
298
  with gr.Row():
299
  with gr.Tab("ART"):
@@ -306,8 +299,6 @@ with gr.Blocks() as demo:
306
  gr.Markdown()
307
  with gr.Tab("README"):
308
  gr.Markdown(readme)
309
-
310
- #demo.load(js=js)
311
 
312
  # +========================================================================================+
313
  # | Stage1: channel mapping |
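The interface above declares most components with `visible=False`; the Stage1 handlers that follow reveal or update them by returning new component constructors keyed by component, as `determine_button` does further down. A hedged, stand-alone sketch of that Gradio pattern (assuming Gradio 4.x; not the app's actual wiring):

```python
# Toy sketch of the visibility-toggle pattern used by the handlers below (assumes Gradio 4.x).
import gradio as gr

with gr.Blocks() as toy:
    map_button = gr.Button("Mapping")
    result_md = gr.Markdown("Mapping finished.", visible=False)
    next_button = gr.Button("Next step", visible=False)

    @map_button.click(outputs=[result_md, next_button])
    def reveal():
        # Returning component constructors updates the properties of the existing components.
        return {result_md: gr.Markdown(visible=True),
                next_button: gr.Button(visible=True)}

# toy.launch()
```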
@@ -320,7 +311,7 @@ with gr.Blocks() as demo:
320
  except OSError as e:
321
  utils.dataDelete(rootpath+"/session_data/")
322
  os.mkdir(rootpath+"/session_data/")
323
- print(e)
324
  os.mkdir(rootpath+"/session_data/stage1/")
325
  os.mkdir(rootpath+"/session_data/stage2/")
326
 
@@ -429,7 +420,6 @@ with gr.Blocks() as demo:
429
  tpl_img : gr.Image(visible=True),
430
  mapped_img : gr.Image(value=filename2, visible=True),
431
  next_btn : gr.Button(visible=True)}
432
-
433
 
434
  # ========================================step1=========================================
435
  elif stage1_info["state"] == "step1-finished":
@@ -706,16 +696,16 @@ with gr.Blocks() as demo:
706
  # +========================================================================================+
707
  # | Stage1-step2 |
708
  # +========================================================================================+
709
- # ...
710
  @radio_group.select(inputs = app_info_json, outputs = [step2_btn, next_btn])
711
  def determine_button(app_info):
712
  stage1_info = app_info["stage1"]
713
- if len(stage1_info["unassignedInputs"])==1 and stage1_info["fillingCount"]<stage1_info["totalFillingNum"]:
714
  return {step2_btn : gr.Button(visible=False),
715
  next_btn : gr.Button(visible=True)}
716
  else:
717
  return {step2_btn : gr.Button()} # change nothing
718
-
719
  @clear_btn.click(inputs = app_info_json, outputs = [radio_group, step2_btn, next_btn])
720
  def clear_value(app_info):
721
  stage1_info = app_info["stage1"]
@@ -905,19 +895,12 @@ with gr.Blocks() as demo:
905
  try:
906
  # step1: Reorder input data
907
  data_shape = app_utils.reorder_data(new_idx, fill_flags, filename, filepath+"temp_data/mapped.csv")
908
- if modelname == "(mapped data)":
909
- new_filename = filepath+"temp_data/mapped.csv"
910
- break
911
  # step2: Data preprocessing
912
  total_file_num = utils.preprocessing(filepath+"temp_data/", "mapped.csv", samplerate)
913
  # step3: Signal reconstruction
914
  utils.reconstruct(modelname, total_file_num, filepath+"temp_data/", "denoised.csv", samplerate)
915
- if modelname == "(denoised data)":
916
- new_filename = filepath+"temp_data/denoised.csv"
917
- break
918
  # step4: Restore original order
919
  app_utils.restore_order(i, data_shape, new_idx, fill_flags, filepath+"temp_data/denoised.csv", new_filename)
920
- break
921
  except FileNotFoundError:
922
  print('break2!!')
923
  break_flag = True
 
5
  import random
6
 
7
  readme = """
8
+ ## Introduction
9
+ This tool is designed to assist you in two key tasks:
10
  1. **Channel Mapping**: Align your EEG channels with our template channels to ensure compatibility with our models.
11
  2. **EEG Data Denoising**: Use our pre-trained models—**ART**, **IC-U-Net**, **IC-U-Net++**, and **IC-U-Net-Attn**—to denoise your EEG data.
12
 
13
  ## File Requirements and Preparation
14
  - **Channel locations**: If you don't have the channel location file, we recommend downloading the standard montage <a href="">here</a>. If the channels in those files don't match yours, you can use **EEGLAB** to adjust them to your required montage.
15
  - **Raw data**: The data needs to be a two-dimensional array (channel, timepoint).
16
+ - **Channel requirements**: Your data must include at least some channels that correspond to our template channels, namely: ``Fp1, Fp2, F7, F3, Fz, F4, F8, FT7, FC3, FCz, FC4, FT8, T7, C3, Cz, C4, T8, TP7, CP3, CPz, CP4, TP8, P7, P3, Pz, P4, P8, O1, Oz, O2``; otherwise the mapping cannot succeed. Additionally, please remove any reference, ECG, EOG, EMG... channels before uploading your files.
 
17
 
18
  ## 1. Channel Mapping
19
  The following steps will guide you through the process of mapping your EEG channels to our template channels.
20
 
21
  ### Step1: Initial Matching and Rescaling
22
  After clicking the ``Mapping`` button, we will first match your channels to our template channels by their names. Using the matched channels as reference points, we will apply a Thin Plate Spline (TPS) transformation to rescale your montage so that it aligns with our template's scale. The template montage and your rescaled montage will be displayed side by side for comparison. Channels that do not have a match in our template will be **highlighted in red**.
23
+ - If your data includes all the 30 template channels, you will see the **Mapping Results**.
24
  - If your data doesn't include all the 30 template channels and you have some channels that do not match the template, you will be directed to **Step2**.
25
  - If all your channels are included in our template but you have fewer than 30 channels, you will be directed to **Step3**.
26
 
27
  ### Step2: Forwarding Unmatched Channels
28
  In this step, you will handle the channels that didn't have a direct match with our template, by manually assigning them to the template channels that are still empty, ensuring the most efficient use of your data.
29
  Your unmatched channels, previously highlighted in red, will be shown on your montage with a radio button displayed above each. You can choose to forward the data from these unmatched channels to the empty template channels. The interface will display each empty template channel in sequence, allowing you to select which of your unmatched channels to forward.
30
+ - If all empty template channels are filled by your selections, you will see the **Mapping Results**.
31
  - If there are still empty template channels remaining, you will be directed to **Step3**.
32
 
33
  ### Step3: Filling Remaining Template Channels
 
35
  - **Mean** method: Each empty template channel is filled with the average of the data from the nearest input channels. By default, the 4 closest input channels (determined after aligning your montage to the template's scale using TPS) are selected for this averaging process. On the interface, you will see checkboxes displayed above each of your channels. The 4 nearest channels are pre-selected by default for each empty template channel, but you can modify these selections as needed. If you uncheck all the checkboxes for a particular template channel, it will be filled with zeros.
36
  - **Zero** method: All empty template channels are filled with zeros.
37
  Choose the method that best suits your needs, considering that the model's performance may vary depending on the method used.
38
+ Once all template channels are filled, you will see the **Mapping Results**.
39
 
40
  ### Mapping Results
41
  After completing the previous steps, your channels will be aligned with the template channels required by our models. If some of your channels still haven't been mapped, we will automatically batch them and optimally assign them to the template.
 
57
  channel_info = JSON.parse(JSON.stringify(channel_info));
58
  stage1_info = app_info.stage1
59
 
60
+ let selector, attribute; //, classname;
61
  let channel, left, bottom;
62
 
63
  if(stage1_info.state == "step2-selecting"){
 
80
  //height: 560px;
81
  background: url("file=${stage1_info.fileNames.input_montage}");
82
  background-size: contain;
 
83
  `;
84
 
85
  // move the radios/checkboxes
 
94
  item.querySelector(":scope > span").innerText = "";
95
  });
96
 
 
97
  // add indication for the missing channels
98
  channel = stage1_info.missingTemplates[0];
99
  left = channel_info.templateDict[channel].css_position[0];
 
167
  selector = "#chkbox-group > div:nth-of-type(2)";
168
  }else return;
169
 
170
+ // update the indication
171
  channel = stage1_info.missingTemplates[stage1_info["fillingCount"]-1];
172
  left = channel_info.templateDict[channel].css_position[0];
173
  bottom = channel_info.templateDict[channel].css_position[1];
 
213
  }
214
  """
215
 
 
216
  with gr.Blocks() as demo:
217
 
218
  app_info_json = gr.JSON(visible=False)
 
227
  with gr.Row():
228
 
229
  with gr.Column():
230
+ # ------------------------input--------------------------
231
  with gr.Row(variant='panel'):
232
  with gr.Column():
233
  gr.Markdown("# 1.Channel Mapping")
 
234
  in_loc_file = gr.File(label="Channel locations (.loc, .locs, .xyz, .sfp, .txt)",
235
  file_types=[".loc", "locs", ".xyz", ".sfp", ".txt"])
236
  with gr.Row():
237
  in_samplerate = gr.Textbox(label="Sampling rate (Hz)", scale=2)
238
  map_btn = gr.Button("Mapping", scale=1)
239
+ # ------------------------mapping------------------------
240
+ desc_md = gr.Markdown(visible=False)
241
+ # step1 : initial matching and rescaling
242
+ with gr.Row():
243
+ tpl_img = gr.Image("./template_montage.png", label="Template channels", visible=False)
244
+ mapped_img = gr.Image(label="Input channels", visible=False)
245
+ # step2 : forward unmatched input channels to empty template channels
246
+ radio_group = gr.Radio(elem_id="radio-group", visible=False)
247
+ # step3 : fill the remaining template channels
248
+ with gr.Row():
249
+ in_fillmode = gr.Dropdown(choices=["mean", "zero"],
 
250
  value="mean",
251
  label="Filling method",
252
  visible=False,
253
  scale=2)
254
+ fillmode_btn = gr.Button("OK", visible=False, scale=1)
255
+ chkbox_group = gr.CheckboxGroup(elem_id="chkbox-group", visible=False)
256
+ # step4 : mapping result
257
+ out_json_file = gr.File(label="Mapping result", visible=False)
258
+ res_md = gr.Markdown("""
259
+ (Download this file if you plan to run the models using the source code.)
260
+ """, visible=False)
261
+
262
+ with gr.Row():
263
+ clear_btn = gr.Button("Clear", visible=False)
264
+ step2_btn = gr.Button("Next", visible=False)
265
+ step3_btn = gr.Button("Next", visible=False)
266
+ next_btn = gr.Button("Next step", visible=False)
267
+ # -------------------------------------------------------
268
 
269
  with gr.Column():
270
+ # ------------------------input--------------------------
271
  with gr.Row(variant='panel'):
272
  with gr.Column():
273
  gr.Markdown("# 2.Decode Data")
 
274
  in_data_file = gr.File(label="Raw data (.csv)", file_types=[".csv"])
275
  with gr.Row():
276
  in_modelname = gr.Dropdown(choices=[
 
278
  ("IC-U-Net", "ICUNet"),
279
  ("IC-U-Net++", "UNetpp"),
280
  ("IC-U-Net-Attn", "AttUnet")],
281
+ #"(mapped data)"],
 
282
  value="EEGART",
283
  label="Model",
284
  scale=2)
285
  run_btn = gr.Button(interactive=False, scale=1)
286
+ # ------------------------output-------------------------
287
+ batch_md = gr.Markdown(visible=False)
288
+ out_data_file = gr.File(label="Denoised data", visible=False)
289
+ # -------------------------------------------------------
 
290
 
291
  with gr.Row():
292
  with gr.Tab("ART"):
 
299
  gr.Markdown()
300
  with gr.Tab("README"):
301
  gr.Markdown(readme)
 
 
302
 
303
  # +========================================================================================+
304
  # | Stage1: channel mapping |
 
311
  except OSError as e:
312
  utils.dataDelete(rootpath+"/session_data/")
313
  os.mkdir(rootpath+"/session_data/")
314
+ #print(e)
315
  os.mkdir(rootpath+"/session_data/stage1/")
316
  os.mkdir(rootpath+"/session_data/stage2/")
317
 
 
420
  tpl_img : gr.Image(visible=True),
421
  mapped_img : gr.Image(value=filename2, visible=True),
422
  next_btn : gr.Button(visible=True)}
 
423
 
424
  # ========================================step1=========================================
425
  elif stage1_info["state"] == "step1-finished":
 
696
  # +========================================================================================+
697
  # | Stage1-step2 |
698
  # +========================================================================================+
699
+ # determine which button to display based on the current state
700
  @radio_group.select(inputs = app_info_json, outputs = [step2_btn, next_btn])
701
  def determine_button(app_info):
702
  stage1_info = app_info["stage1"]
703
+ if len(stage1_info["unassignedInputs"]) == 1:
704
  return {step2_btn : gr.Button(visible=False),
705
  next_btn : gr.Button(visible=True)}
706
  else:
707
  return {step2_btn : gr.Button()} # change nothing
708
+ # clear the selected value and reset the buttons
709
  @clear_btn.click(inputs = app_info_json, outputs = [radio_group, step2_btn, next_btn])
710
  def clear_value(app_info):
711
  stage1_info = app_info["stage1"]
 
895
  try:
896
  # step1: Reorder input data
897
  data_shape = app_utils.reorder_data(new_idx, fill_flags, filename, filepath+"temp_data/mapped.csv")
 
 
 
898
  # step2: Data preprocessing
899
  total_file_num = utils.preprocessing(filepath+"temp_data/", "mapped.csv", samplerate)
900
  # step3: Signal reconstruction
901
  utils.reconstruct(modelname, total_file_num, filepath+"temp_data/", "denoised.csv", samplerate)
 
 
 
902
  # step4: Restore original order
903
  app_utils.restore_order(i, data_shape, new_idx, fill_flags, filepath+"temp_data/denoised.csv", new_filename)
 
904
  except FileNotFoundError:
905
  print('break2!!')
906
  break_flag = True
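Taken together, the Stage2 handler above runs each batch of mapped channels through the same four steps: reorder into the 30-channel template layout, preprocess, reconstruct with the selected model, and restore the original channel order. A condensed, hedged sketch of that loop (`run_batches` is a hypothetical wrapper; the `utils`/`app_utils` calls mirror the signatures visible in this diff, and the paths are illustrative):

```python
# Sketch of the per-batch denoising loop, condensed from the handler above.
# utils / app_utils are this repo's modules; run_batches is a hypothetical wrapper.
import utils
import app_utils

def run_batches(batched_mappings, filename, new_filename, filepath, modelname, samplerate):
    """batched_mappings: list of (new_idx, fill_flags) pairs, one per mapping batch."""
    for i, (new_idx, fill_flags) in enumerate(batched_mappings):
        # step1: reorder the raw channels into the 30-channel template layout
        data_shape = app_utils.reorder_data(new_idx, fill_flags, filename,
                                            filepath + "temp_data/mapped.csv")
        # step2: segment/preprocess the mapped data for the model
        total_file_num = utils.preprocessing(filepath + "temp_data/", "mapped.csv", samplerate)
        # step3: run the selected model (e.g. "EEGART") to reconstruct the signal
        utils.reconstruct(modelname, total_file_num, filepath + "temp_data/",
                          "denoised.csv", samplerate)
        # step4: write the denoised channels back into the original channel order
        app_utils.restore_order(i, data_shape, new_idx, fill_flags,
                                filepath + "temp_data/denoised.csv", new_filename)
```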
app_utils.py CHANGED
@@ -14,6 +14,7 @@ from sklearn.neighbors import NearestNeighbors
14
  def reorder_data(idx_order, fill_flags, filename, new_filename):
15
  # read the input data
16
  raw_data = utils.read_train_data(filename)
 
17
  new_data = np.zeros((30, raw_data.shape[1]))
18
 
19
  zero_arr = np.zeros((1, raw_data.shape[1]))
@@ -26,7 +27,6 @@ def reorder_data(idx_order, fill_flags, filename, new_filename):
26
  tmp_data = [raw_data[j, :] for j in idx_set]
27
  new_data[i, :] = np.mean(tmp_data, axis=0)
28
 
29
- #print(raw_data.shape, new_data.shape)
30
  utils.save_data(new_data, new_filename)
31
  return raw_data.shape
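To make the indexing concrete: `idx_order` holds, for each of the 30 template channels, the input-channel indices whose data fill it, and `fill_flags` marks the channels filled by the chosen fill mode rather than by a direct match. A plain-NumPy illustration of what the reordering amounts to (synthetic values; the real function also handles the CSV I/O and zero filling):

```python
# Illustration of the reordering step with synthetic values (not the repo function).
import numpy as np

raw_data = np.random.rand(3, 1000)              # (input channels, timepoints)
idx_order = [[0], [1], [2], [0, 2]] + [[]]*26   # per template channel: contributing input indices
fill_flags = [False, False, False, True] + [True]*26

new_data = np.zeros((30, raw_data.shape[1]))
for i, idx_set in enumerate(idx_order):
    if idx_set:                                  # average the contributing input channels
        new_data[i] = raw_data[idx_set].mean(axis=0)
print(new_data.shape)                            # (30, 1000), the layout the models expect
```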
32
 
@@ -35,6 +35,7 @@ def restore_order(batch_cnt, raw_data_shape, idx_order, fill_flags, filename, ne
35
  d_data = utils.read_train_data(filename)
36
  if batch_cnt == 0:
37
  new_data = np.zeros((raw_data_shape[0], d_data.shape[1]))
 
38
  else:
39
  new_data = utils.read_train_data(new_filename)
40
 
@@ -43,7 +44,6 @@ def restore_order(batch_cnt, raw_data_shape, idx_order, fill_flags, filename, ne
43
  if flag == False:
44
  new_data[idx_set[0], :] = d_data[i, :]
45
 
46
- #print(d_data.shape, new_data.shape)
47
  utils.save_data(new_data, new_filename)
48
  return
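`restore_order` is the inverse bookkeeping: on the first batch it allocates an output array with the original channel count, on later batches it reloads the partial result, and it copies each denoised row back to its original index while skipping rows whose flag marks them as fill-mode data. A small plain-NumPy illustration (synthetic values, not the repo function):

```python
# Illustration of restoring denoised template-ordered rows to the original channel order.
import numpy as np

raw_shape = (3, 1000)                            # original (channels, timepoints)
denoised = np.random.rand(30, 1000)              # model output in template order
idx_order = [[0], [1], [2]] + [[]]*27            # template channel -> original input index
fill_flags = [False, False, False] + [True]*27   # True = fill-mode data, not written back

restored = np.zeros(raw_shape)
for i, (idx_set, flag) in enumerate(zip(idx_order, fill_flags)):
    if not flag:                                 # only rows backed by a real input channel
        restored[idx_set[0]] = denoised[i]
print(restored.shape)                            # (3, 1000), back in the original order
```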
49
 
@@ -238,10 +238,8 @@ def match_names(stage1_info, channel_info):
238
  tpl_montage, in_montage, tpl_dict, in_dict = read_montage_data(loc_file)
239
  tpl_order = tpl_montage.ch_names
240
  in_order = in_montage.ch_names
241
- # list to store the indices of the in_channels in the order of tpl_channls
242
- new_idx = [[]]*30
243
- # flags to record if each tpl_channel's data is filled by "fillmode"
244
- fill_flags = [True]*30
245
 
246
  alias_dict = {
247
  'T3': 'T7',
@@ -340,17 +338,17 @@ def optimal_mapping(channel_info):
340
  return mapping_data, channel_info
341
 
342
  def mapping_result(stage1_info, stage2_info, channel_info, filename):
343
- # 1. calculate how many times the model needs to be run
344
  unassigned_num = len(stage1_info["unassignedInputs"])
345
  batch_num = math.ceil(unassigned_num/30) + 1
346
 
347
- # 2. map the remaining in_channels
348
  for i in range(1, batch_num):
349
  # optimally select 30 in_channels to map to the tpl_channels based on proximity
350
  new_mapping_data, channel_info = optimal_mapping(channel_info)
351
  stage1_info["mappingData"] += [new_mapping_data]
352
 
353
- # 3. save the mapping result
354
  new_dict = {
355
  #"templateOrder" : channel_info["templateOrder"],
356
  #"inputOrder" : channel_info["inputOrder"],
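The batch count computed in `mapping_result` is simple arithmetic: one run for the channels already assigned to the template, plus one extra run per additional group of up to 30 unassigned input channels. A quick worked example:

```python
# Worked example of the batch-count formula used in mapping_result.
import math

for unassigned_num in (0, 12, 30, 31, 65):
    batch_num = math.ceil(unassigned_num / 30) + 1
    print(f"{unassigned_num} unassigned input channels -> {batch_num} model run(s)")
# prints: 0 -> 1, 12 -> 2, 30 -> 2, 31 -> 3, 65 -> 4
```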
 
14
  def reorder_data(idx_order, fill_flags, filename, new_filename):
15
  # read the input data
16
  raw_data = utils.read_train_data(filename)
17
+ #print(raw_data.shape)
18
  new_data = np.zeros((30, raw_data.shape[1]))
19
 
20
  zero_arr = np.zeros((1, raw_data.shape[1]))
 
27
  tmp_data = [raw_data[j, :] for j in idx_set]
28
  new_data[i, :] = np.mean(tmp_data, axis=0)
29
 
 
30
  utils.save_data(new_data, new_filename)
31
  return raw_data.shape
32
 
 
35
  d_data = utils.read_train_data(filename)
36
  if batch_cnt == 0:
37
  new_data = np.zeros((raw_data_shape[0], d_data.shape[1]))
38
+ #print(new_data.shape)
39
  else:
40
  new_data = utils.read_train_data(new_filename)
41
 
 
44
  if flag == False:
45
  new_data[idx_set[0], :] = d_data[i, :]
46
 
 
47
  utils.save_data(new_data, new_filename)
48
  return
49
 
 
238
  tpl_montage, in_montage, tpl_dict, in_dict = read_montage_data(loc_file)
239
  tpl_order = tpl_montage.ch_names
240
  in_order = in_montage.ch_names
241
+ new_idx = [[]]*30 # store the indices of the in_channels in the order of tpl_channels
242
+ fill_flags = [True]*30 # record if each tpl_channel's data is filled by "fillmode"
 
 
243
 
244
  alias_dict = {
245
  'T3': 'T7',
 
338
  return mapping_data, channel_info
339
 
340
  def mapping_result(stage1_info, stage2_info, channel_info, filename):
341
+ # calculate how many times the model needs to be run
342
  unassigned_num = len(stage1_info["unassignedInputs"])
343
  batch_num = math.ceil(unassigned_num/30) + 1
344
 
345
+ # map the remaining in_channels
346
  for i in range(1, batch_num):
347
  # optimally select 30 in_channels to map to the tpl_channels based on proximity
348
  new_mapping_data, channel_info = optimal_mapping(channel_info)
349
  stage1_info["mappingData"] += [new_mapping_data]
350
 
351
+ # save the mapping result
352
  new_dict = {
353
  #"templateOrder" : channel_info["templateOrder"],
354
  #"inputOrder" : channel_info["inputOrder"],