audrey06100 commited on
Commit
6ad8262
·
1 Parent(s): 65075f3
Files changed (1) hide show
  1. app.py +17 -14
app.py CHANGED
@@ -10,27 +10,31 @@ import mne
10
  from mne.channels import read_custom_montage
11
 
12
  quickstart = """
13
- # Quickstart
14
 
15
- ### Raw data
16
  1. The data need to be a two-dimensional array (channel, timepoint).
17
  2. Upload your EEG data in `.csv` format.
18
 
19
- ### Channel locations
20
  Upload your data's channel locations in `.loc` format, which can be obtained using **EEGLAB**.
21
>If you cannot obtain it, we recommend you download the standard montage <a href="">here</a>. If the channels in those files don't match yours, you can use **EEGLAB** to modify them to match your montage.
22
 
23
- ### Mapping
24
 
25
- - Step1: Mapping result
 
 
 
 
26
 
27
- - Step2:
 
 
 
 
28
 
29
- - Step3:
30
 
31
- ### Model
32
-
33
- ### Run model
34
 
35
  ### Imputation
36
The models were trained using the EEG signals of 30 channels, including: `Fp1, Fp2, F7, F3, Fz, F4, F8, FT7, FC3, FCz, FC4, FT8, T7, C3, Cz, C4, T8, TP7, CP3, CPz, CP4, TP8, P7, P3, Pz, P4, P8, O1, Oz, O2`.
@@ -325,7 +329,7 @@ with gr.Blocks() as demo:
325
  tpl_montage : gr.Image(visible=False),
326
  in_montage : gr.Image(value=None, visible=False),
327
  radio : gr.Radio(choices=[], value=[], label="", visible=False),
328
- in_fill_mode : gr.Dropdown(visible=False),
329
  chkbox_group : gr.CheckboxGroup(choices=[], value=[], label="", visible=False),
330
  fillmode_btn : gr.Button("OK", visible=False),
331
  step2_btn : gr.Button("Next", visible=False),
@@ -342,7 +346,6 @@ with gr.Blocks() as demo:
342
  filename1 = filepath+"raw_montage_"+str(random.randint(1,10000))+".png"
343
  filename2 = filepath+"mapped_montage_"+str(random.randint(1,10000))+".png"
344
 
345
- #app_state["filenames"]["raw_montage"] = filename1
346
  app_state["filenames"].update({
347
  "raw_montage" : filename1,
348
  "mapped_montage" : filename2
@@ -372,7 +375,7 @@ with gr.Blocks() as demo:
372
  gr.Info('The mapping process is finished!')
373
 
374
  return {app_state_json : app_state,
375
- desc_md : gr.Markdown("### Step1: Mapping result", visible=True),
376
  tpl_montage : gr.Image(visible=True),
377
  in_montage : gr.Image(value=filename2, visible=True),
378
  run_btn : gr.Button(interactive=True)}
@@ -643,7 +646,7 @@ with gr.Blocks() as demo:
643
  fillmode_btn.click(
644
  fn = fill_value,
645
  inputs = [app_state_json, channel_info_json, in_fill_mode],
646
- outputs = [app_state_json, in_fill_mode, fillmode_btn, chkbox_group, step3_btn, next_btn, run_btn]
647
  ).success(
648
  fn = None,
649
  js = init_js,
 
10
  from mne.channels import read_custom_montage
11
 
12
  quickstart = """
 
13
 
14
+ ## Raw data
15
  1. The data need to be a two-dimensional array (channel, timepoint).
16
  2. Upload your EEG data in `.csv` format.
17
 
18
+ ## Channel locations
19
  Upload your data's channel locations in `.loc` format, which can be obtained using **EEGLAB**.
20
>If you cannot obtain it, we recommend you download the standard montage <a href="">here</a>. If the channels in those files don't match yours, you can use **EEGLAB** to modify them to match your montage.
21
 
22
+ ## Mapping
23
 
24
+ ### Step1: Mapping result
25
+ After clicking the mapping button, the **template montage** and the **input montage** will be displayed, with the unmatched input channels highlighted in red.
26
+ - If your data includes all the 30 template channels, then you can proceed directly to **run the models**.
27
+ - If some of your channels didn't match the template, you will be guided to **Step2**.
28
+ - If all of your channels can find corresponding template channels, but the number of channels is less than 30, you will be guided to **Step3**.
29
 
30
+ ### Step2:
31
+ In this step, the template channels that didn't match yours will be displayed sequentially. You will then have the option to choose whether to fill some of your unmatched channels into them.
32
+ - If you
33
+ -
34
+ ### Step3:
35
 
 
36
 
37
+ # Run model
 
 
38
 
39
  ### Imputation
40
The models were trained using the EEG signals of 30 channels, including: `Fp1, Fp2, F7, F3, Fz, F4, F8, FT7, FC3, FCz, FC4, FT8, T7, C3, Cz, C4, T8, TP7, CP3, CPz, CP4, TP8, P7, P3, Pz, P4, P8, O1, Oz, O2`.
 
329
  tpl_montage : gr.Image(visible=False),
330
  in_montage : gr.Image(value=None, visible=False),
331
  radio : gr.Radio(choices=[], value=[], label="", visible=False),
332
+ in_fill_mode : gr.Dropdown(value="mean", visible=False),
333
  chkbox_group : gr.CheckboxGroup(choices=[], value=[], label="", visible=False),
334
  fillmode_btn : gr.Button("OK", visible=False),
335
  step2_btn : gr.Button("Next", visible=False),
 
346
  filename1 = filepath+"raw_montage_"+str(random.randint(1,10000))+".png"
347
  filename2 = filepath+"mapped_montage_"+str(random.randint(1,10000))+".png"
348
 
 
349
  app_state["filenames"].update({
350
  "raw_montage" : filename1,
351
  "mapped_montage" : filename2
 
375
  gr.Info('The mapping process is finished!')
376
 
377
  return {app_state_json : app_state,
378
+ desc_md : gr.Markdown("### Mapping result", visible=True),
379
  tpl_montage : gr.Image(visible=True),
380
  in_montage : gr.Image(value=filename2, visible=True),
381
  run_btn : gr.Button(interactive=True)}
 
646
  fillmode_btn.click(
647
  fn = fill_value,
648
  inputs = [app_state_json, channel_info_json, in_fill_mode],
649
+ outputs = [app_state_json, desc_md, in_fill_mode, fillmode_btn, chkbox_group, step3_btn, next_btn, run_btn]
650
  ).success(
651
  fn = None,
652
  js = init_js,