audrey06100 committed
Commit 06398f6 · 1 Parent(s): 046b0b5
Files changed (2)
  1. app.py +27 -27
  2. app_utils.py +2 -2
app.py CHANGED
@@ -57,17 +57,25 @@ icunet = """
 Electroencephalography (EEG) signals are often contaminated with artifacts. It is imperative to develop a practical and reliable artifact removal method to prevent the misinterpretation of neural signals and the underperformance of brain–computer interfaces. Based on the U-Net architecture, we developed a new artifact removal model, IC-U-Net, for removing pervasive EEG artifacts and reconstructing brain signals. IC-U-Net was trained using mixtures of brain and non-brain components decomposed by independent component analysis. It uses an ensemble of loss functions to model complex signal fluctuations in EEG recordings. The effectiveness of the proposed method in recovering brain activities and removing various artifacts (e.g., eye blinks/movements, muscle activities, and line/channel noise) was demonstrated in a simulation study and four real-world EEG experiments. IC-U-Net can reconstruct a multi-channel EEG signal and is applicable to most artifact types, offering a promising end-to-end solution for automatically removing artifacts from EEG recordings. It also meets the increasing need to image natural brain dynamics in a mobile setting.
 """

-observe_js = """
+js = """
 () => {
-    const observer = new ResizeObserver((entries) => {
-        entries.forEach((entry) => {
-            const target = entry.target;
-            const newWidth = entry.contentRect.width;
-            target.style.setProperty("--indicator-size", `${newWidth*0.025}px`);
-        });
-    });
-    observer.observe(document.querySelector("#radio-group > div:nth-of-type(2)"));
-    observer.observe(document.querySelector("#chkbox-group > div:nth-of-type(2)"));
+    const styleSheet = document.styleSheets[0];
+    styleSheet.insertRule(`
+        .channel-box {
+            position: absolute;
+            z-index: 2;
+            width: 2.5%;
+            height: 2.5%;
+            transform: translate(-50%, 50%);
+        }
+    `, styleSheet.cssRules.length);
+    styleSheet.insertRule(`
+        .channel-input {
+            display: block !important;
+            width: 100% !important;
+            height: 100% !important;
+        }
+    `, styleSheet.cssRules.length);
 }
 """

@@ -86,7 +94,6 @@ init_js = """
     }else return;

     const div = document.querySelector(selector);
-    div.style.setProperty("--indicator-size", `${div.clientWidth*0.025}px`);

     // add figure of the input montage
     div.style.cssText = `
@@ -106,16 +113,10 @@ init_js = """
         left = channel_info.inputDict[name].css_position[0];
         bottom = channel_info.inputDict[name].css_position[1];

-        el.style.cssText = `position: absolute; left: ${left}%; bottom: ${bottom}%;`;
-        el.className = "";
+        el.className = "channel-box";
+        el.style.cssText = `left: ${left}%; bottom: ${bottom}%;`;
+        el.querySelector(":scope > input").classList.add("channel-input");
         el.querySelector(":scope > span").innerText = "";
-        el.querySelector(":scope > input").style.cssText = `
-            position: absolute;
-            z-index: 2;
-            width: var(--indicator-size);
-            height: var(--indicator-size);
-            transform: translate(-50%, -50%);
-        `;
     });

     // add indication for the first empty tpl_channel
@@ -128,8 +129,8 @@ init_js = """
             position: absolute;
             left: ${left}%;
             bottom: ${bottom}%;
-            width: var(--indicator-size);
-            height: var(--indicator-size);
+            width: 2%;
+            height: 2%;
             border-radius: 50%;
             background-color: red;
         }
@@ -177,8 +178,7 @@ update_js = """
         name = el.querySelector(":scope > input").value;
         left = channel_info.inputDict[name].css_position[0];
         bottom = channel_info.inputDict[name].css_position[1];
-        el.style.left = `${left}%`;
-        el.style.bottom = `${bottom}%`;
+        el.style.cssText = `left: ${left}%; bottom: ${bottom}%;`;
     });
     }else if(stage1_info.state == "step3-2-selecting"){
         selector = "#chkbox-group > div:nth-of-type(2)";
@@ -195,8 +195,8 @@ update_js = """
             position: absolute;
             left: ${left}%;
             bottom: ${bottom}%;
-            width: var(--indicator-size);
-            height: var(--indicator-size);
+            width: 2%;
+            height: 2%;
             border-radius: 50%;
             background-color: red;
         }
@@ -228,7 +228,7 @@ update_js = """
 }
 """

-with gr.Blocks(js=observe_js, delete_cache=(3600, 3600)) as demo:
+with gr.Blocks(js=js, delete_cache=(3600, 3600)) as demo:
     session_dir = gr.State("")
     stage1_json = gr.JSON({}, visible=False)
     stage2_json = gr.JSON({}, visible=False)
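Reviewer note on the observe_js to js rename: the string passed to gr.Blocks(js=...) must be a single JavaScript function, and Gradio runs it once when the page loads. That is what lets this commit drop the per-element ResizeObserver: instead of recomputing --indicator-size on every resize, two class rules (.channel-box, .channel-input) are inserted up front and then simply assigned to elements in init_js and update_js. A minimal, self-contained sketch of that load-time wiring follows; the rule and layout in it are illustrative only and not part of this app:

import gradio as gr

# Runs once at page load: insert a class rule that components rendered
# later can pick up just by being assigned the class.
page_js = """
() => {
    const styleSheet = document.styleSheets[0];
    styleSheet.insertRule(
        ".demo-box { position: absolute; width: 2.5%; height: 2.5%; }",
        styleSheet.cssRules.length
    );
}
"""

with gr.Blocks(js=page_js) as demo:
    gr.Markdown("Any element given the class 'demo-box' is sized by the rule above.")

demo.launch()

Because the new .channel-box rule sizes the markers in percent of their container, they scale with the layout automatically and no JavaScript has to run when the panel is resized, which is why the ResizeObserver could be removed.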
app_utils.py CHANGED
@@ -187,7 +187,7 @@ def save_figure(channel_info, tpl_montage, filename1, filename2):
     # plot in_channels on it
     ax.scatter(in_coords[:,0], in_coords[:,1], s=35, color='black')
     for i, name in enumerate(in_names):
-        ax.text(in_coords[i,0]+0.003, in_coords[i,1], name, color='black', fontsize=10.0, va='center')
+        ax.text(in_coords[i,0]+0.004, in_coords[i,1], name, color='black', fontsize=10.0, va='center')
     # save input_montage
     fig.savefig(filename1)

@@ -197,7 +197,7 @@ def save_figure(channel_info, tpl_montage, filename1, filename2):
     if indices != []:
         ax.scatter(in_coords[indices,0], in_coords[indices,1], s=35, color='red')
     for i in indices:
-        ax.text(in_coords[i,0]+0.003, in_coords[i,1], in_names[i], color='red', fontsize=10.0, va='center')
+        ax.text(in_coords[i,0]+0.004, in_coords[i,1], in_names[i], color='red', fontsize=10.0, va='center')
     # save mapped_montage
     fig.savefig(filename2)
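The app_utils.py change only widens the horizontal gap between each montage marker and its text label (x offset 0.003 to 0.004 in data coordinates). For reference, a standalone sketch of the same labeling pattern; the coordinates and channel names below are made up, standing in for the in_coords and in_names that save_figure actually derives from the montage:

import numpy as np
import matplotlib.pyplot as plt

# Hypothetical normalized 2D sensor positions and channel names
# (placeholders for the montage-derived in_coords / in_names).
in_coords = np.array([[0.00, 0.08], [-0.05, 0.02], [0.05, 0.02]])
in_names = ["Fz", "C3", "C4"]

fig, ax = plt.subplots()
ax.scatter(in_coords[:, 0], in_coords[:, 1], s=35, color='black')
for i, name in enumerate(in_names):
    # Shift each label slightly to the right of its marker so the text
    # does not overlap the dot; this commit widens that shift.
    ax.text(in_coords[i, 0]+0.004, in_coords[i, 1], name,
            color='black', fontsize=10.0, va='center')
fig.savefig("montage_labels_example.png")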