chongzhou committed
Commit b8b46fe · Parent: c306abe

get session_id
Files changed (2)
  1. .gitignore  +1 -0
  2. app.py      +5 -5
.gitignore ADDED
@@ -0,0 +1 @@
+EdgeTAM.egg-info/
app.py CHANGED
@@ -97,7 +97,7 @@ def reset(
     session_input_labels,
     request: gr.Request,
 ):
-    session_id = request.session_id
+    session_id = request.session.hash
     predictor.to("cpu")
     session_input_points = []
     session_input_labels = []
@@ -125,7 +125,7 @@ def clear_points(
     session_input_labels,
     request: gr.Request,
 ):
-    session_id = request.session_id
+    session_id = request.session.hash
     predictor.to("cpu")
     session_input_points = []
     session_input_labels = []
@@ -148,7 +148,7 @@ def preprocess_video_in(
     session_input_labels,
     request: gr.Request,
 ):
-    session_id = request.session_id
+    session_id = request.session.hash
     predictor.to("cpu")
     if video_path is None:
         return (
@@ -225,7 +225,7 @@ def segment_with_points(
     evt: gr.SelectData,
     request: gr.Request,
 ):
-    session_id = request.session_id
+    session_id = request.session.hash
     if torch.cuda.get_device_properties(0).major >= 8:
         torch.backends.cuda.matmul.allow_tf32 = True
         torch.backends.cudnn.allow_tf32 = True
@@ -308,7 +308,7 @@ def propagate_to_all(
     session_all_frames,
     request: gr.Request,
 ):
-    session_id = request.session_id
+    session_id = request.session.hash
     predictor.to("cuda")
     if torch.cuda.get_device_properties(0).major >= 8:
         torch.backends.cuda.matmul.allow_tf32 = True
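
Across all five handlers, the commit swaps request.session_id for request.session.hash so each Gradio callback can key its state to the calling user's session. The sketch below only illustrates that per-session pattern and is not the app's code: SESSION_STATE and the reset body are hypothetical, and it reads the id via request.session_hash, the attribute documented on gr.Request in recent Gradio releases (the commit itself reads request.session.hash).

import gradio as gr

# Hypothetical per-session store keyed by the Gradio session identifier
# (not part of the original app.py; shown only to illustrate the pattern).
SESSION_STATE: dict[str, dict] = {}

def reset(request: gr.Request):
    # Gradio injects a gr.Request when a handler annotates a parameter with it.
    # Assumption: the session id is exposed as request.session_hash here;
    # the commit above accesses it as request.session.hash instead.
    session_id = request.session_hash
    SESSION_STATE[session_id] = {"input_points": [], "input_labels": []}
    return f"reset session {session_id}"

with gr.Blocks() as demo:
    btn = gr.Button("Reset")
    out = gr.Textbox()
    btn.click(reset, inputs=None, outputs=out)

if __name__ == "__main__":
    demo.launch()

Keying state by the session identifier lets one shared predictor serve many concurrent users without their clicked points or frames leaking between sessions.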