Update app.py
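Removes the debug print() calls from CTXGen and adds a gen_len = len(X0) computation at the top of the generation loop.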
app.py CHANGED
```diff
@@ -24,12 +24,6 @@ def stop_generation():
     return "Generation stopped."
 
 def CTXGen(X0, X1, X2, τ, g_num, model_name):
-    print(X0)
-    print(X1)
-    print(X2)
-    print(τ)
-    print(g_num)
-    print(model_name)
     global is_stopped
     is_stopped = False
 
@@ -41,7 +35,6 @@ def CTXGen(X0, X1, X2, τ, g_num, model_name):
     train_seq = train_seqs['Seq'].tolist()
     model = torch.load(save_path, map_location=torch.device('cpu'))
     model = model.to(device)
-    print(model)
 
     X3 = "X" * len(X0)
     msa_data = pd.read_csv('conoData_C0.csv')
@@ -95,6 +88,7 @@ def CTXGen(X0, X1, X2, τ, g_num, model_name):
 
     start_time = time.time()
     while count < gen_num:
+        gen_len = len(X0)
         if is_stopped:
             return pd.DataFrame(), "output.csv"
 
@@ -102,7 +96,6 @@ def CTXGen(X0, X1, X2, τ, g_num, model_name):
             break
 
         seq = [f"{X1}|{X2}|{X3}|{X4}|{X5}|{X6}"]
-        print(seq)
         vocab_mlm.token_to_idx["X"] = 4
 
         padded_seq, _, _, _ = get_paded_token_idx_gen(vocab_mlm, seq, new_seq)
@@ -110,9 +103,7 @@ def CTXGen(X0, X1, X2, τ, g_num, model_name):
 
         gen_length = len(input_text)
         length = gen_length - sum(1 for x in input_text if x != '[MASK]')
-        print(input_text)
        for i in range(length):
-            print(i)
            if is_stopped:
                return pd.DataFrame(), "output.csv"
 
@@ -135,7 +126,6 @@ def CTXGen(X0, X1, X2, τ, g_num, model_name):
            new_seq = padded_seq
 
            generated_seq = input_text
-            print(generated_seq)
 
            generated_seq[1] = "[MASK]"
            input_ids = vocab_mlm.__getitem__(generated_seq)
```
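One detail in the unchanged context around the removed `print(input_text)`: `length` is computed as the total length minus the count of non-`[MASK]` tokens, i.e. the number of positions still masked. A more direct equivalent (a suggestion for illustration, not part of this commit) would be:

```python
input_text = ['[CLS]', 'G', '[MASK]', 'C', '[MASK]']  # illustrative token list

# Number of still-masked positions; equivalent to
# len(input_text) - sum(1 for x in input_text if x != '[MASK]')
length = input_text.count('[MASK]')
assert length == 2
```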
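Since the commit drops the print-based tracing entirely, a gated logger would keep the same diagnostics available on demand. A minimal sketch, assuming nothing about the app beyond the CTXGen signature shown in the diff; the logger name and the CTX_DEBUG environment variable are illustrative, not part of the repo:

```python
import logging
import os

# Debug tracing is opt-in: run with CTX_DEBUG=1 to see the output the
# removed print() calls used to produce; otherwise it stays silent.
logging.basicConfig(
    level=logging.DEBUG if os.getenv("CTX_DEBUG") == "1" else logging.INFO
)
logger = logging.getLogger("ctxgen")

def CTXGen(X0, X1, X2, τ, g_num, model_name):
    # One structured line instead of six bare prints.
    logger.debug(
        "inputs: X0=%s X1=%s X2=%s τ=%s g_num=%s model_name=%s",
        X0, X1, X2, τ, g_num, model_name,
    )
    ...
```

Unlike the deleted prints, these lines can be switched back on without editing the generation code again.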