mebubo committed
Commit 1f2d72c · 1 Parent(s): be53c78

feat: Add tqdm for progress tracking and time measurement in loop

Files changed (1)
  1. app.py +8 -3
app.py CHANGED
@@ -1,5 +1,6 @@
 #%%
 import time
+from tqdm import tqdm
 from text_processing import split_into_words, Word
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM, PreTrainedModel, PreTrainedTokenizer
@@ -64,7 +65,8 @@ low_prob_words = [word for word in words if word.logprob < log_prob_threshold]
 
 start_time = time.time()
 
-for word in low_prob_words:
+for word in tqdm(low_prob_words, desc="Processing words"):
+    iteration_start_time = time.time()
     prefix_index = word.first_token_index
     prefix_tokens = [token for token, _ in result][:prefix_index + 1]
     prefix = tokenizer.convert_tokens_to_string(prefix_tokens)
@@ -72,5 +74,8 @@ for word in low_prob_words:
     print(f"Original word: {word.text}, Log Probability: {word.logprob:.4f}")
     print(f"Proposed replacements: {replacements}")
     print()
-end_time = time.time()
-print(f"Time taken for the loop: {end_time - start_time:.4f} seconds")
+    iteration_end_time = time.time()
+    print(f"Time taken for this iteration: {iteration_end_time - iteration_start_time:.4f} seconds")
+
+end_time = time.time()
+print(f"Total time taken for the loop: {end_time - start_time:.4f} seconds")
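For reference, the pattern this commit introduces is a tqdm progress bar around the loop plus per-iteration and total wall-clock timing. Below is a minimal, standalone sketch of that pattern; the slow_step function and the items list are placeholders for illustration only and are not part of app.py.

import time

from tqdm import tqdm


def slow_step(item: str) -> str:
    """Placeholder for the per-item work (in app.py: proposing replacements for a word)."""
    time.sleep(0.1)
    return item.upper()


items = ["alpha", "beta", "gamma"]

start_time = time.time()

# tqdm wraps the iterable and renders a progress bar labelled with `desc`.
for item in tqdm(items, desc="Processing words"):
    iteration_start_time = time.time()
    result = slow_step(item)
    iteration_end_time = time.time()
    print(f"{item} -> {result}")
    print(f"Time taken for this iteration: {iteration_end_time - iteration_start_time:.4f} seconds")

end_time = time.time()
print(f"Total time taken for the loop: {end_time - start_time:.4f} seconds")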