cetinca committed
Commit 43ac953 · Parent(s): 72f4be8

New version text2int

Files changed (5):
  1. .gitignore +3 -0
  2. app.py +155 -143
  3. plot_calls.py +23 -3
  4. test_api.py +3 -3
  5. test_api.sh +41 -24
.gitignore CHANGED
@@ -100,3 +100,6 @@ docs/**/*.html
 **/*private*
 /call_history.csv
 /call_history.txt
+/output.csv
+/call_history_bash.csv
+/call_history_sentiment_bash.csv
app.py CHANGED
@@ -1,20 +1,15 @@
-import inspect
-import json
-import logging
-import os
-from typing import List, Type
-
 import gradio as gr
 import spacy  # noqa
-from dotenv import load_dotenv
-from gradio import routes
 from transformers import pipeline
 
-load_dotenv()
+# import os
+# os.environ['KMP_DUPLICATE_LIB_OK']='True'
+# import spacy
 
-TOKENS2INT_ERROR_INT = 32202
+# Change this according to what words should be corrected to
+SPELL_CORRECT_MIN_CHAR_DIFF = 2
 
-log = logging.getLogger()
+TOKENS2INT_ERROR_INT = 32202
 
 ONES = [
     "zero", "one", "two", "three", "four", "five", "six", "seven", "eight",
@@ -22,20 +17,55 @@ ONES = [
     "sixteen", "seventeen", "eighteen", "nineteen",
 ]
 
-# token_mapping = json.load(open('str_mapping.json'))
 CHAR_MAPPING = {
     "-": " ",
     "_": " ",
+    "and": " ",
+}
+# CHAR_MAPPING.update((str(i), word) for i, word in enumerate([" " + s + " " for s in ONES]))
+TOKEN_MAPPING = {
+    "and": " ",
+    "oh": "0",
 }
-CHAR_MAPPING.update((str(i), word) for i, word in enumerate([" " + s + " " for s in ONES]))
 
-TOKEN_MAPPING = dict(enumerate([" " + s + " " for s in ONES]))
 
-BQ_JSON = os.environ['BQ_JSON']
+def find_char_diff(a, b):
+    # Finds the character difference between two str objects by counting the occurrences of every character. Not edit distance.
+    char_counts_a = {}
+    char_counts_b = {}
+    for char in a:
+        if char in char_counts_a.keys():
+            char_counts_a[char] += 1
+        else:
+            char_counts_a[char] = 1
+    for char in b:
+        if char in char_counts_b.keys():
+            char_counts_b[char] += 1
+        else:
+            char_counts_b[char] = 1
+    char_diff = 0
+    for i in char_counts_a:
+        if i in char_counts_b.keys():
+            char_diff += abs(char_counts_a[i] - char_counts_b[i])
+        else:
+            char_diff += char_counts_a[i]
+    return char_diff
 
 
 def tokenize(text):
-    return text.split()
+    text = text.lower()
+    # print(text)
+    text = replace_tokens(''.join(i for i in replace_chars(text)).split())
+    # print(text)
+    text = [i for i in text if i != ' ']
+    # print(text)
+    output = []
+    for word in text:
+        # print(word)
+        output.append(convert_word_to_int(word))
+    output = [i for i in output if i != ' ']
+    # print(output)
+    return output
 
 
 def detokenize(tokens):
@@ -47,96 +77,122 @@ def replace_tokens(tokens, token_mapping=TOKEN_MAPPING):
 
 
 def replace_chars(text, char_mapping=CHAR_MAPPING):
-    return ''.join((char_mapping.get(c, c) for c in text))
-
-
-def tokens2int(tokens, numwords={}):
-    """ Convert an English str containing number words into an int
-    >>> text2int("nine")
-    9
-    >>> text2int("forty two")
-    42
-    >>> text2int("1 2 three")
-    123
-    """
-    if not numwords:
-
-        tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
+    return [char_mapping.get(c, c) for c in text]
 
-        scales = ["hundred", "thousand", "million", "billion", "trillion"]
 
-        numwords["and"] = (1, 0)
+def convert_word_to_int(in_word, numwords={}):
+    # Converts a single word/str into a single int
+    tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
+    scales = ["hundred", "thousand", "million", "billion", "trillion"]
+    if not numwords:
         for idx, word in enumerate(ONES):
-            numwords[word] = (1, idx)
+            numwords[word] = idx
         for idx, word in enumerate(tens):
-            numwords[word] = (1, idx * 10)
+            numwords[word] = idx * 10
         for idx, word in enumerate(scales):
-            numwords[word] = (10 ** (idx * 3 or 2), 0)
-
-    current = result = 0
-
-    for word in tokens:
-        if word not in numwords:
-            raise Exception("Illegal word: " + word)
-
-        scale, increment = numwords[word]
-        current = current * scale + increment
-        if scale > 100:
-            result += current
-            current = 0
-
-    return str(result + current)
-
-
-def text2int(text):
-    return tokens2int(tokenize(replace_chars(text)))
-
-
-def try_text2int(text):
-    text = str(text)
+            numwords[word] = 10 ** (idx * 3 or 2)
+    if in_word in numwords:
+        # print(in_word)
+        # print(numwords[in_word])
+        return numwords[in_word]
     try:
-        intstr = tokens2int(tokens2int(tokenize(replace_chars(text))))
-    except Exception as e:
-        log.error(str(e))
-        log.error(f'User input: {text}')
-        intstr = TOKENS2INT_ERROR_INT
-    return str(intstr)
-
-
-def try_text2int_preprocessed(text):
-    text = str(text)
-    try:
-        tokens = replace_tokens(tokenize(replace_chars(str(text))))
-    except Exception as e:
-        log.error(str(e))
-        tokens = text.split()
-    try:
-        intstr = tokens2int(tokens)
-    except Exception as e:
-        log.error(str(e))
-        intstr = str(TOKENS2INT_ERROR_INT)
-    return intstr
-
-
-def get_types(cls_set: List[Type], component: str):
-    docset = []
+        int(in_word)
+        return int(in_word)
+    except ValueError:
+        pass
+    # Spell correction using find_char_diff
+    char_diffs = [find_char_diff(in_word, i) for i in ONES + tens + scales]
+    min_char_diff = min(char_diffs)
+    if min_char_diff <= SPELL_CORRECT_MIN_CHAR_DIFF:
+        return char_diffs.index(min_char_diff)
+
+
+def tokens2int(tokens):
+    # Takes a list of tokens and returns an int representation of them
     types = []
-    if component == "input":
-        for cls in cls_set:
-            doc = inspect.getdoc(cls)
-            doc_lines = doc.split("\n")
-            docset.append(doc_lines[1].split(":")[-1])
-            types.append(doc_lines[1].split(")")[0].split("(")[-1])
+    for i in tokens:
+        if i <= 9:
+            types.append(1)
+
+        elif i <= 90:
+            types.append(2)
+
+        else:
+            types.append(3)
+    # print(tokens)
+    if len(tokens) <= 3:
+        current = 0
+        for i, number in enumerate(tokens):
+            if i != 0 and types[i] < types[i - 1] and current != tokens[i - 1] and types[i - 1] != 3:
+                current += tokens[i] + tokens[i - 1]
+            elif current <= tokens[i] and current != 0:
+                current *= tokens[i]
+            elif 3 not in types and 1 not in types:
+                current = int(''.join(str(i) for i in tokens))
+                break
+            elif '111' in ''.join(str(i) for i in types) and 2 not in types and 3 not in types:
+                current = int(''.join(str(i) for i in tokens))
+                break
+            else:
+                current += number
+
+    elif 3 not in types and 2 not in types:
+        current = int(''.join(str(i) for i in tokens))
+
     else:
-        for cls in cls_set:
-            doc = inspect.getdoc(cls)
-            doc_lines = doc.split("\n")
-            docset.append(doc_lines[-1].split(":")[-1])
-            types.append(doc_lines[-1].split(")")[0].split("(")[-1])
-    return docset, types
+        """
+        double_list = []
+        current_double = []
+        double_type_list = []
+        for i in tokens:
+            if len(current_double) < 2:
+                current_double.append(i)
+            else:
+                double_list.append(current_double)
+                current_double = []
+        current_double = []
+        for i in types:
+            if len(current_double) < 2:
+                current_double.append(i)
+            else:
+                double_type_list.append(current_double)
+                current_double = []
+        print(double_type_list)
+        print(double_list)
+        current = 0
+        for i, type_double in enumerate(double_type_list):
+            if len(type_double) == 1:
+                current += double_list[i][0]
+            elif type_double[0] == type_double[1]:
+                current += int(str(double_list[i][0]) + str(double_list[i][1]))
+            elif type_double[0] > type_double[1]:
+                current += sum(double_list[i])
+            elif type_double[0] < type_double[1]:
+                current += double_list[i][0] * double_list[i][1]
+        #print(current)
+        """
+        count = 0
+        current = 0
+        for i, token in enumerate(tokens):
+            count += 1
+            if count == 2:
+                if types[i - 1] == types[i]:
+                    current += int(str(token) + str(tokens[i - 1]))
+                elif types[i - 1] > types[i]:
+                    current += tokens[i - 1] + token
+                else:
+                    current += tokens[i - 1] * token
+                count = 0
+            elif i == len(tokens) - 1:
+                current += token
+
+    return current
 
 
-routes.get_types = get_types
+def text2int(text):
+    # Wraps all of the functions up into one
+    return tokens2int(tokenize(text))
+
 
 sentiment = pipeline(task="sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
 
@@ -159,7 +215,7 @@ with gr.Blocks() as html_block:
         button_text2int = gr.Button("text2int")
 
         button_text2int.click(
-            fn=try_text2int,
+            fn=text2int,
             inputs=inputs_text2int,
             outputs=outputs_text2int,
             api_name="text2int",
@@ -173,7 +229,6 @@ with gr.Blocks() as html_block:
         gr.Examples(examples=examples_text2int, inputs=inputs_text2int)
 
         gr.Markdown(r"""
-
     ## API
     ```python
    import requests
@@ -188,49 +243,7 @@ with gr.Blocks() as html_block:
     ```bash
    curl -X POST https://tangibleai-mathtext.hf.space/run/text2int -H 'Content-Type: application/json' -d '{"data": ["one hundred forty five"]}'
     ```
-    {bq_json}""" + f"{json.loads(BQ_JSON)['type']}")
-
-    with gr.Tab("Text to integer preprocessed"):
-        inputs_text2int_preprocessed = [
-            gr.Text(placeholder="Type a number as text or a sentence", label="Text to process",
-                    value="forty two"),
-        ]
-
-        outputs_text2int_preprocessed = gr.Textbox(label="Output integer")
-
-        button_text2int = gr.Button("text2int preprocessed")
-
-        button_text2int.click(
-            fn=try_text2int_preprocessed,
-            inputs=inputs_text2int_preprocessed,
-            outputs=outputs_text2int_preprocessed,
-            api_name="text2int-preprocessed",
-        )
-
-        examples_text2int_preprocessed = [
-            "one thousand forty seven",
-            "one hundred",
-        ]
-
-        gr.Examples(examples=examples_text2int_preprocessed, inputs=inputs_text2int_preprocessed)
-
-        gr.Markdown(r"""
-
-    ## API
-    ```python
-    import requests
-
-    requests.post(
-        url="https://tangibleai-mathtext.hf.space/run/text2int-preprocessed", json={"data": ["one hundred forty five"]}
-    ).json()
-    ```
-
-    Or using `curl`:
-
-    ```bash
-    curl -X POST https://tangibleai-mathtext.hf.space/run/text2int-preprocessed -H 'Content-Type: application/json' -d '{"data": ["one hundred forty five"]}'
-    ```
-    {bq_json}""" + f"{json.loads(BQ_JSON)['type']}")
+    """)
 
     with gr.Tab("Sentiment Analysis"):
         inputs_sentiment = [
@@ -257,7 +270,6 @@ with gr.Blocks() as html_block:
         gr.Examples(examples=examples_sentiment, inputs=inputs_sentiment)
 
         gr.Markdown(r"""
-
    ## API
    ```python
    import requests
@@ -272,7 +284,7 @@ with gr.Blocks() as html_block:
    ```bash
    curl -X POST https://tangibleai-mathtext.hf.space/run/sentiment-analysis -H 'Content-Type: application/json' -d '{"data": ["You are right!"]}'
    ```
-    {bq_json}""" + f"{json.loads(BQ_JSON)['type']}")
+    """)
 
     # interface = gr.Interface(lambda x: x, inputs=["text"], outputs=["text"])
     # html_block.input_components = interface.input_components
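A quick way to sanity-check the rewritten `text2int` path is to call the new functions directly, without the Gradio UI. A minimal smoke-test sketch, assuming `app.py` is importable; the expected values are inferred from the parsing rules in this diff, not from committed tests:

```python
# Hypothetical smoke test for the new text2int pipeline (not part of this commit).
from app import find_char_diff, text2int, tokenize

print(tokenize("forty-two"))               # [40, 2]: CHAR_MAPPING turns "-" into " "
print(text2int("forty two"))               # 42
print(text2int("one hundred forty five"))  # 145
print(find_char_diff("fourty", "forty"))   # 1, i.e. within SPELL_CORRECT_MIN_CHAR_DIFF
```

One thing reviewers may want to flag: on a spell-correction hit, `convert_word_to_int` returns `char_diffs.index(min_char_diff)`, a position in the concatenated `ONES + tens + scales` list, so a misspelling that best matches a tens or scales word, or ties with a `ONES` word, comes back as the wrong integer ("fourty" ties with "fourteen" at a char diff of 1 and returns 14 rather than 40).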
plot_calls.py CHANGED
@@ -1,9 +1,29 @@
+from datetime import datetime
 import matplotlib.pyplot as plt
 import pandas as pd
 
-df = pd.read_csv('call_history.csv')  # data loading
-print(df)
+
+# pd.set_option('display.max_columns', None)
+# pd.set_option('display.max_rows', None)
+
+df = pd.read_csv(filepath_or_buffer='call_history_bash.csv', sep=";")
+df["elapsed"] = df["finished"].apply(lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S.%f")) - df["started"].apply(lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S.%f"))
+df["elapsed"] = df["elapsed"].apply(lambda x: x.total_seconds())
+df.to_csv("output.csv", index=False, sep=";")
+
+student_numbers = df['active_students'].unique()
+
+plt.figure(figsize=(16, 10))
+rows = len(student_numbers)
+
+for index, student_number in enumerate(student_numbers, 1):
+    data = df[df["active_students"] == student_number]
+    plt.subplot(rows, 2, 2 * index - 1)
+    plt.title("y=seconds, x=active students", x=0.75, y=0.75)
+    plt.boxplot(x=data["elapsed"], labels=[student_number])
+    plt.subplot(rows, 2, 2 * index)
+    plt.title("y=count of seconds, x=seconds", x=0.75, y=0.75)
+    plt.hist(x=data["elapsed"], bins=25, edgecolor='white')
 
-df.plot(by='endpoint', column='delay', kind='box', showmeans=True)
 
 plt.show()
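The per-row `strptime` lambdas work, but the same `elapsed` column can be computed with pandas' vectorized datetime parsing; a minimal equivalent sketch, assuming the same semicolon-separated `call_history_bash.csv` layout:

```python
import pandas as pd

df = pd.read_csv("call_history_bash.csv", sep=";")
# Same "elapsed" column as above, without per-row lambdas
df["elapsed"] = (pd.to_datetime(df["finished"]) - pd.to_datetime(df["started"])).dt.total_seconds()
```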
test_api.py CHANGED
@@ -7,12 +7,12 @@ import pandas as pd
7
  import httpx
8
  from os.path import exists
9
 
10
- NUMBER_OF_CALLS = 20
11
 
12
  headers = {"Content-Type": "application/json; charset=utf-8"}
13
 
14
- base_url = "https://tangibleai-mathtext.hf.space/run/{endpoint}"
15
- # base_url = "http://localhost:7860/run/{endpoint}"
16
 
17
  data_list_1 = {
18
  "endpoint": "text2int",
 
7
  import httpx
8
  from os.path import exists
9
 
10
+ NUMBER_OF_CALLS = 1
11
 
12
  headers = {"Content-Type": "application/json; charset=utf-8"}
13
 
14
+ # base_url = "https://tangibleai-mathtext.hf.space/run/{endpoint}"
15
+ base_url = "http://localhost:7860/run/{endpoint}"
16
 
17
  data_list_1 = {
18
  "endpoint": "text2int",
test_api.sh CHANGED
@@ -1,5 +1,13 @@
 #! /bin/env bash
 
+LOG_FILE_NAME="call_history_bash.csv"
+
+if [[ ! -f "$LOG_FILE_NAME" ]]; then
+    # Create the column names if the file does not exist
+    echo "student_id;active_students;endpoint;inputs;outputs;started;finished" > $LOG_FILE_NAME
+fi
+
+
 data_list_1() {
     responses=(
         "one hundred forty five"
@@ -22,39 +30,48 @@ data_list_2() {
     echo "${responses[$1]}"
 }
 
-text2int="https://tangibleai-mathtext.hf.space/run/text2int"
-text2intpreprocessed="https://tangibleai-mathtext.hf.space/run/text2int-preprocessed"
-sentimentanalysis="https://tangibleai-mathtext.hf.space/run/sentiment-analysis"
+# endpoints: "text2int" "text2int-preprocessed" "sentiment-analysis"
+# selected endpoint to test
+endpoint="text2int"
 
-test_endpoint() {
-    start_=$(date +%s.%N)
-    response=$(curl --silent -X POST "$1" -H 'Content-Type: application/json' -d "$2")
-    end_=$(date +%s.%N)
-    diff=$(echo "$end_ - $start_" | bc)
-    printf " endpoint:%s\n data:%s delay:%s:\n %s\n" "$1" "$2" "$diff" "$response"
+create_random_delay () {
+    # creates a random delay for given arguments
+    echo "scale=8; $RANDOM/32768*$1" | bc
 }
 
-echo "start: $(date)"
 
-for i in {1..20}; do
-    random_value=$((RANDOM % 5))
-    text=$(data_list_1 $random_value)
-    data='{"data": ["'$text'"]}'
-    test_endpoint "$text2int" "$data" >>call_history.txt &
-done
-
-for i in {1..20}; do
-    random_value=$((RANDOM % 5))
-    text=$(data_list_1 $random_value)
-    data='{"data": ["'$text'"]}'
-    test_endpoint "$text2intpreprocessed" "$data" >>call_history.txt &
-done
+simulate_student() {
+    # Student simulator waits randomly between 0-10s after an interaction.
+    # Based on 100 interactions per student
+    for i in {1..100}; do
+        start_=$(date +"%F %T.%6N")
+        url="https://tangibleai-mathtext.hf.space/run/$3"
+
+        response=$(curl --silent -X POST "$url" -H 'Content-Type: application/json' -d "$4")
+
+        if [[ "$response" == *"504"* ]]; then
+            response="504 Gateway Time-out"
+        fi
+
+        end_=$(date +"%F %T.%6N")
+        printf "%s;%s;%s;%s;%s;%s;%s\n" "$1" "$2" "$3" "$4" "$response" "$start_" "$end_" >>$LOG_FILE_NAME
+        sleep "$(create_random_delay 10)"
+    done
+}
+
+echo "start: $(date)"
+
+active_students=250  # the number of students using the system at the same time
 
-for i in {1..20}; do
+i=1
+while [[ "$i" -le "$active_students" ]]
+do
     random_value=$((RANDOM % 5))
     text=$(data_list_2 $random_value)
     data='{"data": ["'$text'"]}'
-    test_endpoint "$sentimentanalysis" "$data" >>call_history.txt &
+    simulate_student "student$i" "$active_students" "$endpoint" "$data" &
+    sleep "$(create_random_delay 1)"  # adding a random delay between students
+    i=$(( "$i" + 1 ))
 done
 
 wait
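Since `$RANDOM` is uniform on 0..32767, `create_random_delay 10` yields a delay roughly uniform on [0, 10) seconds. A back-of-envelope check of the load this script generates, assuming every simulated student finishes all interactions:

```python
active_students = 250  # from the script
interactions = 100     # per simulated student
mean_delay = 10 / 2    # seconds; mean of the uniform [0, 10) sleep

total_requests = active_students * interactions  # 25000 rows appended to call_history_bash.csv
sleep_per_student = interactions * mean_delay    # ~500 s of sleep alone, plus request latency
print(total_requests, sleep_per_student)         # 25000 500.0
```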