abarbosa committed · verified
Commit b37adfa · Parent(s): 7236a53

Update setup script (#4)


- address script documentation and add reference by grader (e968adefbca584126c4b4fc2c0e8f4d4c7a67ecb)
- update gitignore (ac78acc4d75fad46672f51163444b4c9abfc34f2)
- add uv reference (1fdeff28295bfef1616b594b2f025a1f96b85dee)
- add reference columns to all splits (3cf44d9b50cdc889e7c17997361645d241241f91)
- update readme (711ca56fe85c185fe6731a6b2e534044d271eab2)
- update readme (bc306613b7b163651c59ed1d8490926c6161ce8a)

Files changed (4)
  1. .gitignore +2 -0
  2. README.md +3 -2
  3. aes_enem_dataset.py +136 -42
  4. pyproject.toml +13 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+ uv.lock
+ .python-version
README.md CHANGED
@@ -16,7 +16,7 @@ size_categories:
  ## Dataset Description
  - **Purpose**: Automated Essay Scoring
  - **Contents**: Student Essay Grades
- - **Source**: https://github.com/kamel-usp/aes_enem
+ - **Source**: https://huggingface.co/datasets/kamel-usp/aes_enem_dataset
  - **Size**: N<1000
 
  ## Use Case and Creators
@@ -70,10 +70,11 @@ size_categories:
  - sourceAOnly: sourceA data
  - sourceAWithGraders: sourceA data augmented with Grader's review. In a nutshell, each row becomes three (the original grade plus two graders result)
  - sourceB: sourceB data
+ - PROPOR2024: Same split used in the PROPOR 2024 paper. The others are updated and fix some tiny bugs (e.g. reproducibility issues)
  ## Data Considerations
  - **Known Limitations**:
  - **Ethical Considerations**:
 
  ## Additional Information
- - **Additional Links**: Main code is [here](https://github.com/kamel-usp/aes_enem)
+ - **Additional Links**: Main code is [here](https://huggingface.co/datasets/kamel-usp/aes_enem_dataset)
  - **Related Datasets**: https://github.com/evelinamorim/aes-pt
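For reference, the configs documented above load by name. A minimal sketch, assuming a `datasets` release that still supports script-backed datasets (recent versions require `trust_remote_code=True` for them):

```python
from datasets import load_dataset

# Any config name from the README works here:
# "sourceAOnly", "sourceAWithGraders", "sourceB", or the new "PROPOR2024".
dataset = load_dataset(
    "kamel-usp/aes_enem_dataset",
    name="PROPOR2024",
    trust_remote_code=True,
)
print(dataset)  # DatasetDict with train/validation/test splits
```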
aes_enem_dataset.py CHANGED
@@ -11,9 +11,6 @@
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
- # TODO: Address all TODOs and remove all explanatory comments
- """TODO: Add a description here."""
-
 
  import csv
  import math
@@ -28,16 +25,45 @@ from tqdm.auto import tqdm
 
  np.random.seed(42)  # Set the seed
 
- # TODO: Add BibTeX citation
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- TODO
+ _CITATION = """
+ @inproceedings{silveira-etal-2024-new,
+     title = "A New Benchmark for Automatic Essay Scoring in {P}ortuguese",
+     author = "Silveira, Igor Cataneo and
+       Barbosa, Andr{\'e} and
+       Mau{\'a}, Denis Deratani",
+     editor = "Gamallo, Pablo and
+       Claro, Daniela and
+       Teixeira, Ant{\'o}nio and
+       Real, Livy and
+       Garcia, Marcos and
+       Oliveira, Hugo Goncalo and
+       Amaro, Raquel",
+     booktitle = "Proceedings of the 16th International Conference on Computational Processing of Portuguese - Vol. 1",
+     month = mar,
+     year = "2024",
+     address = "Santiago de Compostela, Galicia/Spain",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/2024.propor-1.23/",
+     pages = "228--237"
+ }
  """
 
- # TODO: Add description of the dataset here
- # You can copy an official description
  _DESCRIPTION = """\
- This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+ This dataset was created as part of our work on advancing Automatic Essay Scoring for
+ Brazilian Portuguese. It comprises a large collection of publicly available essays
+ collected from websites simulating University Entrance Exams, with a subset expertly
+ annotated to provide reliable assessment indicators. The dataset includes both the raw
+ text and processed forms of the essays, along with supporting prompts and supplemental
+ texts.
+
+ Key Features:
+ - A diverse corpus of essays with detailed annotations.
+ - A subset graded by expert annotators to evaluate essay quality and task difficulty.
+ - Comprehensive metadata providing provenance and context for each essay.
+ - An empirical analysis framework to support state-of-the-art predictive modeling.
+
+ For further details, please refer to the paper "A New Benchmark for Automatic Essay
+ Scoring in Portuguese" available at https://aclanthology.org/2024.propor-1.23/.
  """
 
  # TODO: Add a link to an official homepage for the dataset here
@@ -81,6 +107,7 @@ CSV_HEADER = [
      "general",
      "specific",
      "essay_year",
+     "reference"
  ]
 
  CSV_HEADERPROPOR = [
@@ -90,35 +117,53 @@ CSV_HEADERPROPOR = [
      "essay",
      "grades",
      "essay_year",
+     "reference"
  ]
 
  SOURCE_A_DESC = """
- Source A have 860 essays available from August 2015 to March 2020.
- For each month of that period, a new prompt together with supporting texts were given, and the graded essays from the previous month were made available.
+ SourceA has 860 essays available from August 2015 to March 2020.
+ For each month of that period, a new prompt together with supporting texts were given,
+ and the graded essays from the previous month were made available.
  Of the 56 prompts, 12 had no associated essays available (at the time of download).
- Additionally, there were 3 prompts that asked for a text in the format of a letter. We removed those 15 prompts and associated texts from the corpus.
- For an unknown reason, 414 of the essays were graded using a five-point scale of either {0, 50, 100, 150, 200} or its scaled-down version going from 0 to 2.
- To avoid introducing bias, we also discarded such instances, resulting in a dataset of 386 annotated essays with prompts and supporting texts (with each component being clearly identified).
- Some of the essays used a six-point scale with 20 points instead of 40 points as the second class. As we believe this introduces minimal bias, we kept such essays and relabeled class 20 as class 40.
- The original data contains comments from the annotators explaining their per-competence scores. They are included in our dataset.
+ Additionally, there were 3 prompts that asked for a text in the format of a letter.
+ We removed those 15 prompts and associated texts from the corpus.
+ For an unknown reason, 414 of the essays were graded using a five-point scale of either
+ {0, 50, 100, 150, 200} or its scaled-down version going from 0 to 2.
+ To avoid introducing bias, we also discarded such instances, resulting in a dataset of
+ 386 annotated essays with prompts and supporting texts (with each component being clearly identified).
+ Some of the essays used a six-point scale with 20 points instead of 40 points as the second class.
+ As we believe this introduces minimal bias, we kept such essays and relabeled class 20 as class 40.
+ The original data contains comments from the annotators explaining their per-competence scores.
+ They are included in our dataset.
  """
 
- SOURCE_A_WITH_GRADERS = "Same as SourceA but augmented with reviwers contractors grade's. Each essay then have three grades: the downloaded one and each grader's feedback. "
+ SOURCE_A_WITH_GRADERS = """
+ sourceAWithGraders includes the original dataset augmented with grades from additional reviewers.
+ Each essay is replicated three times:
+ 1. The original essay with its grades from the website.
+ 2. The same essay with grades from the first human grader.
+ 3. The same essay with grades from the second human grader.
+ """
 
  SOURCE_B_DESC = """
- Source B is very similar to Source A: a new prompt and supporting texts are made available every month along with the graded essays submitted in the previous month.
- We downloaded HTML sources from 7,700 essays from May 2009 to May 2023. Essays released prior to June 2016 were graded on a five-point scale and consequently discarded.
+ SourceB is very similar to Source A: a new prompt and supporting texts are made
+ available every month along with the graded essays submitted in the previous month.
+ We downloaded HTML sources from 7,700 essays from May 2009 to May 2023. Essays released
+ prior to June 2016 were graded on a five-point scale and consequently discarded.
  This resulted in a corpus of approx. 3,200 graded essays on 83 different prompts.
 
- Although in principle, Source B also provides supporting texts for students, none were available at the time the data was downloaded.
- To mitigate this, we extracted supporting texts from the Essay-Br corpus, whenever possible, by manually matching prompts between the two corpora.
- We ended up with approx. 1,000 essays containing both prompt and supporting texts, and approx. 2,200 essays containing only the respective prompt.
+ Although in principle, Source B also provides supporting texts for students, none were
+ available at the time the data was downloaded.
+ To mitigate this, we extracted supporting texts from the Essay-Br corpus, whenever
+ possible, by manually matching prompts between the two corpora.
+ We ended up with approx. 1,000 essays containing both prompt and supporting texts, and
+ approx. 2,200 essays containing only the respective prompt.
  """
 
  PROPOR2024 = """
- Splits used for PROPOR paper. It is a variation of sourceAWithGraders dataset. Post publication we noticed that there was an issue in the reproducible setting.
-
- We fix that and set this config to keep reproducibility w.r.t. numbers reported in the paper.
+ This split corresponds to the results reported in the PROPOR 2024 paper. While reproducibility was
+ fixed in the sourceAWithGraders configuration, this split preserves the original
+ distribution of prompts and scores as used in the paper.
  """
 
 
@@ -147,20 +192,34 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
      ]
 
      def _info(self):
-         features = datasets.Features(
-             {
-                 "id": datasets.Value("string"),
-                 "id_prompt": datasets.Value("string"),
-                 "prompt": datasets.Value("string"),
-                 "supporting_text": datasets.Value("string"),
-                 "essay_title": datasets.Value("string"),
-                 "essay_text": datasets.Value("string"),
-                 "grades": datasets.Sequence(datasets.Value("int16")),
-                 "essay_year": datasets.Value("int16"),
-                 "general_comment": datasets.Value("string"),
-                 "specific_comment": datasets.Value("string"),
-             }
-         )
+         if self.config.name == "PROPOR2024":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "id_prompt": datasets.Value("string"),
+                     "essay_title": datasets.Value("string"),
+                     "essay_text": datasets.Value("string"),
+                     "grades": datasets.Sequence(datasets.Value("int16")),
+                     "essay_year": datasets.Value("int16"),
+                     "reference": datasets.Value("string"),
+                 }
+             )
+         else:
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "id_prompt": datasets.Value("string"),
+                     "prompt": datasets.Value("string"),
+                     "supporting_text": datasets.Value("string"),
+                     "essay_title": datasets.Value("string"),
+                     "essay_text": datasets.Value("string"),
+                     "grades": datasets.Sequence(datasets.Value("int16")),
+                     "essay_year": datasets.Value("int16"),
+                     "general_comment": datasets.Value("string"),
+                     "specific_comment": datasets.Value("string"),
+                     "reference": datasets.Value("string"),
+                 }
+             )
 
          return datasets.DatasetInfo(
              # This is the description that will appear on the datasets page.
@@ -215,11 +274,42 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
          ] # arbitrary removal of zero graded essays
          df.to_csv(filepath, index=False)
 
+     def _preprocess_propor2024(self, base_path: str):
+         for split_case in ["train.csv", "validation.csv", "test.csv"]:
+             filepath = f"{base_path}/propor2024/{split_case}"
+             df = pd.read_csv(filepath)
+
+             # Dictionary to track how many times we've seen each (id, id_prompt) pair
+             counts = {}
+             # List to store the reference for each row
+             references = []
+
+             # Define the mapping for each occurrence
+             occurrence_to_reference = {
+                 0: "crawled_from_web",
+                 1: "grader_a",
+                 2: "grader_b"
+             }
+
+             # Iterate through rows in the original order
+             for _, row in df.iterrows():
+                 key = (row["id"], row["id_prompt"])
+                 count = counts.get(key, 0)
+                 # Assign the reference based on the count
+                 ref = occurrence_to_reference.get(count, "unknown")
+                 references.append(ref)
+                 counts[key] = count + 1
+
+             # Add the reference column without changing the order of rows
+             df["reference"] = references
+             df.to_csv(filepath, index=False)
+
      def _split_generators(self, dl_manager):
          urls = _URLS[self.config.name]
          extracted_files = dl_manager.download_and_extract({self.config.name: urls})
          if "PROPOR2024" == self.config.name:
              base_path = extracted_files["PROPOR2024"]
+             self._preprocess_propor2024(base_path)
              return [
                  datasets.SplitGenerator(
                      name=datasets.Split.TRAIN,
@@ -306,7 +396,8 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
          for grader in [grader_a, grader_b]:
              grader.grades = grader.grades.apply(lambda x: x.strip("[]").split(", "))
              grader.grades = grader.grades.apply(map_list)
-
+         grader_a["reference"] = "grader_a"
+         grader_b["reference"] = "grader_b"
          return grader_a, grader_b
 
      def _generate_splits(self, filepath: str, train_size=0.7):
@@ -409,7 +500,6 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
          assert (
              len(set(val_df["id_prompt"]).intersection(set(test_df["id_prompt"]))) == 0
          ), "Overlap between val and test id_prompt"
-         #train_df['essay_year'] = train_df['essay_year'].astype(int)
          train_df.to_csv(f"{dirname}/train.csv", index=False)
          val_df.to_csv(f"{dirname}/validation.csv", index=False)
          test_df.to_csv(f"{dirname}/test.csv", index=False)
@@ -430,6 +520,7 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
                      "essay_text": row["essay"],
                      "grades": grades,
                      "essay_year": row["essay_year"],
+                     "reference": row["reference"]
                  }
          else:
              with open(filepath, encoding="utf-8") as csvfile:
@@ -449,6 +540,7 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
                      "essay_year": row["essay_year"],
                      "general_comment": row["general"],
                      "specific_comment": row["specific"],
+                     "reference": row["reference"]
                  }
 
 
@@ -719,6 +811,7 @@ class HTMLParser:
          general_comment = None
          specific_comment = None
          essay_year = None
+         reference = "crawled_from_web"
          for prompt_folder in tqdm(
              sub_folders,
              desc=f"Parsing HTML files from: {key}",
@@ -761,6 +854,7 @@ class HTMLParser:
                          general_comment,
                          specific_comment,
                          essay_year,
+                         reference
                      ]
                  )
                  essay_id += 1
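Since every split now carries a `reference` column, the round-robin assignment implemented in `_preprocess_propor2024` can be sanity-checked after loading. A hedged sketch, assuming each essay really is replicated three times as the sourceAWithGraders description states:

```python
from datasets import load_dataset

ds = load_dataset("kamel-usp/aes_enem_dataset", name="PROPOR2024", trust_remote_code=True)
df = ds["train"].to_pandas()

# Overall distribution: the three reference values should be equally frequent,
# with no rows falling through to the "unknown" bucket.
print(df["reference"].value_counts())

# Per-essay view: each (id, id_prompt) pair should carry all three references.
per_essay = df.groupby(["id", "id_prompt"])["reference"].nunique()
print((per_essay == 3).all())
```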
pyproject.toml ADDED
@@ -0,0 +1,13 @@
+ [project]
+ name = "aes-enem-dataset"
+ version = "0.1.0"
+ description = "Add your description here"
+ readme = "README.md"
+ requires-python = ">=3.12"
+ dependencies = [
+     "beautifulsoup4>=4.12.3",
+     "datasets>=3.2.0",
+     "ipdb>=0.13.13",
+     "pandas>=2.2.3",
+     "tqdm>=4.67.1",
+ ]
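With the dependency floors pinned above (and uv.lock ignored per the .gitignore change), a quick environment check can confirm the resolved versions meet them. A minimal sketch using only the standard library; the floors dict simply copies the [project.dependencies] table:

```python
from importlib.metadata import version

# Minimum versions copied from pyproject.toml's [project.dependencies].
floors = {
    "beautifulsoup4": "4.12.3",
    "datasets": "3.2.0",
    "ipdb": "0.13.13",
    "pandas": "2.2.3",
    "tqdm": "4.67.1",
}

for package, floor in floors.items():
    print(f"{package}: installed {version(package)} (requires >= {floor})")
```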