baksEtech commited on
Commit
accf675
·
verified ·
1 Parent(s): 8df6fb8

Upload folder using huggingface_hub

Browse files
Files changed (4) hide show
  1. .argilla/dataset.json +16 -0
  2. .argilla/settings.json +54 -0
  3. .argilla/version.json +3 -0
  4. README.md +142 -37
.argilla/dataset.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "id": "dd81abea-e50b-4f8f-b127-7cd47e43cfd8",
3
+ "name": "advanced_audio_dataset",
4
+ "guidelines": "These are some guidelines.",
5
+ "allow_extra_metadata": false,
6
+ "status": "ready",
7
+ "distribution": {
8
+ "strategy": "overlap",
9
+ "min_submitted": 1
10
+ },
11
+ "metadata": null,
12
+ "workspace_id": "8c9082cb-f67a-41af-a2e7-8a62baf8196e",
13
+ "last_activity_at": "2025-04-30T10:50:53.913883",
14
+ "inserted_at": "2025-04-30T10:46:02.934237",
15
+ "updated_at": "2025-04-30T10:46:03.511515"
16
+ }
.argilla/settings.json ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "guidelines": "These are some guidelines.",
3
+ "allow_extra_metadata": false,
4
+ "distribution": {
5
+ "strategy": "overlap",
6
+ "min_submitted": 1
7
+ },
8
+ "fields": [
9
+ {
10
+ "id": "6d595bd1-9cce-4778-a4fd-b3f417783412",
11
+ "name": "audio",
12
+ "title": "Audio Player",
13
+ "required": true,
14
+ "settings": {
15
+ "type": "custom",
16
+ "template": "\n<div id=\"audio-container\"></div>\n<script>\n// Helper function: Write a string to a DataView.\nfunction writeString(view, offset, string) {\n for (var i = 0; i < string.length; i++) {\n view.setUint8(offset + i, string.charCodeAt(i));\n }\n}\n// Convert an array of floats (normalized -1 to 1) to 16-bit PCM.\nfunction floatTo16BitPCM(output, offset, input) {\n for (var i = 0; i < input.length; i++, offset += 2) {\n var s = Math.max(-1, Math.min(1, input[i]));\n output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);\n }\n}\n// Convert a float array and sample rate into a WAV file buffer.\nfunction arrayToWav(samples, sampleRate) {\n var buffer = new ArrayBuffer(44 + samples.length * 2);\n var view = new DataView(buffer);\n writeString(view, 0, 'RIFF');\n view.setUint32(4, 36 + samples.length * 2, true);\n writeString(view, 8, 'WAVE');\n writeString(view, 12, 'fmt ');\n view.setUint32(16, 16, true);\n view.setUint16(20, 1, true);\n view.setUint16(22, 1, true);\n view.setUint32(24, sampleRate, true);\n view.setUint32(28, sampleRate * 2, true);\n view.setUint16(32, 2, true);\n view.setUint16(34, 16, true);\n writeString(view, 36, 'data');\n view.setUint32(40, samples.length * 2, true);\n floatTo16BitPCM(view, 44, samples);\n return buffer;\n}\n\n// Advanced mode: process the raw audio array from record.fields.audio.array\nvar audioData = record.fields.audio.array;\nvar sampleRate = record.fields.audio.sampling_rate || 16000;\nif (audioData && audioData.length > 0) {\n var wavBuffer = arrayToWav(audioData, sampleRate);\n var blob = new Blob([wavBuffer], { type: 'audio/wav' });\n var audioUrl = URL.createObjectURL(blob);\n var audioElem = document.createElement('audio');\n audioElem.controls = true;\n audioElem.autoplay = true; // Automatically play on render.\n audioElem.src = audioUrl;\n document.getElementById('audio-container').appendChild(audioElem);\n} else {\n document.getElementById('audio-container').innerHTML = \"No audio data available.\";\n}\n</script>\n",
17
+ "advanced_mode": true
18
+ },
19
+ "dataset_id": "dd81abea-e50b-4f8f-b127-7cd47e43cfd8",
20
+ "inserted_at": "2025-04-30T10:46:03.277274",
21
+ "updated_at": "2025-04-30T10:46:03.277274"
22
+ }
23
+ ],
24
+ "questions": [
25
+ {
26
+ "id": "109ca1f4-24f5-4f8b-bdb0-0a75e5f1187a",
27
+ "name": "genre",
28
+ "title": "genre",
29
+ "description": null,
30
+ "required": true,
31
+ "settings": {
32
+ "type": "label_selection",
33
+ "options": [
34
+ {
35
+ "value": "homme",
36
+ "text": "homme",
37
+ "description": null
38
+ },
39
+ {
40
+ "value": "femme",
41
+ "text": "femme",
42
+ "description": null
43
+ }
44
+ ],
45
+ "visible_options": null
46
+ },
47
+ "dataset_id": "dd81abea-e50b-4f8f-b127-7cd47e43cfd8",
48
+ "inserted_at": "2025-04-30T10:46:03.396194",
49
+ "updated_at": "2025-04-30T10:46:03.396194"
50
+ }
51
+ ],
52
+ "metadata": [],
53
+ "vectors": []
54
+ }
.argilla/version.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "argilla": "2.8.0"
3
+ }
README.md CHANGED
@@ -1,39 +1,144 @@
1
  ---
2
- dataset_info:
3
- features:
4
- - name: id
5
- dtype: string
6
- - name: status
7
- dtype: string
8
- - name: inserted_at
9
- dtype: timestamp[us]
10
- - name: updated_at
11
- dtype: timestamp[us]
12
- - name: _server_id
13
- dtype: string
14
- - name: audio
15
- struct:
16
- - name: array
17
- sequence: float64
18
- - name: path
19
- dtype: string
20
- - name: sampling_rate
21
- dtype: int64
22
- - name: genre.responses
23
- sequence: string
24
- - name: genre.responses.users
25
- sequence: string
26
- - name: genre.responses.status
27
- sequence: string
28
- splits:
29
- - name: train
30
- num_bytes: 11789816
31
- num_examples: 10
32
- download_size: 3252766
33
- dataset_size: 11789816
34
- configs:
35
- - config_name: default
36
- data_files:
37
- - split: train
38
- path: data/train-*
39
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ tags:
3
+ - rlfh
4
+ - argilla
5
+ - human-feedback
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  ---
7
+
8
+ # Dataset Card for advanced_audio_dataset
9
+
10
+
11
+
12
+
13
+
14
+
15
+
16
+ This dataset has been created with [Argilla](https://github.com/argilla-io/argilla). As shown in the sections below, this dataset can be loaded into your Argilla server as explained in [Load with Argilla](#load-with-argilla), or used directly with the `datasets` library in [Load with `datasets`](#load-with-datasets).
17
+
18
+
19
+ ## Using this dataset with Argilla
20
+
21
+ To load with Argilla, you'll just need to install Argilla as `pip install argilla --upgrade` and then use the following code:
22
+
23
+ ```python
24
+ import argilla as rg
25
+
26
+ ds = rg.Dataset.from_hub("baksEtech/advanced_audio_dataset", settings="auto")
27
+ ```
28
+
29
+ This will load the settings and records from the dataset repository and push them to your Argilla server for exploration and annotation.
30
+
31
+ ## Using this dataset with `datasets`
32
+
33
+ To load the records of this dataset with `datasets`, you'll just need to install `datasets` as `pip install datasets --upgrade` and then use the following code:
34
+
35
+ ```python
36
+ from datasets import load_dataset
37
+
38
+ ds = load_dataset("baksEtech/advanced_audio_dataset")
39
+ ```
40
+
41
+ This will only load the records of the dataset, but not the Argilla settings.
42
+
43
+ ## Dataset Structure
44
+
45
+ This dataset repo contains:
46
+
47
+ * Dataset records in a format compatible with HuggingFace `datasets`. These records will be loaded automatically when using `rg.Dataset.from_hub` and can be loaded independently using the `datasets` library via `load_dataset`.
48
+ * The [annotation guidelines](#annotation-guidelines) that have been used for building and curating the dataset, if they've been defined in Argilla.
49
+ * A dataset configuration folder conforming to the Argilla dataset format in `.argilla`.
50
+
51
+ The dataset is created in Argilla with: **fields**, **questions**, **suggestions**, **metadata**, **vectors**, and **guidelines**.
52
+
53
+ ### Fields
54
+
55
+ The **fields** are the features or text of a dataset's records. For example, the 'text' column of a text classification dataset or the 'prompt' column of an instruction following dataset.
56
+
57
+ | Field Name | Title | Type | Required |
58
+ | ---------- | ----- | ---- | -------- |
59
+ | audio | Audio Player | custom | True |
60
+
61
+
62
+ ### Questions
63
+
64
+ The **questions** are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label_selection, multi_label_selection, or ranking.
65
+
66
+ | Question Name | Title | Type | Required | Description | Values/Labels |
67
+ | ------------- | ----- | ---- | -------- | ----------- | ------------- |
68
+ | genre | genre | label_selection | True | N/A | ['homme', 'femme'] |
69
+
70
+
71
+ <!-- check length of metadata properties -->
72
+
73
+
74
+
75
+
76
+ ### Data Splits
77
+
78
+ The dataset contains a single split, which is `train`.
79
+
80
+ ## Dataset Creation
81
+
82
+ ### Curation Rationale
83
+
84
+ [More Information Needed]
85
+
86
+ ### Source Data
87
+
88
+ #### Initial Data Collection and Normalization
89
+
90
+ [More Information Needed]
91
+
92
+ #### Who are the source language producers?
93
+
94
+ [More Information Needed]
95
+
96
+ ### Annotations
97
+
98
+ #### Annotation guidelines
99
+
100
+ These are some guidelines.
101
+
102
+ #### Annotation process
103
+
104
+ [More Information Needed]
105
+
106
+ #### Who are the annotators?
107
+
108
+ [More Information Needed]
109
+
110
+ ### Personal and Sensitive Information
111
+
112
+ [More Information Needed]
113
+
114
+ ## Considerations for Using the Data
115
+
116
+ ### Social Impact of Dataset
117
+
118
+ [More Information Needed]
119
+
120
+ ### Discussion of Biases
121
+
122
+ [More Information Needed]
123
+
124
+ ### Other Known Limitations
125
+
126
+ [More Information Needed]
127
+
128
+ ## Additional Information
129
+
130
+ ### Dataset Curators
131
+
132
+ [More Information Needed]
133
+
134
+ ### Licensing Information
135
+
136
+ [More Information Needed]
137
+
138
+ ### Citation Information
139
+
140
+ [More Information Needed]
141
+
142
+ ### Contributions
143
+
144
+ [More Information Needed]