add initial files
- .devcontainer/build_image.sh +19 -0
- .devcontainer/container.env +0 -0
- .devcontainer/dev.dockerfile +69 -0
- .devcontainer/devcontainer.json +29 -0
- LICENSE +201 -0
- README.md +21 -0
- pyproject.toml +18 -0
- req.txt +6 -0
- results_asr.md +19 -0
- results_diar.md +19 -0
- results_slm.md +19 -0
- results_slu.md +19 -0
- results_sr.md +19 -0
- results_tts.md +19 -0
- setup.cfg +2 -0
- streamlit_app.py +333 -0
.devcontainer/build_image.sh
ADDED
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+set -e
+
+_root=${PWD}
+
+if [ ! -f ".devcontainer/container.env" ]; then
+    touch .devcontainer/container.env
+fi
+
+# Build the Docker image (arg names must match the ARGs declared in dev.dockerfile)
+echo "[INFO] Building Docker"
+
+docker build \
+    -f "${_root}/.devcontainer/dev.dockerfile" \
+    -t "espnet:dev-lboard" \
+    --build-arg USERNAME="$(whoami)" \
+    --build-arg USER_ID="$(id -u)" \
+    --build-arg GROUP_ID="$(id -g)" \
+    "${_root}"
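Outside VS Code, the script can also be exercised by hand. A minimal sketch, assuming Docker is installed and the working directory is the repository root:

    bash .devcontainer/build_image.sh
    docker image ls espnet:dev-lboard    # confirm the tag the script builds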
.devcontainer/container.env
ADDED
File without changes
.devcontainer/dev.dockerfile
ADDED
@@ -0,0 +1,69 @@
+FROM ubuntu:latest
+
+LABEL maintainer="Nelson Yalta <[email protected]>"
+
+ENV DEBIAN_FRONTEND=noninteractive
+ARG USERNAME=user
+ARG USER_ID=1000
+ARG GROUP_ID=1000
+
+RUN apt-get update && \
+    apt-get -y install --no-install-recommends \
+        bc \
+        build-essential \
+        cmake \
+        curl \
+        gawk \
+        gfortran \
+        git \
+        gnupg2 \
+        libffi-dev \
+        libjpeg-dev \
+        libtool \
+        libncurses5-dev \
+        python3-full \
+        python3-dev \
+        python3-pip \
+        software-properties-common \
+        sudo \
+        unzip \
+        wget \
+        zip \
+        zlib1g-dev \
+        && \
+    apt-get autoremove -y && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/* && \
+    rm -rf /tmp/* && \
+    mkdir -p /workspaces
+
+RUN if [ -z "$(getent group ${GROUP_ID})" ]; then \
+        groupadd -g ${GROUP_ID} "${USERNAME}"; \
+    else \
+        existing_group="$(getent group ${GROUP_ID} | cut -d: -f1)"; \
+        if [ "${existing_group}" != "${USERNAME}" ]; then \
+            groupmod -n "${USERNAME}" "${existing_group}"; \
+        fi; \
+    fi && \
+    if [ -z "$(getent passwd ${USER_ID})" ]; then \
+        useradd -m -u ${USER_ID} -g ${GROUP_ID} "${USERNAME}"; \
+    else \
+        existing_user="$(getent passwd ${USER_ID} | cut -d: -f1)"; \
+        if [ "${existing_user}" != "${USERNAME}" ]; then \
+            usermod -l "${USERNAME}" -d /home/"${USERNAME}" -m "${existing_user}"; \
+        fi; \
+    fi && \
+    echo "${USERNAME} ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers && \
+    sed -i 's/#force_color_prompt=yes/force_color_prompt=yes/g' /home/${USERNAME}/.bashrc && \
+    chown -R ${USERNAME}:${USERNAME} /workspaces
+
+USER ${USERNAME}
+
+# Runtime environment defaults
+ENV TZ=Etc/UTC
+ENV PATH=/workspaces/venv/bin:${PATH}
+ENV STREAMLIT_SERVER_ADDRESS=localhost
+
+RUN python3 -m venv /workspaces/venv
+
+WORKDIR /workspaces
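The getent/groupmod/usermod block remaps the in-container user to the host UID/GID so that files created in bind mounts stay owned by the host user. To poke at the image without the dev container tooling, a sketch (the mount target is an assumption; the Dockerfile itself only guarantees WORKDIR /workspaces, and VS Code mounts the repo under /workspaces/<folder-name>):

    docker run --rm -it -v "${PWD}:/workspaces/leaderboard" espnet:dev-lboard bash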
.devcontainer/devcontainer.json
ADDED
@@ -0,0 +1,29 @@
+{
+    "name": "Leaderboard",
+    "updateRemoteUserUID": false,
+    "image": "espnet:dev-lboard",
+    "initializeCommand": ".devcontainer/build_image.sh",
+    "features": {},
+    "customizations": {
+        "vscode": {
+            "settings": {
+                "terminal.integrated.defaultProfile.linux": "bash"
+            },
+            "extensions": [
+                "ms-python.python",
+                "ms-python.vscode-pylance",
+                "donjayamanne.python-extension-pack"
+            ]
+        }
+    },
+    "postCreateCommand": "pip install -r req.txt --extra-index-url https://download.pytorch.org/whl/cpu",
+    "runArgs": [
+        "--rm",
+        "--name",
+        "espnet-leaderboard",
+        "--hostname",
+        "espnet"
+    ]
+    // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
+    // "remoteUser": "root"
+}
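Lifecycle note: "initializeCommand" runs on the host before the container exists (which is why the image build lives there), while "postCreateCommand" runs inside the freshly created container. The same flow can be driven from a terminal with the Dev Containers CLI (a sketch, assuming npm's @devcontainers/cli is installed):

    devcontainer up --workspace-folder .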
LICENSE
ADDED
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2017 Johns Hopkins University (Shinji Watanabe)
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
README.md
CHANGED
@@ -12,3 +12,24 @@ short_description: Official ESPnet Leaderboard
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+
+## Leaderboard
+
+| Model Name | Publisher | Open? | Chatbot Arena Elo | HellaSwag (few-shot) | HellaSwag (zero-shot) | HellaSwag (one-shot) | HumanEval-Python (pass@1) | LAMBADA (zero-shot) | LAMBADA (one-shot) | MMLU (zero-shot) | MMLU (few-shot) | TriviaQA (zero-shot) | TriviaQA (one-shot) | WinoGrande (zero-shot) | WinoGrande (one-shot) | WinoGrande (few-shot) |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| [alpaca-7b](https://crfm.stanford.edu/2023/03/13/alpaca.html) | Stanford | no | | | [0.739](https://gpt4all.io/reports/GPT4All_Technical_Report_3.pdf) | | | | | | | | | [0.661](https://gpt4all.io/reports/GPT4All_Technical_Report_3.pdf) | | |
+| [alpaca-13b](https://crfm.stanford.edu/2023/03/13/alpaca.html) | Stanford | no | [1008](https://lmsys.org/blog/2023-05-03-arena/) | | | | | | | | | | | | | |
+
+
+## Benchmarks
+
+| Benchmark Name | Author | Link | Description |
+| --- | --- | --- | --- |
+| Chatbot Arena Elo | LMSYS | https://lmsys.org/blog/2023-05-03-arena/ | "In this blog post, we introduce Chatbot Arena, an LLM benchmark platform featuring anonymous randomized battles in a crowdsourced manner. Chatbot Arena adopts the Elo rating system, which is a widely-used rating system in chess and other competitive games." (Source: https://lmsys.org/blog/2023-05-03-arena/) |
+| HellaSwag | Zellers et al. | https://arxiv.org/abs/1905.07830v1 | "HellaSwag is a challenge dataset for evaluating commonsense NLI that is specially hard for state-of-the-art models, though its questions are trivial for humans (>95% accuracy)." (Source: https://paperswithcode.com/dataset/hellaswag) |
+| HumanEval | Chen et al. | https://arxiv.org/abs/2107.03374v2 | "It used to measure functional correctness for synthesizing programs from docstrings. It consists of 164 original programming problems, assessing language comprehension, algorithms, and simple mathematics, with some comparable to simple software interview questions." (Source: https://paperswithcode.com/dataset/humaneval) |
+| LAMBADA | Paperno et al. | https://arxiv.org/abs/1606.06031 | "The LAMBADA evaluates the capabilities of computational models for text understanding by means of a word prediction task. LAMBADA is a collection of narrative passages sharing the characteristic that human subjects are able to guess their last word if they are exposed to the whole passage, but not if they only see the last sentence preceding the target word. To succeed on LAMBADA, computational models cannot simply rely on local context, but must be able to keep track of information in the broader discourse." (Source: https://huggingface.co/datasets/lambada) |
+| MMLU | Hendrycks et al. | https://github.com/hendrycks/test | "The benchmark covers 57 subjects across STEM, the humanities, the social sciences, and more. It ranges in difficulty from an elementary level to an advanced professional level, and it tests both world knowledge and problem solving ability. Subjects range from traditional areas, such as mathematics and history, to more specialized areas like law and ethics. The granularity and breadth of the subjects makes the benchmark ideal for identifying a model's blind spots." (Source: https://paperswithcode.com/dataset/mmlu) |
+| TriviaQA | Joshi et al. | https://arxiv.org/abs/1705.03551v2 | "We present TriviaQA, a challenging reading comprehension dataset containing over 650K question-answer-evidence triples. TriviaQA includes 95K question-answer pairs authored by trivia enthusiasts and independently gathered evidence documents, six per question on average, that provide high quality distant supervision for answering the questions." (Source: https://arxiv.org/abs/1705.03551v2) |
+| WinoGrande | Sakaguchi et al. | https://arxiv.org/abs/1907.10641v2 | "A large-scale dataset of 44k [expert-crafted pronoun resolution] problems, inspired by the original WSC design, but adjusted to improve both the scale and the hardness of the dataset." (Source: https://arxiv.org/abs/1907.10641v2) |
pyproject.toml
ADDED
@@ -0,0 +1,18 @@
+[tool.poetry]
+package-mode = false
+description = ""
+authors = ["Nelson Yalta <[email protected]>"]
+readme = "README.md"
+
+[tool.poetry.dependencies]
+python = "^3.10"
+pandas = "^2.2.2"
+librosa = "^0.9.0"
+streamlit = "^1.37.1"
+numpy = "^1.26.4"
+torch = "^2.6.0"
+torchaudio = "^2.6.0"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
req.txt
ADDED
@@ -0,0 +1,6 @@
+pandas >= 2.2.2
+librosa >= 0.9.0
+streamlit >= 1.37.1
+numpy >= 1.26.4
+torch >= 2.6.0
+torchaudio >= 2.6.0
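These are the same pins that devcontainer.json installs via its "postCreateCommand"; to reproduce that CPU-only PyTorch install by hand inside the venv:

    pip install -r req.txt --extra-index-url https://download.pytorch.org/whl/cpu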
results_asr.md
ADDED
@@ -0,0 +1,19 @@
+## Leaderboard
+
+| Model Name | Publisher | Open? | Chatbot Arena Elo | HellaSwag (few-shot) | HellaSwag (zero-shot) | HellaSwag (one-shot) | HumanEval-Python (pass@1) | LAMBADA (zero-shot) | LAMBADA (one-shot) | MMLU (zero-shot) | MMLU (few-shot) | TriviaQA (zero-shot) | TriviaQA (one-shot) | WinoGrande (zero-shot) | WinoGrande (one-shot) | WinoGrande (few-shot) |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| [alpaca-7b](https://crfm.stanford.edu/2023/03/13/alpaca.html) | Stanford | no | | | [0.739](https://gpt4all.io/reports/GPT4All_Technical_Report_3.pdf) | | | | | | | | | [0.661](https://gpt4all.io/reports/GPT4All_Technical_Report_3.pdf) | | |
+| [alpaca-13b](https://crfm.stanford.edu/2023/03/13/alpaca.html) | Stanford | no | [1008](https://lmsys.org/blog/2023-05-03-arena/) | | | | | | | | | | | | | |
+
+
+## Benchmarks
+
+| Benchmark Name | Author | Link | Description |
+| --- | --- | --- | --- |
+| Chatbot Arena Elo | LMSYS | https://lmsys.org/blog/2023-05-03-arena/ | "In this blog post, we introduce Chatbot Arena, an LLM benchmark platform featuring anonymous randomized battles in a crowdsourced manner. Chatbot Arena adopts the Elo rating system, which is a widely-used rating system in chess and other competitive games." (Source: https://lmsys.org/blog/2023-05-03-arena/) |
+| HellaSwag | Zellers et al. | https://arxiv.org/abs/1905.07830v1 | "HellaSwag is a challenge dataset for evaluating commonsense NLI that is specially hard for state-of-the-art models, though its questions are trivial for humans (>95% accuracy)." (Source: https://paperswithcode.com/dataset/hellaswag) |
+| HumanEval | Chen et al. | https://arxiv.org/abs/2107.03374v2 | "It used to measure functional correctness for synthesizing programs from docstrings. It consists of 164 original programming problems, assessing language comprehension, algorithms, and simple mathematics, with some comparable to simple software interview questions." (Source: https://paperswithcode.com/dataset/humaneval) |
+| LAMBADA | Paperno et al. | https://arxiv.org/abs/1606.06031 | "The LAMBADA evaluates the capabilities of computational models for text understanding by means of a word prediction task. LAMBADA is a collection of narrative passages sharing the characteristic that human subjects are able to guess their last word if they are exposed to the whole passage, but not if they only see the last sentence preceding the target word. To succeed on LAMBADA, computational models cannot simply rely on local context, but must be able to keep track of information in the broader discourse." (Source: https://huggingface.co/datasets/lambada) |
+| MMLU | Hendrycks et al. | https://github.com/hendrycks/test | "The benchmark covers 57 subjects across STEM, the humanities, the social sciences, and more. It ranges in difficulty from an elementary level to an advanced professional level, and it tests both world knowledge and problem solving ability. Subjects range from traditional areas, such as mathematics and history, to more specialized areas like law and ethics. The granularity and breadth of the subjects makes the benchmark ideal for identifying a model's blind spots." (Source: https://paperswithcode.com/dataset/mmlu) |
+| TriviaQA | Joshi et al. | https://arxiv.org/abs/1705.03551v2 | "We present TriviaQA, a challenging reading comprehension dataset containing over 650K question-answer-evidence triples. TriviaQA includes 95K question-answer pairs authored by trivia enthusiasts and independently gathered evidence documents, six per question on average, that provide high quality distant supervision for answering the questions." (Source: https://arxiv.org/abs/1705.03551v2) |
+| WinoGrande | Sakaguchi et al. | https://arxiv.org/abs/1907.10641v2 | "A large-scale dataset of 44k [expert-crafted pronoun resolution] problems, inspired by the original WSC design, but adjusted to improve both the scale and the hardness of the dataset." (Source: https://arxiv.org/abs/1907.10641v2) |
results_diar.md
ADDED
@@ -0,0 +1,19 @@
+## Leaderboard
+
+| Model Name | Publisher | Open? | Chatbot Arena Elo | HellaSwag (few-shot) | HellaSwag (zero-shot) | HellaSwag (one-shot) | HumanEval-Python (pass@1) | LAMBADA (zero-shot) | LAMBADA (one-shot) | MMLU (zero-shot) | MMLU (few-shot) | TriviaQA (zero-shot) | TriviaQA (one-shot) | WinoGrande (zero-shot) | WinoGrande (one-shot) | WinoGrande (few-shot) |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| [alpaca-7b](https://crfm.stanford.edu/2023/03/13/alpaca.html) | Stanford | no | | | [0.739](https://gpt4all.io/reports/GPT4All_Technical_Report_3.pdf) | | | | | | | | | [0.661](https://gpt4all.io/reports/GPT4All_Technical_Report_3.pdf) | | |
+| [alpaca-13b](https://crfm.stanford.edu/2023/03/13/alpaca.html) | Stanford | no | [1008](https://lmsys.org/blog/2023-05-03-arena/) | | | | | | | | | | | | | |
+
+
+## Benchmarks
+
+| Benchmark Name | Author | Link | Description |
+| --- | --- | --- | --- |
+| Chatbot Arena Elo | LMSYS | https://lmsys.org/blog/2023-05-03-arena/ | "In this blog post, we introduce Chatbot Arena, an LLM benchmark platform featuring anonymous randomized battles in a crowdsourced manner. Chatbot Arena adopts the Elo rating system, which is a widely-used rating system in chess and other competitive games." (Source: https://lmsys.org/blog/2023-05-03-arena/) |
+| HellaSwag | Zellers et al. | https://arxiv.org/abs/1905.07830v1 | "HellaSwag is a challenge dataset for evaluating commonsense NLI that is specially hard for state-of-the-art models, though its questions are trivial for humans (>95% accuracy)." (Source: https://paperswithcode.com/dataset/hellaswag) |
+| HumanEval | Chen et al. | https://arxiv.org/abs/2107.03374v2 | "It used to measure functional correctness for synthesizing programs from docstrings. It consists of 164 original programming problems, assessing language comprehension, algorithms, and simple mathematics, with some comparable to simple software interview questions." (Source: https://paperswithcode.com/dataset/humaneval) |
+| LAMBADA | Paperno et al. | https://arxiv.org/abs/1606.06031 | "The LAMBADA evaluates the capabilities of computational models for text understanding by means of a word prediction task. LAMBADA is a collection of narrative passages sharing the characteristic that human subjects are able to guess their last word if they are exposed to the whole passage, but not if they only see the last sentence preceding the target word. To succeed on LAMBADA, computational models cannot simply rely on local context, but must be able to keep track of information in the broader discourse." (Source: https://huggingface.co/datasets/lambada) |
+| MMLU | Hendrycks et al. | https://github.com/hendrycks/test | "The benchmark covers 57 subjects across STEM, the humanities, the social sciences, and more. It ranges in difficulty from an elementary level to an advanced professional level, and it tests both world knowledge and problem solving ability. Subjects range from traditional areas, such as mathematics and history, to more specialized areas like law and ethics. The granularity and breadth of the subjects makes the benchmark ideal for identifying a model's blind spots." (Source: https://paperswithcode.com/dataset/mmlu) |
+| TriviaQA | Joshi et al. | https://arxiv.org/abs/1705.03551v2 | "We present TriviaQA, a challenging reading comprehension dataset containing over 650K question-answer-evidence triples. TriviaQA includes 95K question-answer pairs authored by trivia enthusiasts and independently gathered evidence documents, six per question on average, that provide high quality distant supervision for answering the questions." (Source: https://arxiv.org/abs/1705.03551v2) |
+| WinoGrande | Sakaguchi et al. | https://arxiv.org/abs/1907.10641v2 | "A large-scale dataset of 44k [expert-crafted pronoun resolution] problems, inspired by the original WSC design, but adjusted to improve both the scale and the hardness of the dataset." (Source: https://arxiv.org/abs/1907.10641v2) |
results_slm.md
ADDED
@@ -0,0 +1,19 @@
+## Leaderboard
+
+| Model Name | Publisher | Open? | Chatbot Arena Elo | HellaSwag (few-shot) | HellaSwag (zero-shot) | HellaSwag (one-shot) | HumanEval-Python (pass@1) | LAMBADA (zero-shot) | LAMBADA (one-shot) | MMLU (zero-shot) | MMLU (few-shot) | TriviaQA (zero-shot) | TriviaQA (one-shot) | WinoGrande (zero-shot) | WinoGrande (one-shot) | WinoGrande (few-shot) |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| [alpaca-7b](https://crfm.stanford.edu/2023/03/13/alpaca.html) | Stanford | no | | | [0.739](https://gpt4all.io/reports/GPT4All_Technical_Report_3.pdf) | | | | | | | | | [0.661](https://gpt4all.io/reports/GPT4All_Technical_Report_3.pdf) | | |
+| [alpaca-13b](https://crfm.stanford.edu/2023/03/13/alpaca.html) | Stanford | no | [1008](https://lmsys.org/blog/2023-05-03-arena/) | | | | | | | | | | | | | |
+
+
+## Benchmarks
+
+| Benchmark Name | Author | Link | Description |
+| --- | --- | --- | --- |
+| Chatbot Arena Elo | LMSYS | https://lmsys.org/blog/2023-05-03-arena/ | "In this blog post, we introduce Chatbot Arena, an LLM benchmark platform featuring anonymous randomized battles in a crowdsourced manner. Chatbot Arena adopts the Elo rating system, which is a widely-used rating system in chess and other competitive games." (Source: https://lmsys.org/blog/2023-05-03-arena/) |
+| HellaSwag | Zellers et al. | https://arxiv.org/abs/1905.07830v1 | "HellaSwag is a challenge dataset for evaluating commonsense NLI that is specially hard for state-of-the-art models, though its questions are trivial for humans (>95% accuracy)." (Source: https://paperswithcode.com/dataset/hellaswag) |
+| HumanEval | Chen et al. | https://arxiv.org/abs/2107.03374v2 | "It used to measure functional correctness for synthesizing programs from docstrings. It consists of 164 original programming problems, assessing language comprehension, algorithms, and simple mathematics, with some comparable to simple software interview questions." (Source: https://paperswithcode.com/dataset/humaneval) |
+| LAMBADA | Paperno et al. | https://arxiv.org/abs/1606.06031 | "The LAMBADA evaluates the capabilities of computational models for text understanding by means of a word prediction task. LAMBADA is a collection of narrative passages sharing the characteristic that human subjects are able to guess their last word if they are exposed to the whole passage, but not if they only see the last sentence preceding the target word. To succeed on LAMBADA, computational models cannot simply rely on local context, but must be able to keep track of information in the broader discourse." (Source: https://huggingface.co/datasets/lambada) |
+| MMLU | Hendrycks et al. | https://github.com/hendrycks/test | "The benchmark covers 57 subjects across STEM, the humanities, the social sciences, and more. It ranges in difficulty from an elementary level to an advanced professional level, and it tests both world knowledge and problem solving ability. Subjects range from traditional areas, such as mathematics and history, to more specialized areas like law and ethics. The granularity and breadth of the subjects makes the benchmark ideal for identifying a model's blind spots." (Source: https://paperswithcode.com/dataset/mmlu) |
+| TriviaQA | Joshi et al. | https://arxiv.org/abs/1705.03551v2 | "We present TriviaQA, a challenging reading comprehension dataset containing over 650K question-answer-evidence triples. TriviaQA includes 95K question-answer pairs authored by trivia enthusiasts and independently gathered evidence documents, six per question on average, that provide high quality distant supervision for answering the questions." (Source: https://arxiv.org/abs/1705.03551v2) |
+| WinoGrande | Sakaguchi et al. | https://arxiv.org/abs/1907.10641v2 | "A large-scale dataset of 44k [expert-crafted pronoun resolution] problems, inspired by the original WSC design, but adjusted to improve both the scale and the hardness of the dataset." (Source: https://arxiv.org/abs/1907.10641v2) |
results_slu.md
ADDED
@@ -0,0 +1,19 @@
+## Leaderboard
+
+| Model Name | Publisher | Open? | Chatbot Arena Elo | HellaSwag (few-shot) | HellaSwag (zero-shot) | HellaSwag (one-shot) | HumanEval-Python (pass@1) | LAMBADA (zero-shot) | LAMBADA (one-shot) | MMLU (zero-shot) | MMLU (few-shot) | TriviaQA (zero-shot) | TriviaQA (one-shot) | WinoGrande (zero-shot) | WinoGrande (one-shot) | WinoGrande (few-shot) |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| [alpaca-7b](https://crfm.stanford.edu/2023/03/13/alpaca.html) | Stanford | no | | | [0.739](https://gpt4all.io/reports/GPT4All_Technical_Report_3.pdf) | | | | | | | | | [0.661](https://gpt4all.io/reports/GPT4All_Technical_Report_3.pdf) | | |
+| [alpaca-13b](https://crfm.stanford.edu/2023/03/13/alpaca.html) | Stanford | no | [1008](https://lmsys.org/blog/2023-05-03-arena/) | | | | | | | | | | | | | |
+
+
+## Benchmarks
+
+| Benchmark Name | Author | Link | Description |
+| --- | --- | --- | --- |
+| Chatbot Arena Elo | LMSYS | https://lmsys.org/blog/2023-05-03-arena/ | "In this blog post, we introduce Chatbot Arena, an LLM benchmark platform featuring anonymous randomized battles in a crowdsourced manner. Chatbot Arena adopts the Elo rating system, which is a widely-used rating system in chess and other competitive games." (Source: https://lmsys.org/blog/2023-05-03-arena/) |
+| HellaSwag | Zellers et al. | https://arxiv.org/abs/1905.07830v1 | "HellaSwag is a challenge dataset for evaluating commonsense NLI that is specially hard for state-of-the-art models, though its questions are trivial for humans (>95% accuracy)." (Source: https://paperswithcode.com/dataset/hellaswag) |
+| HumanEval | Chen et al. | https://arxiv.org/abs/2107.03374v2 | "It used to measure functional correctness for synthesizing programs from docstrings. It consists of 164 original programming problems, assessing language comprehension, algorithms, and simple mathematics, with some comparable to simple software interview questions." (Source: https://paperswithcode.com/dataset/humaneval) |
+| LAMBADA | Paperno et al. | https://arxiv.org/abs/1606.06031 | "The LAMBADA evaluates the capabilities of computational models for text understanding by means of a word prediction task. LAMBADA is a collection of narrative passages sharing the characteristic that human subjects are able to guess their last word if they are exposed to the whole passage, but not if they only see the last sentence preceding the target word. To succeed on LAMBADA, computational models cannot simply rely on local context, but must be able to keep track of information in the broader discourse." (Source: https://huggingface.co/datasets/lambada) |
+| MMLU | Hendrycks et al. | https://github.com/hendrycks/test | "The benchmark covers 57 subjects across STEM, the humanities, the social sciences, and more. It ranges in difficulty from an elementary level to an advanced professional level, and it tests both world knowledge and problem solving ability. Subjects range from traditional areas, such as mathematics and history, to more specialized areas like law and ethics. The granularity and breadth of the subjects makes the benchmark ideal for identifying a model's blind spots." (Source: https://paperswithcode.com/dataset/mmlu) |
+| TriviaQA | Joshi et al. | https://arxiv.org/abs/1705.03551v2 | "We present TriviaQA, a challenging reading comprehension dataset containing over 650K question-answer-evidence triples. TriviaQA includes 95K question-answer pairs authored by trivia enthusiasts and independently gathered evidence documents, six per question on average, that provide high quality distant supervision for answering the questions." (Source: https://arxiv.org/abs/1705.03551v2) |
+| WinoGrande | Sakaguchi et al. | https://arxiv.org/abs/1907.10641v2 | "A large-scale dataset of 44k [expert-crafted pronoun resolution] problems, inspired by the original WSC design, but adjusted to improve both the scale and the hardness of the dataset." (Source: https://arxiv.org/abs/1907.10641v2) |
results_sr.md
ADDED
@@ -0,0 +1,19 @@
+## Leaderboard
+
+| Model Name | Publisher | Open? | Chatbot Arena Elo | HellaSwag (few-shot) | HellaSwag (zero-shot) | HellaSwag (one-shot) | HumanEval-Python (pass@1) | LAMBADA (zero-shot) | LAMBADA (one-shot) | MMLU (zero-shot) | MMLU (few-shot) | TriviaQA (zero-shot) | TriviaQA (one-shot) | WinoGrande (zero-shot) | WinoGrande (one-shot) | WinoGrande (few-shot) |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| [alpaca-7b](https://crfm.stanford.edu/2023/03/13/alpaca.html) | Stanford | no | | | [0.739](https://gpt4all.io/reports/GPT4All_Technical_Report_3.pdf) | | | | | | | | | [0.661](https://gpt4all.io/reports/GPT4All_Technical_Report_3.pdf) | | |
+| [alpaca-13b](https://crfm.stanford.edu/2023/03/13/alpaca.html) | Stanford | no | [1008](https://lmsys.org/blog/2023-05-03-arena/) | | | | | | | | | | | | | |
+
+
+## Benchmarks
+
+| Benchmark Name | Author | Link | Description |
+| --- | --- | --- | --- |
+| Chatbot Arena Elo | LMSYS | https://lmsys.org/blog/2023-05-03-arena/ | "In this blog post, we introduce Chatbot Arena, an LLM benchmark platform featuring anonymous randomized battles in a crowdsourced manner. Chatbot Arena adopts the Elo rating system, which is a widely-used rating system in chess and other competitive games." (Source: https://lmsys.org/blog/2023-05-03-arena/) |
+| HellaSwag | Zellers et al. | https://arxiv.org/abs/1905.07830v1 | "HellaSwag is a challenge dataset for evaluating commonsense NLI that is specially hard for state-of-the-art models, though its questions are trivial for humans (>95% accuracy)." (Source: https://paperswithcode.com/dataset/hellaswag) |
+| HumanEval | Chen et al. | https://arxiv.org/abs/2107.03374v2 | "It used to measure functional correctness for synthesizing programs from docstrings. It consists of 164 original programming problems, assessing language comprehension, algorithms, and simple mathematics, with some comparable to simple software interview questions." (Source: https://paperswithcode.com/dataset/humaneval) |
+| LAMBADA | Paperno et al. | https://arxiv.org/abs/1606.06031 | "The LAMBADA evaluates the capabilities of computational models for text understanding by means of a word prediction task. LAMBADA is a collection of narrative passages sharing the characteristic that human subjects are able to guess their last word if they are exposed to the whole passage, but not if they only see the last sentence preceding the target word. To succeed on LAMBADA, computational models cannot simply rely on local context, but must be able to keep track of information in the broader discourse." (Source: https://huggingface.co/datasets/lambada) |
+| MMLU | Hendrycks et al. | https://github.com/hendrycks/test | "The benchmark covers 57 subjects across STEM, the humanities, the social sciences, and more. It ranges in difficulty from an elementary level to an advanced professional level, and it tests both world knowledge and problem solving ability. Subjects range from traditional areas, such as mathematics and history, to more specialized areas like law and ethics. The granularity and breadth of the subjects makes the benchmark ideal for identifying a model's blind spots." (Source: https://paperswithcode.com/dataset/mmlu) |
+| TriviaQA | Joshi et al. | https://arxiv.org/abs/1705.03551v2 | "We present TriviaQA, a challenging reading comprehension dataset containing over 650K question-answer-evidence triples. TriviaQA includes 95K question-answer pairs authored by trivia enthusiasts and independently gathered evidence documents, six per question on average, that provide high quality distant supervision for answering the questions." (Source: https://arxiv.org/abs/1705.03551v2) |
+| WinoGrande | Sakaguchi et al. | https://arxiv.org/abs/1907.10641v2 | "A large-scale dataset of 44k [expert-crafted pronoun resolution] problems, inspired by the original WSC design, but adjusted to improve both the scale and the hardness of the dataset." (Source: https://arxiv.org/abs/1907.10641v2) |
results_tts.md
ADDED
@@ -0,0 +1,19 @@
+## Leaderboard
+
+| Model Name | Publisher | Open? | Chatbot Arena Elo | HellaSwag (few-shot) | HellaSwag (zero-shot) | HellaSwag (one-shot) | HumanEval-Python (pass@1) | LAMBADA (zero-shot) | LAMBADA (one-shot) | MMLU (zero-shot) | MMLU (few-shot) | TriviaQA (zero-shot) | TriviaQA (one-shot) | WinoGrande (zero-shot) | WinoGrande (one-shot) | WinoGrande (few-shot) |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| [alpaca-7b](https://crfm.stanford.edu/2023/03/13/alpaca.html) | Stanford | no | | | [0.739](https://gpt4all.io/reports/GPT4All_Technical_Report_3.pdf) | | | | | | | | | [0.661](https://gpt4all.io/reports/GPT4All_Technical_Report_3.pdf) | | |
+| [alpaca-13b](https://crfm.stanford.edu/2023/03/13/alpaca.html) | Stanford | no | [1008](https://lmsys.org/blog/2023-05-03-arena/) | | | | | | | | | | | | | |
+
+
+## Benchmarks
+
+| Benchmark Name | Author | Link | Description |
+| --- | --- | --- | --- |
+| Chatbot Arena Elo | LMSYS | https://lmsys.org/blog/2023-05-03-arena/ | "In this blog post, we introduce Chatbot Arena, an LLM benchmark platform featuring anonymous randomized battles in a crowdsourced manner. Chatbot Arena adopts the Elo rating system, which is a widely-used rating system in chess and other competitive games." (Source: https://lmsys.org/blog/2023-05-03-arena/) |
+| HellaSwag | Zellers et al. | https://arxiv.org/abs/1905.07830v1 | "HellaSwag is a challenge dataset for evaluating commonsense NLI that is specially hard for state-of-the-art models, though its questions are trivial for humans (>95% accuracy)." (Source: https://paperswithcode.com/dataset/hellaswag) |
+| HumanEval | Chen et al. | https://arxiv.org/abs/2107.03374v2 | "It used to measure functional correctness for synthesizing programs from docstrings. It consists of 164 original programming problems, assessing language comprehension, algorithms, and simple mathematics, with some comparable to simple software interview questions." (Source: https://paperswithcode.com/dataset/humaneval) |
+| LAMBADA | Paperno et al. | https://arxiv.org/abs/1606.06031 | "The LAMBADA evaluates the capabilities of computational models for text understanding by means of a word prediction task. LAMBADA is a collection of narrative passages sharing the characteristic that human subjects are able to guess their last word if they are exposed to the whole passage, but not if they only see the last sentence preceding the target word. To succeed on LAMBADA, computational models cannot simply rely on local context, but must be able to keep track of information in the broader discourse." (Source: https://huggingface.co/datasets/lambada) |
+| MMLU | Hendrycks et al. | https://github.com/hendrycks/test | "The benchmark covers 57 subjects across STEM, the humanities, the social sciences, and more. It ranges in difficulty from an elementary level to an advanced professional level, and it tests both world knowledge and problem solving ability. Subjects range from traditional areas, such as mathematics and history, to more specialized areas like law and ethics. The granularity and breadth of the subjects makes the benchmark ideal for identifying a model's blind spots." (Source: https://paperswithcode.com/dataset/mmlu) |
+| TriviaQA | Joshi et al. | https://arxiv.org/abs/1705.03551v2 | "We present TriviaQA, a challenging reading comprehension dataset containing over 650K question-answer-evidence triples. TriviaQA includes 95K question-answer pairs authored by trivia enthusiasts and independently gathered evidence documents, six per question on average, that provide high quality distant supervision for answering the questions." (Source: https://arxiv.org/abs/1705.03551v2) |
+| WinoGrande | Sakaguchi et al. | https://arxiv.org/abs/1907.10641v2 | "A large-scale dataset of 44k [expert-crafted pronoun resolution] problems, inspired by the original WSC design, but adjusted to improve both the scale and the hardness of the dataset." (Source: https://arxiv.org/abs/1907.10641v2) |
setup.cfg
ADDED
@@ -0,0 +1,2 @@
[flake8]
max-line-length = 140
streamlit_app.py
ADDED
@@ -0,0 +1,333 @@
import io
import re
from collections.abc import Iterable

import pandas as pd
import streamlit as st
from pandas.api.types import (is_bool_dtype, is_datetime64_any_dtype,
                              is_numeric_dtype)


GITHUB_URL = "https://github.com/espnet/espnet"
# Leaderboard columns that hold metadata rather than benchmark scores.
NON_BENCHMARK_COLS = ["Open?", "Publisher"]


def extract_table_and_format_from_markdown_text(markdown_table: str) -> pd.DataFrame:
    """Extracts a table from a markdown text and formats it as a pandas DataFrame.
    Args:
        markdown_table (str): Markdown text containing a table.
    Returns:
        pd.DataFrame: Table as pandas DataFrame.
    """
    df = (
        pd.read_table(io.StringIO(markdown_table), sep="|", header=0, index_col=1)
        .dropna(axis=1, how="all")  # drop empty columns
        .iloc[
            1:
        ]  # drop first row which is the "----" separator of the original markdown table
        .sort_index(ascending=True)
        .apply(lambda x: x.str.strip() if x.dtype == "object" else x)
        .replace("", float("NaN"))
        .apply(pd.to_numeric, errors="ignore")  # convert score columns to numbers where possible
    )

    # remove whitespace from column names and index
    df.columns = df.columns.str.strip()
    df.index = df.index.str.strip()
    df.index.name = df.index.name.strip()

    return df


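# Illustrative sketch (added for clarity; not executed by the app, and the
# table literal below is a made-up example): given a minimal leaderboard
# table, the parser above yields a DataFrame indexed by the first visible
# column.
#
#   table_md = (
#       "| Model Name | Publisher | Open? |\n"
#       "| ---------- | --------- | ----- |\n"
#       "| alpaca-7b | Stanford | no |\n"
#   )
#   df = extract_table_and_format_from_markdown_text(table_md)
#   # list(df.index) == ["alpaca-7b"]; df.loc["alpaca-7b", "Open?"] == "no"

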
def extract_markdown_table_from_multiline(
    multiline: str, table_headline: str, next_headline_start: str = "#"
) -> str:
    """Extracts the markdown table from a multiline string.
    Args:
        multiline (str): Content of a results markdown file (e.g. results_asr.md).
        table_headline (str): Headline of the table in the markdown file.
        next_headline_start (str, optional): Start of the next headline. Defaults to "#".
    Returns:
        str: Markdown table.
    Raises:
        ValueError: If the table could not be found.
    """
    # extract everything between the table headline and the next headline
    table = []
    start = False
    for line in multiline.split("\n"):
        if line.startswith(table_headline):
            start = True
        elif line.startswith(next_headline_start):
            start = False
        elif start:
            table.append(line + "\n")

    if len(table) == 0:
        raise ValueError(f"Could not find table with headline '{table_headline}'")

    return "".join(table)


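# Illustrative sketch (added for clarity; not executed by the app): the
# function above keeps only the lines between the requested headline and the
# next "#" headline, which is how the "## Leaderboard" and "## Benchmarks"
# tables are pulled out of each results_*.md file.
#
#   md = "## Leaderboard\n| a | b |\n| - | - |\n| 1 | 2 |\n## Benchmarks\n..."
#   extract_markdown_table_from_multiline(md, "## Leaderboard")
#   # -> "| a | b |\n| - | - |\n| 1 | 2 |\n"

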
def remove_markdown_links(text: str) -> str:
    """Modifies a markdown text to remove all markdown links.
    Example: [DISPLAY](LINK) becomes DISPLAY
    First find all markdown links with a regex,
    then replace each one with its display text.
    Args:
        text (str): Markdown text containing markdown links.
    Returns:
        str: Markdown text without markdown links.
    """
    # find all markdown links
    markdown_links = re.findall(r"\[([^\]]+)\]\(([^)]+)\)", text)

    # remove the link, keep the display text
    for display, link in markdown_links:
        text = text.replace(f"[{display}]({link})", display)

    return text


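# Illustrative sketch (added for clarity; not executed by the app):
#
#   remove_markdown_links("see [ESPnet](https://github.com/espnet/espnet)")
#   # -> "see ESPnet"

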
def filter_dataframe_by_row_and_columns(
    df: pd.DataFrame, ignore_columns: list[str] | None = None
) -> pd.DataFrame:
    """
    Filter dataframe by the rows and columns to display.
    This does not select based on the values in the dataframe, but rather on the index and columns.
    Modified from https://blog.streamlit.io/auto-generate-a-dataframe-filtering-ui-in-streamlit-with-filter_dataframe/
    Args:
        df (pd.DataFrame): Original dataframe
        ignore_columns (list[str], optional): Columns to ignore. Defaults to None.
    Returns:
        pd.DataFrame: Filtered dataframe
    """
    df = df.copy()

    if ignore_columns is None:
        ignore_columns = []

    modification_container = st.container()

    with modification_container:
        to_filter_index = st.multiselect("Filter by model:", sorted(df.index))
        if to_filter_index:
            df = pd.DataFrame(df.loc[to_filter_index])

        to_filter_columns = st.multiselect(
            "Filter by benchmark:",
            sorted([c for c in df.columns if c not in ignore_columns]),
        )
        if to_filter_columns:
            df = pd.DataFrame(df[ignore_columns + to_filter_columns])

    return df


def filter_dataframe_by_column_values(df: pd.DataFrame) -> pd.DataFrame:
    """
    Filter dataframe by the values in the dataframe.
    Modified from https://blog.streamlit.io/auto-generate-a-dataframe-filtering-ui-in-streamlit-with-filter_dataframe/
    Args:
        df (pd.DataFrame): Original dataframe
    Returns:
        pd.DataFrame: Filtered dataframe
    """
    df = df.copy()

    modification_container = st.container()

    with modification_container:
        to_filter_columns = st.multiselect("Filter results on:", df.columns)
        left, right = st.columns((1, 20))

        for column in to_filter_columns:
            if is_bool_dtype(df[column]):
                user_bool_input = right.checkbox(f"{column}", value=True)
                df = df[df[column] == user_bool_input]

            elif is_numeric_dtype(df[column]):
                _min = float(df[column].min())
                _max = float(df[column].max())

                if (_min != _max) and pd.notna(_min) and pd.notna(_max):
                    step = 0.01
                    user_num_input = right.slider(
                        f"Values for {column}:",
                        min_value=round(_min - step, 2),
                        max_value=round(_max + step, 2),
                        value=(_min, _max),
                        step=step,
                    )
                    df = df[df[column].between(*user_num_input)]

            elif is_datetime64_any_dtype(df[column]):
                user_date_input = right.date_input(
                    f"Values for {column}:",
                    value=(
                        df[column].min(),
                        df[column].max(),
                    ),
                )
                if isinstance(user_date_input, Iterable) and len(user_date_input) == 2:
                    user_date_input_datetime = tuple(
                        map(pd.to_datetime, user_date_input)
                    )
                    start_date, end_date = user_date_input_datetime
                    df = df.loc[df[column].between(start_date, end_date)]

            else:
                selected_values = right.multiselect(
                    f"Values for {column}:",
                    sorted(df[column].unique()),
                )

                if selected_values:
                    df = df[df[column].isin(selected_values)]

    return df


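# Reference note (added for clarity; not executed by the app): the function
# above picks a Streamlit widget per column dtype:
#   bool     -> st.checkbox
#   numeric  -> st.slider with a (min, max) range
#   datetime -> st.date_input with a (start, end) range
#   other    -> st.multiselect over the unique values

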
def setup_basic():
    title = "🏆 The ESPnet Leaderboard"

    st.set_page_config(
        page_title=title,
        page_icon="🏆",
        layout="wide",
    )
    st.title(title)

    st.markdown(
        "A joint community effort to create one central leaderboard for models developed with ESPnet."
        f" Visit [ESPnet]({GITHUB_URL}) to contribute. \n"
    )


def setup_leaderboard(readme: str, task: str, task_name: str):
    leaderboard_table = extract_markdown_table_from_multiline(
        readme, table_headline="## Leaderboard"
    )
    leaderboard_table = remove_markdown_links(leaderboard_table)
    df_leaderboard = extract_table_and_format_from_markdown_text(leaderboard_table)
    # the "Open?" column holds "yes"/"no" strings in the results files
    df_leaderboard["Open?"] = (
        df_leaderboard["Open?"].map({"yes": 1, "no": 0}).astype(bool)
    )

    st.markdown(f"## {task_name} Leaderboard")
    modify = st.checkbox("Add filters", key=f"lb_modify_{task}")
    clear_empty_entries = st.checkbox("Clear empty entries", value=True, key=f"lb_clear_{task}")

    if modify:
        df_leaderboard = filter_dataframe_by_row_and_columns(
            df_leaderboard, ignore_columns=NON_BENCHMARK_COLS
        )
        df_leaderboard = filter_dataframe_by_column_values(df_leaderboard)

    if clear_empty_entries:
        df_leaderboard = df_leaderboard.dropna(axis=1, how="all")
        benchmark_columns = [
            c for c in df_leaderboard.columns if df_leaderboard[c].dtype == float
        ]
        rows_wo_any_benchmark = df_leaderboard[benchmark_columns].isna().all(axis=1)
        df_leaderboard = df_leaderboard[~rows_wo_any_benchmark]

    st.dataframe(df_leaderboard)

    st.download_button(
        "Download current selection as .html",
        df_leaderboard.to_html().encode("utf-8"),
        "leaderboard.html",
        "text/html",
        key=f"download-html-{task}",
    )

    st.download_button(
        "Download current selection as .csv",
        df_leaderboard.to_csv().encode("utf-8"),
        "leaderboard.csv",
        "text/csv",
        key=f"download-csv-{task}",
    )


def setup_benchmarks(readme: str, task: str):
    benchmarks_table = extract_markdown_table_from_multiline(
        readme, table_headline="## Benchmarks"
    )
    df_benchmarks = extract_table_and_format_from_markdown_text(benchmarks_table)

    st.markdown("## Covered Benchmarks")

    selected_benchmark = st.selectbox(
        "Select a benchmark to learn more:", df_benchmarks.index.unique(), key=f"chkb_bench_{task}"
    )
    df_selected = df_benchmarks.loc[selected_benchmark]
    text = [
        f"Name: {selected_benchmark}",
    ]
    for key in df_selected.keys():
        text.append(f"{key}: {df_selected[key]} ")
    st.markdown(" \n".join(text))


def setup_sources():
    st.markdown("## Sources")
    st.markdown(
        "The results of this leaderboard are collected from the individual papers and published results of the model "
        "authors. If you are interested in the sources of each individual reported model value, please visit the "
        f"[ESPnet]({GITHUB_URL}) repository."
    )
    st.markdown(
        """
    Special thanks to the following pages:
    - [LLM-Leaderboard](https://llm-leaderboard.streamlit.app/)
    - [HF Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
    """
    )


def setup_disclaimer():
    st.markdown("## Disclaimer")
    st.markdown(
        "The above information may be wrong. If you want to use a published model for commercial use, please "
        "contact a lawyer."
    )


def setup_footer():
    st.markdown(
        """
    ---
    Made with ❤️ by the awesome open-source community from all over 🌍.
    """
    )


def main():
    setup_basic()

    tasks = {
        "asr": "Automatic Speech Recognition",
        "tts": "Text-to-Speech",
        "slu": "Spoken Language Understanding",
        "diar": "Diarization",
        "sr": "Speaker Recognition",
        "slm": "Speech Language Modeling",
    }

    tabs = st.tabs([x.upper() for x in tasks] + ["Submit"])
    for idx, task in enumerate(tasks):
        with open(f"results_{task}.md", "r") as f:
            readme = f.read()
        with tabs[idx]:
            setup_leaderboard(readme, task, tasks[task])
            setup_benchmarks(readme, task)
            setup_sources()
            setup_disclaimer()
            setup_footer()


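# Extension sketch (added for illustration; the "st" key and results_st.md
# file are hypothetical): a new leaderboard tab only needs a task entry plus
# a matching results file that contains "## Leaderboard" and "## Benchmarks"
# sections in the same markdown-table format as the existing results_*.md.
#
#   tasks["st"] = "Speech Translation"  # then add results_st.md to the repo

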
if __name__ == "__main__":
    main()