UltraRonin committed
Commit: b866cfe
Parent(s): e38dcf1
requests/DeepSeek-R1-Distill-Qwen-1.5B_eval_request_False_float16_Original.json ADDED
@@ -0,0 +1 @@
+ {"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "status": "FAILED", "submitted_time": "2025-02-06T00:48:20Z", "model_type": "\u2b55 : instruction-tuned", "likes": 675, "params": 1.777, "license": "mit", "private": false}
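For reference, a minimal sketch of how a request file like the one above could be read with the standard `json` module; the `read_request` helper is illustrative only, not the Space's actual loader.

```python
# Minimal sketch (assumption, not the leaderboard's actual code): load one
# eval request JSON and report its status.
import json
from pathlib import Path

def read_request(path) -> dict:
    """Load a single eval request JSON file."""
    with open(path, encoding="utf-8") as f:
        return json.load(f)

if __name__ == "__main__":
    req_path = Path("requests") / "DeepSeek-R1-Distill-Qwen-1.5B_eval_request_False_float16_Original.json"
    request = read_request(req_path)
    # Keys mirror the schema above: model, revision, precision, weight_type, status, ...
    print(f"{request['model']} [{request['precision']}] -> status: {request['status']}")
```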
results/DeepSeek-R1.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "config": {
+     "model_name": "deepseek-ai/DeepSeek-R1"
+   },
+   "results": {
+     "Overall": {
+       "CR": "100.0",
+       "S-Acc": "58.4",
+       "EM": "20.0",
+       "PM-0.5": "62.0",
+       "Tokens": "9856"
+     },
+     "Acrostic": {
+       "CR": "100.0",
+       "S-Acc": "62.2",
+       "EM": "0.0",
+       "PM-0.5": "83.0",
+       "Tokens": "10077"
+     },
+     "Crossword": {
+       "CR": "100.0",
+       "S-Acc": "75.3",
+       "EM": "16.7",
+       "PM-0.5": "94.0",
+       "Tokens": "9810"
+     },
+     "Cryptogram": {
+       "CR": "100.0",
+       "S-Acc": "26.0",
+       "EM": "4.0",
+       "PM-0.5": "21.0",
+       "Tokens": "10344"
+     },
+     "Logic_Puzzle": {
+       "CR": "100.0",
+       "S-Acc": "69.4",
+       "EM": "42.5",
+       "PM-0.5": "68.0",
+       "Tokens": "9205"
+     },
+     "Sudoku": {
+       "CR": "100.0",
+       "S-Acc": "70.3",
+       "EM": "50.0",
+       "PM-0.5": "64.0",
+       "Tokens": "8277"
+     },
+     "Drop_Quote": {
+       "CR": "100.0",
+       "S-Acc": "47.3",
+       "EM": "7.0",
+       "PM-0.5": "42.0",
+       "Tokens": "11422"
+     }
+   }
+ }
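Every file under results/ follows this same schema: a `config.model_name` plus per-task objects holding `CR`, `S-Acc`, `EM`, `PM-0.5`, and `Tokens` as strings. A minimal sketch of flattening one such file into a leaderboard row, assuming plain `json` parsing; `results_to_row` is illustrative and not the Space's actual code.

```python
# Minimal sketch (assumption): flatten one results file of the schema above
# into a single row. Metric values are stored as strings, so cast to float.
import json

METRICS = ["CR", "S-Acc", "EM", "PM-0.5", "Tokens"]

def results_to_row(path: str) -> dict:
    with open(path, encoding="utf-8") as f:
        data = json.load(f)
    row = {"model": data["config"]["model_name"]}
    for task, scores in data["results"].items():
        for metric in METRICS:
            row[f"{task} {metric}"] = float(scores[metric])
    return row

if __name__ == "__main__":
    print(results_to_row("results/DeepSeek-R1.json"))
```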
results/Gemini-2.0-flash-thinking.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "config": {
+     "model_name": "Gemini-2.0-flash-thinking"
+   },
+   "results": {
+     "Overall": {
+       "CR": "88.2",
+       "S-Acc": "39.4",
+       "EM": "4.3",
+       "PM-0.5": "35.0",
+       "Tokens": "3725"
+     },
+     "Acrostic": {
+       "CR": "92.0",
+       "S-Acc": "40.7",
+       "EM": "0.0",
+       "PM-0.5": "27.0",
+       "Tokens": "4257"
+     },
+     "Crossword": {
+       "CR": "94.7",
+       "S-Acc": "57.7",
+       "EM": "1.3",
+       "PM-0.5": "79.3",
+       "Tokens": "2648"
+     },
+     "Cryptogram": {
+       "CR": "68.0",
+       "S-Acc": "11.2",
+       "EM": "0.0",
+       "PM-0.5": "2.0",
+       "Tokens": "4167"
+     },
+     "Logic_Puzzle": {
+       "CR": "99.0",
+       "S-Acc": "45.9",
+       "EM": "8.0",
+       "PM-0.5": "37.5",
+       "Tokens": "4038"
+     },
+     "Sudoku": {
+       "CR": "79.5",
+       "S-Acc": "46.5",
+       "EM": "16.5",
+       "PM-0.5": "41.0",
+       "Tokens": "3853"
+     },
+     "Drop_Quote": {
+       "CR": "96.0",
+       "S-Acc": "34.4",
+       "EM": "0.0",
+       "PM-0.5": "23.0",
+       "Tokens": "3386"
+     }
+   }
+ }
results/Gemini-2.0-flash.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "config": {
+     "model_name": "Gemini-2.0-flash"
+   },
+   "results": {
+     "Overall": {
+       "CR": "81.1",
+       "S-Acc": "37.0",
+       "EM": "2.4",
+       "PM-0.5": "34.5",
+       "Tokens": "2637"
+     },
+     "Acrostic": {
+       "CR": "98.0",
+       "S-Acc": "48.0",
+       "EM": "0.0",
+       "PM-0.5": "48.0",
+       "Tokens": "4020"
+     },
+     "Crossword": {
+       "CR": "98.7",
+       "S-Acc": "61.6",
+       "EM": "0.0",
+       "PM-0.5": "83.3",
+       "Tokens": "2555"
+     },
+     "Cryptogram": {
+       "CR": "47.0",
+       "S-Acc": "8.5",
+       "EM": "0.0",
+       "PM-0.5": "1.0",
+       "Tokens": "1585"
+     },
+     "Logic_Puzzle": {
+       "CR": "58.0",
+       "S-Acc": "24.2",
+       "EM": "2.0",
+       "PM-0.5": "20.0",
+       "Tokens": "2104"
+     },
+     "Sudoku": {
+       "CR": "93.0",
+       "S-Acc": "45.3",
+       "EM": "12.5",
+       "PM-0.5": "37.5",
+       "Tokens": "2842"
+     },
+     "Drop_Quote": {
+       "CR": "92.0",
+       "S-Acc": "34.3",
+       "EM": "0.0",
+       "PM-0.5": "17.0",
+       "Tokens": "2717"
+     }
+   }
+ }
results/Llama-3.1-70B-Instruct.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "config": {
+     "model_name": "meta-llama/Meta-Llama-3.1-70B-Instruct"
+   },
+   "results": {
+     "Overall": {
+       "CR": "71.8",
+       "S-Acc": "27.4",
+       "EM": "0.5",
+       "PM-0.5": "21.9",
+       "Tokens": "2090"
+     },
+     "Acrostic": {
+       "CR": "84.0",
+       "S-Acc": "35.8",
+       "EM": "0.0",
+       "PM-0.5": "21.0",
+       "Tokens": "3565"
+     },
+     "Crossword": {
+       "CR": "77.3",
+       "S-Acc": "46.8",
+       "EM": "0.0",
+       "PM-0.5": "62.0",
+       "Tokens": "3072"
+     },
+     "Cryptogram": {
+       "CR": "62.0",
+       "S-Acc": "6.9",
+       "EM": "0.0",
+       "PM-0.5": "1.0",
+       "Tokens": "1298"
+     },
+     "Logic_Puzzle": {
+       "CR": "56.0",
+       "S-Acc": "22.8",
+       "EM": "2.0",
+       "PM-0.5": "18.0",
+       "Tokens": "1165"
+     },
+     "Sudoku": {
+       "CR": "69.5",
+       "S-Acc": "24.2",
+       "EM": "1.0",
+       "PM-0.5": "17.5",
+       "Tokens": "1940"
+     },
+     "Drop_Quote": {
+       "CR": "82.0",
+       "S-Acc": "27.7",
+       "EM": "0.0",
+       "PM-0.5": "12.0",
+       "Tokens": "1498"
+     }
+   }
+ }
results/Llama-3.1-8B-Instruct.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "config": {
+     "model_name": "meta-llama/Meta-Llama-3.1-8B-Instruct"
+   },
+   "results": {
+     "Overall": {
+       "CR": "42.6",
+       "S-Acc": "9.9",
+       "EM": "0.0",
+       "PM-0.5": "3.8",
+       "Tokens": "2478"
+     },
+     "Acrostic": {
+       "CR": "43.0",
+       "S-Acc": "5.6",
+       "EM": "0.0",
+       "PM-0.5": "0.0",
+       "Tokens": "3712"
+     },
+     "Crossword": {
+       "CR": "61.3",
+       "S-Acc": "23.3",
+       "EM": "0.0",
+       "PM-0.5": "14.0",
+       "Tokens": "2888"
+     },
+     "Cryptogram": {
+       "CR": "43.0",
+       "S-Acc": "2.3",
+       "EM": "0.0",
+       "PM-0.5": "0.0",
+       "Tokens": "2068"
+     },
+     "Logic_Puzzle": {
+       "CR": "57.0",
+       "S-Acc": "16.0",
+       "EM": "0.0",
+       "PM-0.5": "8.0",
+       "Tokens": "1293"
+     },
+     "Sudoku": {
+       "CR": "7.5",
+       "S-Acc": "1.2",
+       "EM": "0.0",
+       "PM-0.5": "0.0",
+       "Tokens": "2782"
+     },
+     "Drop_Quote": {
+       "CR": "44.0",
+       "S-Acc": "11.2",
+       "EM": "0.0",
+       "PM-0.5": "1.0",
+       "Tokens": "2123"
+     }
+   }
+ }
results/Llama-3.3-70B-Instruct.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "config": {
+     "model_name": "meta-llama/Llama-3.3-70B-Instruct"
+   },
+   "results": {
+     "Overall": {
+       "CR": "92.4",
+       "S-Acc": "33.1",
+       "EM": "1.3",
+       "PM-0.5": "25.8",
+       "Tokens": "1842"
+     },
+     "Acrostic": {
+       "CR": "97.0",
+       "S-Acc": "40.8",
+       "EM": "0.0",
+       "PM-0.5": "28.0",
+       "Tokens": "3584"
+     },
+     "Crossword": {
+       "CR": "85.3",
+       "S-Acc": "47.6",
+       "EM": "0.0",
+       "PM-0.5": "65.3",
+       "Tokens": "2613"
+     },
+     "Cryptogram": {
+       "CR": "99.0",
+       "S-Acc": "14.3",
+       "EM": "0.0",
+       "PM-0.5": "1.0",
+       "Tokens": "1137"
+     },
+     "Logic_Puzzle": {
+       "CR": "80.5",
+       "S-Acc": "32.2",
+       "EM": "1.0",
+       "PM-0.5": "25.0",
+       "Tokens": "1738"
+     },
+     "Sudoku": {
+       "CR": "93.5",
+       "S-Acc": "34.8",
+       "EM": "7.0",
+       "PM-0.5": "22.5",
+       "Tokens": "1062"
+     },
+     "Drop_Quote": {
+       "CR": "99.0",
+       "S-Acc": "29.0",
+       "EM": "0.0",
+       "PM-0.5": "13.0",
+       "Tokens": "918"
+     }
+   }
+ }
results/Mistral-7B-Instruct-v0.3.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "config": {
+     "model_name": "mistralai/Mistral-7B-Instruct-v0.3"
+   },
+   "results": {
+     "Overall": {
+       "CR": "85.8",
+       "S-Acc": "12.1",
+       "EM": "0.0",
+       "PM-0.5": "2.3",
+       "Tokens": "2736"
+     },
+     "Acrostic": {
+       "CR": "75.0",
+       "S-Acc": "7.9",
+       "EM": "0.0",
+       "PM-0.5": "0.0",
+       "Tokens": "4600"
+     },
+     "Crossword": {
+       "CR": "94.0",
+       "S-Acc": "23.0",
+       "EM": "0.0",
+       "PM-0.5": "6.7",
+       "Tokens": "3655"
+     },
+     "Cryptogram": {
+       "CR": "99.0",
+       "S-Acc": "4.3",
+       "EM": "0.0",
+       "PM-0.5": "0.0",
+       "Tokens": "1096"
+     },
+     "Logic_Puzzle": {
+       "CR": "97.0",
+       "S-Acc": "19.1",
+       "EM": "0.0",
+       "PM-0.5": "4.5",
+       "Tokens": "1618"
+     },
+     "Sudoku": {
+       "CR": "84.0",
+       "S-Acc": "11.9",
+       "EM": "0.0",
+       "PM-0.5": "1.5",
+       "Tokens": "3108"
+     },
+     "Drop_Quote": {
+       "CR": "66.0",
+       "S-Acc": "6.6",
+       "EM": "0.0",
+       "PM-0.5": "1.0",
+       "Tokens": "2337"
+     }
+   }
+ }
results/Mistral-Large-Instruct-2411.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "config": {
+     "model_name": "mistralai/Mistral-Large-Instruct-2411"
+   },
+   "results": {
+     "Overall": {
+       "CR": "96.1",
+       "S-Acc": "36.4",
+       "EM": "2.5",
+       "PM-0.5": "30.0",
+       "Tokens": "2313"
+     },
+     "Acrostic": {
+       "CR": "98.0",
+       "S-Acc": "39.4",
+       "EM": "0.0",
+       "PM-0.5": "20.0",
+       "Tokens": "4279"
+     },
+     "Crossword": {
+       "CR": "99.3",
+       "S-Acc": "62.8",
+       "EM": "2.0",
+       "PM-0.5": "86.0",
+       "Tokens": "3237"
+     },
+     "Cryptogram": {
+       "CR": "96.0",
+       "S-Acc": "13.7",
+       "EM": "0.0",
+       "PM-0.5": "1.0",
+       "Tokens": "1204"
+     },
+     "Logic_Puzzle": {
+       "CR": "100.0",
+       "S-Acc": "38.3",
+       "EM": "3.0",
+       "PM-0.5": "30.5",
+       "Tokens": "1637"
+     },
+     "Sudoku": {
+       "CR": "85.5",
+       "S-Acc": "39.5",
+       "EM": "10.0",
+       "PM-0.5": "33.5",
+       "Tokens": "1955"
+     },
+     "Drop_Quote": {
+       "CR": "98.0",
+       "S-Acc": "24.7",
+       "EM": "0.0",
+       "PM-0.5": "9.0",
+       "Tokens": "1566"
+     }
+   }
+ }
results/Mistral-Small-Instruct-2409.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "config": {
+     "model_name": "mistralai/Mistral-Small-Instruct-2409"
+   },
+   "results": {
+     "Overall": {
+       "CR": "91.0",
+       "S-Acc": "23.1",
+       "EM": "0.2",
+       "PM-0.5": "13.3",
+       "Tokens": "2273"
+     },
+     "Acrostic": {
+       "CR": "67.0",
+       "S-Acc": "5.5",
+       "EM": "0.0",
+       "PM-0.5": "0.0",
+       "Tokens": "4171"
+     },
+     "Crossword": {
+       "CR": "98.7",
+       "S-Acc": "48.3",
+       "EM": "0.0",
+       "PM-0.5": "54.0",
+       "Tokens": "3135"
+     },
+     "Cryptogram": {
+       "CR": "95.0",
+       "S-Acc": "7.0",
+       "EM": "0.0",
+       "PM-0.5": "0.0",
+       "Tokens": "1233"
+     },
+     "Logic_Puzzle": {
+       "CR": "99.5",
+       "S-Acc": "30.7",
+       "EM": "0.5",
+       "PM-0.5": "12.5",
+       "Tokens": "1514"
+     },
+     "Sudoku": {
+       "CR": "89.0",
+       "S-Acc": "20.5",
+       "EM": "0.5",
+       "PM-0.5": "7.5",
+       "Tokens": "1968"
+     },
+     "Drop_Quote": {
+       "CR": "97.0",
+       "S-Acc": "26.9",
+       "EM": "0.0",
+       "PM-0.5": "6.0",
+       "Tokens": "1615"
+     }
+   }
+ }
results/OpenAI-gpt-4o.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "config": {
+     "model_name": "OpenAI-gpt-4o"
+   },
+   "results": {
+     "Overall": {
+       "CR": "99.8",
+       "S-Acc": "43.7",
+       "EM": "3.2",
+       "PM-0.5": "41.7",
+       "Tokens": "1486"
+     },
+     "Acrostic": {
+       "CR": "100.0",
+       "S-Acc": "56.0",
+       "EM": "0.0",
+       "PM-0.5": "67.0",
+       "Tokens": "3229"
+     },
+     "Crossword": {
+       "CR": "100.0",
+       "S-Acc": "66.0",
+       "EM": "1.3",
+       "PM-0.5": "86.7",
+       "Tokens": "1726"
+     },
+     "Cryptogram": {
+       "CR": "100.0",
+       "S-Acc": "20.7",
+       "EM": "0.0",
+       "PM-0.5": "5.0",
+       "Tokens": "740"
+     },
+     "Logic_Puzzle": {
+       "CR": "100.0",
+       "S-Acc": "39.3",
+       "EM": "3.5",
+       "PM-0.5": "29.5",
+       "Tokens": "953"
+     },
+     "Sudoku": {
+       "CR": "100.0",
+       "S-Acc": "52.2",
+       "EM": "14.5",
+       "PM-0.5": "48.0",
+       "Tokens": "1104"
+     },
+     "Drop_Quote": {
+       "CR": "99.0",
+       "S-Acc": "31.1",
+       "EM": "0.0",
+       "PM-0.5": "14.0",
+       "Tokens": "1165"
+     }
+   }
+ }
results/OpenAI-o1-mini.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "config": {
+     "model_name": "OpenAI-o1-mini"
+   },
+   "results": {
+     "Overall": {
+       "CR": "97.7",
+       "S-Acc": "41.3",
+       "EM": "9.1",
+       "PM-0.5": "32.8",
+       "Tokens": "9576"
+     },
+     "Acrostic": {
+       "CR": "97.0",
+       "S-Acc": "34.7",
+       "EM": "0.0",
+       "PM-0.5": "12.0",
+       "Tokens": "10952"
+     },
+     "Crossword": {
+       "CR": "95.3",
+       "S-Acc": "45.5",
+       "EM": "1.3",
+       "PM-0.5": "54.0",
+       "Tokens": "7840"
+     },
+     "Cryptogram": {
+       "CR": "100.0",
+       "S-Acc": "22.7",
+       "EM": "1.0",
+       "PM-0.5": "13.0",
+       "Tokens": "11208"
+     },
+     "Logic_Puzzle": {
+       "CR": "99.0",
+       "S-Acc": "57.2",
+       "EM": "23.5",
+       "PM-0.5": "53.5",
+       "Tokens": "10242"
+     },
+     "Sudoku": {
+       "CR": "99.0",
+       "S-Acc": "53.4",
+       "EM": "27.0",
+       "PM-0.5": "43.0",
+       "Tokens": "3961"
+     },
+     "Drop_Quote": {
+       "CR": "96.0",
+       "S-Acc": "34.3",
+       "EM": "2.0",
+       "PM-0.5": "21.0",
+       "Tokens": "13255"
+     }
+   }
+ }
results/OpenAI-o1-preview.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "config": {
+     "model_name": "OpenAI-o1-preview"
+   },
+   "results": {
+     "Overall": {
+       "CR": "96.3",
+       "S-Acc": "58.7",
+       "EM": "23.6",
+       "PM-0.5": "61.7",
+       "Tokens": "11436"
+     },
+     "Acrostic": {
+       "CR": "100.0",
+       "S-Acc": "67.2",
+       "EM": "0.0",
+       "PM-0.5": "90.0",
+       "Tokens": "14847"
+     },
+     "Crossword": {
+       "CR": "98.0",
+       "S-Acc": "77.7",
+       "EM": "24.7",
+       "PM-0.5": "89.3",
+       "Tokens": "10098"
+     },
+     "Cryptogram": {
+       "CR": "92.0",
+       "S-Acc": "34.8",
+       "EM": "13.0",
+       "PM-0.5": "29.0",
+       "Tokens": "12567"
+     },
+     "Logic_Puzzle": {
+       "CR": "99.0",
+       "S-Acc": "68.8",
+       "EM": "41.0",
+       "PM-0.5": "68.5",
+       "Tokens": "9449"
+     },
+     "Sudoku": {
+       "CR": "91.5",
+       "S-Acc": "65.1",
+       "EM": "50.0",
+       "PM-0.5": "55.5",
+       "Tokens": "8062"
+     },
+     "Drop_Quote": {
+       "CR": "97.0",
+       "S-Acc": "38.8",
+       "EM": "13.0",
+       "PM-0.5": "38.0",
+       "Tokens": "13595"
+     }
+   }
+ }
results/QwQ-32B-Preview.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "config": {
+     "model_name": "Qwen/QwQ-32B-Preview"
+   },
+   "results": {
+     "Overall": {
+       "CR": "65.0",
+       "S-Acc": "26.6",
+       "EM": "8.5",
+       "PM-0.5": "19.3",
+       "Tokens": "6709"
+     },
+     "Acrostic": {
+       "CR": "97.0",
+       "S-Acc": "31.6",
+       "EM": "0.0",
+       "PM-0.5": "6.0",
+       "Tokens": "4964"
+     },
+     "Crossword": {
+       "CR": "80.0",
+       "S-Acc": "30.2",
+       "EM": "0.0",
+       "PM-0.5": "18.0",
+       "Tokens": "4817"
+     },
+     "Cryptogram": {
+       "CR": "47.0",
+       "S-Acc": "3.6",
+       "EM": "0.0",
+       "PM-0.5": "0.0",
+       "Tokens": "6492"
+     },
+     "Logic_Puzzle": {
+       "CR": "78.5",
+       "S-Acc": "46.3",
+       "EM": "19.5",
+       "PM-0.5": "48.0",
+       "Tokens": "9524"
+     },
+     "Sudoku": {
+       "CR": "54.5",
+       "S-Acc": "40.1",
+       "EM": "31.5",
+       "PM-0.5": "35.5",
+       "Tokens": "8381"
+     },
+     "Drop_Quote": {
+       "CR": "33.0",
+       "S-Acc": "7.5",
+       "EM": "0.0",
+       "PM-0.5": "8.0",
+       "Tokens": "6078"
+     }
+   }
+ }
results/Qwen2.5-32B-Instruct.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen2.5-32B-Instruct"
+   },
+   "results": {
+     "Overall": {
+       "CR": "96.2",
+       "S-Acc": "29.9",
+       "EM": "0.6",
+       "PM-0.5": "14.8",
+       "Tokens": "1924"
+     },
+     "Acrostic": {
+       "CR": "100.0",
+       "S-Acc": "31.8",
+       "EM": "0.0",
+       "PM-0.5": "2.0",
+       "Tokens": "4073"
+     },
+     "Crossword": {
+       "CR": "100.0",
+       "S-Acc": "34.6",
+       "EM": "0.0",
+       "PM-0.5": "20.0",
+       "Tokens": "2560"
+     },
+     "Cryptogram": {
+       "CR": "89.0",
+       "S-Acc": "9.8",
+       "EM": "0.0",
+       "PM-0.5": "0.0",
+       "Tokens": "1303"
+     },
+     "Logic_Puzzle": {
+       "CR": "93.0",
+       "S-Acc": "32.2",
+       "EM": "0.0",
+       "PM-0.5": "22.5",
+       "Tokens": "1208"
+     },
+     "Sudoku": {
+       "CR": "100.0",
+       "S-Acc": "42.8",
+       "EM": "3.5",
+       "PM-0.5": "30.5",
+       "Tokens": "1202"
+     },
+     "Drop_Quote": {
+       "CR": "95.0",
+       "S-Acc": "28.4",
+       "EM": "0.0",
+       "PM-0.5": "14.0",
+       "Tokens": "1197"
+     }
+   }
+ }
results/Qwen2.5-72B-Instruct.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen2.5-72B-Instruct"
+   },
+   "results": {
+     "Overall": {
+       "CR": "95.0",
+       "S-Acc": "33.9",
+       "EM": "0.9",
+       "PM-0.5": "20.8",
+       "Tokens": "2359"
+     },
+     "Acrostic": {
+       "CR": "100.0",
+       "S-Acc": "39.3",
+       "EM": "0.0",
+       "PM-0.5": "18.0",
+       "Tokens": "4111"
+     },
+     "Crossword": {
+       "CR": "100.0",
+       "S-Acc": "44.1",
+       "EM": "0.0",
+       "PM-0.5": "36.7",
+       "Tokens": "2735"
+     },
+     "Cryptogram": {
+       "CR": "85.0",
+       "S-Acc": "11.8",
+       "EM": "0.0",
+       "PM-0.5": "0.0",
+       "Tokens": "1727"
+     },
+     "Logic_Puzzle": {
+       "CR": "93.5",
+       "S-Acc": "34.0",
+       "EM": "0.0",
+       "PM-0.5": "23.0",
+       "Tokens": "1810"
+     },
+     "Sudoku": {
+       "CR": "97.5",
+       "S-Acc": "43.0",
+       "EM": "5.5",
+       "PM-0.5": "34.0",
+       "Tokens": "2013"
+     },
+     "Drop_Quote": {
+       "CR": "94.0",
+       "S-Acc": "30.9",
+       "EM": "0.0",
+       "PM-0.5": "13.0",
+       "Tokens": "1757"
+     }
+   }
+ }
results/Qwen2.5-7B-Instruct.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen2.5-7B-Instruct"
+   },
+   "results": {
+     "Overall": {
+       "CR": "85.1",
+       "S-Acc": "17.7",
+       "EM": "0.3",
+       "PM-0.5": "5.1",
+       "Tokens": "2086"
+     },
+     "Acrostic": {
+       "CR": "42.0",
+       "S-Acc": "3.6",
+       "EM": "0.0",
+       "PM-0.5": "0.0",
+       "Tokens": "4159"
+     },
+     "Crossword": {
+       "CR": "98.7",
+       "S-Acc": "21.1",
+       "EM": "0.0",
+       "PM-0.5": "3.3",
+       "Tokens": "2441"
+     },
+     "Cryptogram": {
+       "CR": "81.0",
+       "S-Acc": "3.5",
+       "EM": "0.0",
+       "PM-0.5": "0.0",
+       "Tokens": "1181"
+     },
+     "Logic_Puzzle": {
+       "CR": "96.5",
+       "S-Acc": "25.8",
+       "EM": "0.0",
+       "PM-0.5": "8.5",
+       "Tokens": "1396"
+     },
+     "Sudoku": {
+       "CR": "94.5",
+       "S-Acc": "30.2",
+       "EM": "1.5",
+       "PM-0.5": "15.0",
+       "Tokens": "1486"
+     },
+     "Drop_Quote": {
+       "CR": "98.0",
+       "S-Acc": "21.9",
+       "EM": "0.0",
+       "PM-0.5": "4.0",
+       "Tokens": "1852"
+     }
+   }
+ }
src/envs.py CHANGED
@@ -18,9 +18,9 @@ CACHE_PATH=os.getenv("HF_HOME", ".")
 
  # Local caches
  # EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
- EVAL_REQUESTS_PATH = "/data_jhchen/Leaderboard/Requests"
+ EVAL_REQUESTS_PATH = "./requests"
  # EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
- EVAL_RESULTS_PATH = "/data_jhchen/Leaderboard/LR2Bench"
+ EVAL_RESULTS_PATH = "./results"
  # EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
  # EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
 
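With `EVAL_REQUESTS_PATH` and `EVAL_RESULTS_PATH` now pointing at the in-repo `./requests` and `./results` directories, the Space can read the files added in this commit directly. A minimal sketch of such a loader, assuming the results schema shown above; `collect_overall` is illustrative and not the leaderboard's actual code.

```python
# Minimal sketch under the new relative paths (assumption, not the Space's
# actual code): walk EVAL_RESULTS_PATH and collect each model's Overall metrics.
import json
from pathlib import Path

EVAL_REQUESTS_PATH = "./requests"   # as set in src/envs.py by this commit
EVAL_RESULTS_PATH = "./results"

def collect_overall(results_dir: str = EVAL_RESULTS_PATH) -> list[dict]:
    rows = []
    for path in sorted(Path(results_dir).glob("*.json")):
        data = json.loads(path.read_text(encoding="utf-8"))
        overall = data["results"]["Overall"]
        rows.append({"model": data["config"]["model_name"], **overall})
    return rows

if __name__ == "__main__":
    for row in collect_overall():
        print(row)
```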