"""
Glicko-2 Ranking System for Device Performance Comparison

This module implements a Glicko-2 based ranking system for comparing device performance
in benchmark tests. Glicko-2 is an improvement over the original Glicko system and Elo,
providing better handling of rating uncertainty and volatility.

The system:
1. Filters out emulators and iOS devices with insufficient GPU layers
2. Normalizes scores within each model group
3. Computes Glicko-2 ratings for devices based on their performance
4. Provides uncertainty metrics alongside ratings
5. Supports both combined and separate analysis of Token Generation and Prompt Processing
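
Minimal usage sketch (assumes ``df`` is a pandas DataFrame of benchmark results
with the columns referenced throughout this module):

    from glicko2_ranking import analyze_glicko2_rankings
    rankings, confident_rankings = analyze_glicko2_rankings(df)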
"""

import pandas as pd
from collections import defaultdict
from typing import Tuple, Dict, Optional
import glicko2
import streamlit as st

# Cache configuration
CACHE_DURATION = 36000  # Cache duration in seconds (10 hours)


def preprocess_benchmark_data(
    df: pd.DataFrame,
    min_gpu_layers: int = 20,
    pp_config: int = 512,
    tg_config: int = 128,
) -> pd.DataFrame:
    """
    Preprocess benchmark data by filtering out invalid entries.

    Args:
        df: DataFrame containing benchmark data
        min_gpu_layers: Minimum number of GPU layers required for iOS devices
        pp_config: Prompt Processing configuration to filter for
        tg_config: Token Generation configuration to filter for

    Returns:
        Filtered DataFrame containing only valid benchmark entries
    """
    # Create a mask for devices to keep
    keep_device = (
        # Keep non-iOS devices
        (
            (df["Platform"] != "iOS")
            |
            # Keep iOS devices with sufficient GPU layers
            ((df["Platform"] == "iOS") & (df["n_gpu_layers"] >= min_gpu_layers))
        )
        &
        # Remove emulators
        (~df["Normalized Device ID"].str.contains("Emulator", case=False, na=False))
        &
        # Filter by configuration
        (df["PP Config"] == pp_config)
        & (df["TG Config"] == tg_config)
    )

    filtered_df = df[keep_device].copy()

    # Print filtering statistics
    total_devices = df["Normalized Device ID"].nunique()
    filtered_devices = filtered_df["Normalized Device ID"].nunique()
    emulator_devices = df[
        df["Normalized Device ID"].str.contains("Emulator", case=False, na=False)
    ]["Normalized Device ID"].nunique()

    print("Filtering Statistics:")
    print(f"Original devices: {total_devices}")
    print(f"Emulator devices removed: {emulator_devices}")
    print(
        f"Other devices removed (insufficient GPU layers or non-matching configs): "
        f"{total_devices - filtered_devices - emulator_devices}"
    )
    print(f"Final device count: {filtered_devices}")

    # Print removed devices for verification
    removed_devices = set(df["Normalized Device ID"].unique()) - set(
        filtered_df["Normalized Device ID"].unique()
    )
    print(f"Removed devices: {removed_devices}")

    return filtered_df


def compute_glicko2_rankings(
    df: pd.DataFrame, token_weight: float = 0.6
) -> pd.DataFrame:
    """
    Compute device rankings using Glicko-2 rating system.

    Args:
        df: DataFrame containing benchmark data
        token_weight: Weight for Token Generation in combined score (0.0 to 1.0)

    Returns:
        DataFrame containing device rankings and statistics
    """
    # Initialize Glicko-2 ratings for all devices
    ratings = {}
    match_counts = defaultdict(int)
    win_counts = defaultdict(int)
    loss_counts = defaultdict(int)

    # Default Glicko-2 settings
    # Rating = 1500, RD (rating deviation) = 350, Volatility = 0.06
    def create_glicko2_rating():
        return glicko2.Player(rating=1500, rd=350, vol=0.06)

    def normalize_scores(group: pd.DataFrame) -> pd.Series:
        """Normalize and combine scores within a model group"""
        # Normalize Token Generation (higher is better)
        token_min = group["Token Generation"].min()
        token_max = group["Token Generation"].max()
        token_norm = (
            (group["Token Generation"] - token_min) / (token_max - token_min)
            if token_max > token_min
            else 0
        )

        # Normalize Prompt Processing (higher is better)
        prompt_min = group["Prompt Processing"].min()
        prompt_max = group["Prompt Processing"].max()
        prompt_norm = (
            (group["Prompt Processing"] - prompt_min) / (prompt_max - prompt_min)
            if prompt_max > prompt_min
            else 0
        )

        # Combine scores
        return token_weight * token_norm + (1 - token_weight) * prompt_norm

    # Get all unique devices
    all_devices = df["Normalized Device ID"].unique()

    # Initialize ratings for all devices
    for device in all_devices:
        ratings[device] = create_glicko2_rating()

    # Process each model separately
    for model, group in df.groupby("Model ID"):
        # Add normalized combined score (work on a copy to avoid
        # pandas SettingWithCopyWarning on the groupby slice)
        group = group.copy()
        group["combined_score"] = normalize_scores(group)

        devices = group["Normalized Device ID"].unique()

        # In Glicko-2, all results for a rating period are collected before any
        # ratings update; here, one rating period is all matches within a model
        device_matches = defaultdict(
            lambda: {"opponent_ratings": [], "opponent_rds": [], "outcomes": []}
        )

        for i in range(len(devices)):
            for j in range(i + 1, len(devices)):
                device1 = devices[i]
                device2 = devices[j]

                score1 = group[group["Normalized Device ID"] == device1][
                    "combined_score"
                ].iloc[0]
                score2 = group[group["Normalized Device ID"] == device2][
                    "combined_score"
                ].iloc[0]

                # Update match counts
                match_counts[device1] += 1
                match_counts[device2] += 1

                # Determine outcome (0 = loss, 1 = win, 0.5 = draw)
                if score1 > score2:
                    # Device 1 wins
                    outcome = 1
                    win_counts[device1] += 1
                    loss_counts[device2] += 1
                    # For device 1
                    device_matches[device1]["opponent_ratings"].append(
                        ratings[device2].rating
                    )
                    device_matches[device1]["opponent_rds"].append(ratings[device2].rd)
                    device_matches[device1]["outcomes"].append(outcome)
                    # For device 2
                    device_matches[device2]["opponent_ratings"].append(
                        ratings[device1].rating
                    )
                    device_matches[device2]["opponent_rds"].append(ratings[device1].rd)
                    device_matches[device2]["outcomes"].append(0)  # Loss
                elif score1 < score2:
                    # Device 2 wins
                    outcome = 0
                    win_counts[device2] += 1
                    loss_counts[device1] += 1
                    # For device 1
                    device_matches[device1]["opponent_ratings"].append(
                        ratings[device2].rating
                    )
                    device_matches[device1]["opponent_rds"].append(ratings[device2].rd)
                    device_matches[device1]["outcomes"].append(outcome)
                    # For device 2
                    device_matches[device2]["opponent_ratings"].append(
                        ratings[device1].rating
                    )
                    device_matches[device2]["opponent_rds"].append(ratings[device1].rd)
                    device_matches[device2]["outcomes"].append(1)  # Win
                else:
                    # It's a draw
                    outcome = 0.5
                    # For device 1
                    device_matches[device1]["opponent_ratings"].append(
                        ratings[device2].rating
                    )
                    device_matches[device1]["opponent_rds"].append(ratings[device2].rd)
                    device_matches[device1]["outcomes"].append(outcome)
                    # For device 2
                    device_matches[device2]["opponent_ratings"].append(
                        ratings[device1].rating
                    )
                    device_matches[device2]["opponent_rds"].append(ratings[device1].rd)
                    device_matches[device2]["outcomes"].append(outcome)

        # Update ratings after the model rating period
        for device, matches in device_matches.items():
            if matches[
                "opponent_ratings"
            ]:  # Only update if the device had matches in this period
                # Update the rating with the three separate lists that the API requires
                ratings[device].update_player(
                    matches["opponent_ratings"],  # List of opponent ratings
                    matches["opponent_rds"],  # List of opponent rating deviations
                    matches["outcomes"],  # List of outcomes
                )

    # Convert to DataFrame
    ranking_data = []
    for device, rating in ratings.items():
        if match_counts[device] > 0:  # Only include devices with matches
            ranking_data.append(
                {
                    "device": device,
                    "rating": rating.rating,
                    "rd": rating.rd,  # rating deviation (uncertainty)
                    "volatility": rating.vol,
                    "matches": match_counts[device],
                    "wins": win_counts[device],
                    "losses": loss_counts[device],
                    # Conservative rating (95% confidence lower bound)
                    "conserv_rating": rating.rating - (2 * rating.rd),
                }
            )

    # Create DataFrame
    ranking_df = pd.DataFrame(ranking_data)

    if len(ranking_df) > 0:
        # Add win rate
        ranking_df["win_rate"] = ranking_df["wins"] / ranking_df["matches"]

        # Add platform information; map on the device name so values align
        # correctly (assigning a device-keyed Series to the RangeIndex-ed frame
        # would misalign and fill the column with NaN)
        platform_map = df.drop_duplicates("Normalized Device ID").set_index(
            "Normalized Device ID"
        )["Platform"]
        ranking_df["Platform"] = ranking_df["device"].map(platform_map)

        # Set device as index
        ranking_df = ranking_df.set_index("device")

    return ranking_df


@st.cache_data(ttl=CACHE_DURATION)
def analyze_glicko2_rankings(
    df: pd.DataFrame,
    min_matches: int = 5,
    min_gpu_layers: int = 20,
    pp_config: int = 512,
    tg_config: int = 128,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """
    Analyze and display ranking results with Glicko-2 ratings.

    Args:
        df: DataFrame containing benchmark data
        min_matches: Minimum number of matches required for confident rankings
        min_gpu_layers: Minimum number of GPU layers required for iOS devices
        pp_config: Prompt Processing configuration to filter for
        tg_config: Token Generation configuration to filter for

    Returns:
        Tuple of (all rankings DataFrame, confident rankings DataFrame)
    """
    # First filter the data
    filtered_df = preprocess_benchmark_data(df, min_gpu_layers, pp_config, tg_config)

    # Compute rankings for all three scenarios
    combined_rankings = compute_glicko2_rankings(filtered_df, token_weight=0.6)
    token_rankings = compute_glicko2_rankings(filtered_df, token_weight=1.0)
    prompt_rankings = compute_glicko2_rankings(filtered_df, token_weight=0.0)

    # Rename columns to avoid confusion
    combined_rankings = combined_rankings.rename(
        columns={
            "rating": "combined_rating",
            "rd": "combined_rd",
            "volatility": "combined_vol",
            "conserv_rating": "combined_conserv",
            "wins": "combined_wins",
            "losses": "combined_losses",
            "win_rate": "combined_win_rate",
        }
    )

    token_rankings = token_rankings.rename(
        columns={
            "rating": "token_rating",
            "rd": "token_rd",
            "volatility": "token_vol",
            "conserv_rating": "token_conserv",
            "wins": "token_wins",
            "losses": "token_losses",
            "win_rate": "token_win_rate",
        }
    )

    prompt_rankings = prompt_rankings.rename(
        columns={
            "rating": "prompt_rating",
            "rd": "prompt_rd",
            "volatility": "prompt_vol",
            "conserv_rating": "prompt_conserv",
            "wins": "prompt_wins",
            "losses": "prompt_losses",
            "win_rate": "prompt_win_rate",
        }
    )

    # Combine all rankings into one DataFrame
    # We'll keep one set of match counts as they should be the same
    rankings = combined_rankings.copy()

    # Add token generation rankings
    for col in [
        "token_rating",
        "token_rd",
        "token_vol",
        "token_conserv",
        "token_wins",
        "token_losses",
        "token_win_rate",
    ]:
        rankings[col] = token_rankings[col]

    # Add prompt processing rankings
    for col in [
        "prompt_rating",
        "prompt_rd",
        "prompt_vol",
        "prompt_conserv",
        "prompt_wins",
        "prompt_losses",
        "prompt_win_rate",
    ]:
        rankings[col] = prompt_rankings[col]

    # Filter for minimum matches
    confident_rankings = rankings[rankings["matches"] >= min_matches].sort_values(
        "combined_rating", ascending=False
    )

    return rankings, confident_rankings


def analyze_device_glicko2_matches(
    df: pd.DataFrame,
    device_id1: str,
    device_id2: Optional[str] = None,
    token_weight: float = 0.6,
) -> pd.DataFrame:
    """
    Analyze all matches for one or two specific devices using the Glicko-2 methodology.

    Args:
        df: DataFrame containing benchmark data
        device_id1: First device ID to analyze
        device_id2: Optional second device ID to compare against
        token_weight: Weight for Token Generation in combined score (0.0 to 1.0)

    Returns:
        DataFrame containing detailed match information with win probabilities
    """
    matches = []

    def normalize_scores(group: pd.DataFrame) -> Dict[str, Dict]:
        """Normalize scores within a model group and return as dict"""
        # Normalize Token Generation (higher is better)
        token_min = group["Token Generation"].min()
        token_max = group["Token Generation"].max()
        token_range = token_max - token_min

        # Normalize Prompt Processing (higher is better)
        prompt_min = group["Prompt Processing"].min()
        prompt_max = group["Prompt Processing"].max()
        prompt_range = prompt_max - prompt_min

        # Calculate normalized scores for each device
        result = {}
        for _, row in group.iterrows():
            device_id = row["Normalized Device ID"]

            # Calculate token normalization
            token_norm = 0
            if token_range > 0:
                token_norm = (row["Token Generation"] - token_min) / token_range

            # Calculate prompt normalization
            prompt_norm = 0
            if prompt_range > 0:
                prompt_norm = (row["Prompt Processing"] - prompt_min) / prompt_range

            # Calculate combined score regardless of ranges
            combined = token_weight * token_norm + (1 - token_weight) * prompt_norm

            result[device_id] = {
                "token_norm": token_norm,
                "prompt_norm": prompt_norm,
                "combined": combined,
            }
        return result

    # Group by Model ID to compare within same models
    for model, group in df.groupby("Model ID"):
        if device_id1 not in group["Normalized Device ID"].values:
            continue

        # Aggregate multiple entries per device by taking the maximum performance
        # This ensures we use the best performance for each device
        device_agg = (
            group.groupby("Normalized Device ID")
            .agg(
                {
                    "Token Generation": "max",
                    "Prompt Processing": "max",
                    "n_gpu_layers": "first",
                    "Platform": "first",
                    "Model File Size": "first",
                }
            )
            .reset_index()
        )

        # Check if device_id1 exists in the aggregated data
        if device_id1 not in device_agg["Normalized Device ID"].values:
            continue

        device1_data = device_agg[
            device_agg["Normalized Device ID"] == device_id1
        ].iloc[0]

        # If device2 specified, only compare those two
        if device_id2 is not None:
            if device_id2 not in device_agg["Normalized Device ID"].values:
                continue
            devices_to_compare = [device_id2]
        else:
            devices_to_compare = [
                d
                for d in device_agg["Normalized Device ID"].unique()
                if d != device_id1
            ]

        # Get normalized scores using the aggregated data
        norm_scores = normalize_scores(device_agg)

        # Compare with other devices
        for other_device in devices_to_compare:
            device2_data = device_agg[
                device_agg["Normalized Device ID"] == other_device
            ].iloc[0]

            # Skip if normalization failed
            if device_id1 not in norm_scores or other_device not in norm_scores:
                continue

            # Get normalized scores
            scores1 = norm_scores[device_id1]
            scores2 = norm_scores[other_device]

            # Calculate win probabilities based on normalized scores
            # For token generation
            token_advantage = scores1["token_norm"] - scores2["token_norm"]
            token_prob = 1 / (
                1 + 10 ** (-6 * token_advantage)
            )  # Higher advantage means higher win probability

            # For prompt processing
            prompt_advantage = scores1["prompt_norm"] - scores2["prompt_norm"]
            prompt_prob = 1 / (1 + 10 ** (-6 * prompt_advantage))

            # For combined score
            combined_advantage = scores1["combined"] - scores2["combined"]
            combined_prob = 1 / (1 + 10 ** (-6 * combined_advantage))
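            # Scale check (illustrative): with the -6 factor, a normalized-score
            # edge of 0.5 maps to 1 / (1 + 10**-3) ~= 0.999, an edge of 0.1 to
            # 1 / (1 + 10**-0.6) ~= 0.80, and equal scores to exactly 0.5.
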
            # Determine winners
            token_winner = (
                device_id1
                if device1_data["Token Generation"] > device2_data["Token Generation"]
                else (
                    other_device
                    if device2_data["Token Generation"]
                    > device1_data["Token Generation"]
                    else "Tie"
                )
            )
            prompt_winner = (
                device_id1
                if device1_data["Prompt Processing"] > device2_data["Prompt Processing"]
                else (
                    other_device
                    if device2_data["Prompt Processing"]
                    > device1_data["Prompt Processing"]
                    else "Tie"
                )
            )
            combined_winner = (
                device_id1
                if scores1["combined"] > scores2["combined"]
                else (
                    other_device if scores2["combined"] > scores1["combined"] else "Tie"
                )
            )

            matches.append(
                {
                    "Model": model,
                    "Device 1": device_id1,
                    "Device 2": other_device,
                    "n_gpu_layers 1": device1_data["n_gpu_layers"],
                    "n_gpu_layers 2": device2_data["n_gpu_layers"],
                    "Token Generation 1": device1_data["Token Generation"],
                    "Token Generation 2": device2_data["Token Generation"],
                    "Token Winner": token_winner,
                    "Token Win Prob": token_prob,
                    "Prompt Processing 1": device1_data["Prompt Processing"],
                    "Prompt Processing 2": device2_data["Prompt Processing"],
                    "Prompt Winner": prompt_winner,
                    "Prompt Win Prob": prompt_prob,
                    "Combined Winner": combined_winner,
                    "Combined Win Prob": combined_prob,
                    "Platform 1": device1_data["Platform"],
                    "Platform 2": device2_data["Platform"],
                    "Model File Size": device1_data["Model File Size"],
                }
            )

    matches_df = pd.DataFrame(matches)

    if len(matches_df) > 0:
        return matches_df
    else:
        print(
            f"No matches found for device {device_id1}"
            + (f" against {device_id2}" if device_id2 else "")
        )
        return pd.DataFrame()


if __name__ == "__main__":
    # Example usage
    print("This module provides Glicko-2 ranking for device performance.")
    print("Import and use the functions in your own code.")
    print("Example:")
    print("  from glicko2_ranking import analyze_glicko2_rankings")
    print("  rankings, confident_rankings = analyze_glicko2_rankings(df)")