import marimo

__generated_with = "0.11.26"
app = marimo.App(width="full")

@app.cell
def _():
    import pandas as pd
    import numpy as np
    import matplotlib.pyplot as plt
    import seaborn as sns
    import altair as alt
    return alt, np, pd, plt, sns
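
# Note (added): marimo wires cells together through function parameters and
# return values — each cell below receives the names it uses as arguments and
# exposes the names it defines by returning them, which is why every cell ends
# with a `return ...`.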

@app.cell
def _():
    # Platform data
    platforms_data = {
        'Chat GPT': {
            'metrics': [
                {'metric': 'Pass Rate (Arthur)', 'value': 100},
                {'metric': 'Pass Rate (Anna)', 'value': 40},
                {'metric': 'Helpfulness (Arthur)', 'value': 1.5 * 50},  # Scaling to 0-100
                {'metric': 'Helpfulness (Anna)', 'value': 1.25 * 50},
                {'metric': 'Adequate Length (Arthur)', 'value': 1.75 * 50},
                {'metric': 'Adequate Length (Anna)', 'value': 1.25 * 50},
            ],
            'performance': [{'task': 'Task #1', 'arthur': 6, 'anna': 0}, {'task': 'Task #3', 'arthur': 6, 'anna': 0}],
            'strengths': [
                'High pass rate from Arthur (100%)',
                'Strong helpfulness ratings from both evaluators',
                'Good adequate length scores',
            ],
            'weaknesses': ['Lower pass rate from Anna (40%)', 'Inconsistent evaluation between Arthur and Anna'],
        },
        'CoPilot': {
            'metrics': [
                {'metric': 'Pass Rate (Arthur)', 'value': 40},
                {'metric': 'Pass Rate (Anna)', 'value': 60},
                {'metric': 'Helpfulness (Arthur)', 'value': 1.0 * 50},
                {'metric': 'Helpfulness (Anna)', 'value': 1.33 * 50},
                {'metric': 'Adequate Length (Arthur)', 'value': 1.2 * 50},
                {'metric': 'Adequate Length (Anna)', 'value': 1.33 * 50},
            ],
            'performance': [
                {'task': 'Task #1', 'arthur': 6, 'anna': 6},
                {'task': 'Task #11', 'arthur': 0, 'anna': 6},
                {'task': 'Task #15', 'arthur': 0, 'anna': 0},
                {'task': 'Task #18', 'arthur': 6, 'anna': 0},
                {'task': 'Task #20', 'arthur': 0, 'anna': 6},
            ],
            'strengths': [
                'Balanced helpfulness scores from both evaluators',
                'Consistent adequate length ratings',
                'Higher pass rate from Anna than from Arthur',
            ],
            'weaknesses': ['Lower overall pass rates', 'Inconsistent evaluation between tasks', 'Below-average scores on complex tasks'],
        },
        'DeepSeek': {
            'metrics': [
                {'metric': 'Pass Rate (Arthur)', 'value': 75},
                {'metric': 'Pass Rate (Anna)', 'value': 100},
                {'metric': 'Helpfulness (Arthur)', 'value': 1.33 * 50},
                {'metric': 'Helpfulness (Anna)', 'value': 2.0 * 50},
                {'metric': 'Adequate Length (Arthur)', 'value': 2.0 * 50},
                {'metric': 'Adequate Length (Anna)', 'value': 1.67 * 50},
            ],
            'performance': [
                {'task': 'Task #11', 'arthur': 6, 'anna': 6},
                {'task': 'Task #13', 'arthur': 6, 'anna': 0},
                {'task': 'Task #18', 'arthur': 6, 'anna': 6},
                {'task': 'Task #19', 'arthur': 0, 'anna': 6},
            ],
            'strengths': [
                'Perfect pass rate from Anna (100%)',
                'Highest helpfulness rating from Anna (2.0/2.0)',
                'Highest adequate length rating from Arthur (2.0/2.0)',
                'Strong overall performance across metrics',
            ],
            'weaknesses': ['Some inconsistency between evaluators', 'Lower pass rate from Arthur compared to Anna'],
        },
    }
    return (platforms_data,)
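
# Added sketch: the 'value' fields above put every metric on a 0-100 scale by
# multiplying the original 0-2 ratings (helpfulness, adequate length) by 50,
# e.g. 1.5 * 50 = 75. This small helper cell (an addition, not part of the
# original notebook) converts a chart value back to the underlying rating.
@app.cell
def _():
    def to_rating(chart_value):
        """Map a 0-100 chart value back to the original 0-2 rating scale."""
        return chart_value / 50

    return (to_rating,)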

@app.cell
def _(platforms_data):
    # Complete the platform data with GC AI, Notebook LM, and Vecflow
    platforms_data.update(
        {
            'GC AI': {
                'metrics': [
                    {'metric': 'Pass Rate (Arthur)', 'value': 60},
                    {'metric': 'Pass Rate (Anna)', 'value': 40},
                    {'metric': 'Helpfulness (Arthur)', 'value': 1.4 * 50},
                    {'metric': 'Helpfulness (Anna)', 'value': 0.5 * 50},
                    {'metric': 'Adequate Length (Arthur)', 'value': 1.8 * 50},
                    {'metric': 'Adequate Length (Anna)', 'value': 1.0 * 50},
                ],
                'performance': [
                    {'task': 'Task #6', 'arthur': 6, 'anna': 0},
                    {'task': 'Task #13', 'arthur': 0, 'anna': 0},
                    {'task': 'Task #18', 'arthur': 6, 'anna': 6},
                    {'task': 'Task #19', 'arthur': 0, 'anna': 0},
                    {'task': 'Task #20', 'arthur': 6, 'anna': 0},
                ],
                'strengths': [
                    'Good adequate length rating from Arthur',
                    'Decent pass rate from Arthur (60%)',
                    'Solid helpfulness score from Arthur',
                ],
                'weaknesses': [
                    'Lowest helpfulness rating from Anna (0.5/2.0)',
                    'Largest discrepancy between evaluators',
                    'Lower pass rate from Anna (40%)',
                ],
            },
            'Notebook LM': {
                'metrics': [
                    {'metric': 'Pass Rate (Arthur)', 'value': 60},
                    {'metric': 'Pass Rate (Anna)', 'value': 60},
                    {'metric': 'Helpfulness (Arthur)', 'value': 0.8 * 50},
                    {'metric': 'Helpfulness (Anna)', 'value': 1.2 * 50},
                    {'metric': 'Adequate Length (Arthur)', 'value': 1.6 * 50},
                    {'metric': 'Adequate Length (Anna)', 'value': 2.0 * 50},
                ],
                'performance': [
                    {'task': 'Task #3', 'arthur': 6, 'anna': 0},
                    {'task': 'Task #6', 'arthur': 0, 'anna': 0},
                    {'task': 'Task #11', 'arthur': 0, 'anna': 6},
                    {'task': 'Task #13', 'arthur': 6, 'anna': 6},
                    {'task': 'Task #15', 'arthur': 6, 'anna': 6},
                    {'task': 'Task #19', 'arthur': 6, 'anna': 6},
                ],
                'strengths': [
                    'Perfect agreement between Arthur and Anna on pass/fail',
                    'Highest adequate length rating from Anna (2.0/2.0)',
                    'Consistent pass rate between evaluators (60%)',
                ],
                'weaknesses': [
                    'Lower helpfulness rating from Arthur (0.8/2.0)',
                    'Mixed performance in specific tasks',
                ],
            },
            'Vecflow': {
                'metrics': [
                    {'metric': 'Pass Rate (Arthur)', 'value': 60},
                    {'metric': 'Pass Rate (Anna)', 'value': 40},
                    {'metric': 'Helpfulness (Arthur)', 'value': 0.6 * 50},
                    {'metric': 'Helpfulness (Anna)', 'value': 0.6 * 50},
                    {'metric': 'Adequate Length (Arthur)', 'value': 1.8 * 50},
                    {'metric': 'Adequate Length (Anna)', 'value': 1.4 * 50},
                ],
                'performance': [
                    {'task': 'Task #11', 'arthur': 0, 'anna': 6},
                    {'task': 'Task #13', 'arthur': 6, 'anna': 0},
                    {'task': 'Task #15', 'arthur': 6, 'anna': 0},
                    {'task': 'Task #18', 'arthur': 6, 'anna': 6},
                    {'task': 'Task #19', 'arthur': 0, 'anna': 0},
                ],
                'strengths': [
                    'Perfect agreement on helpfulness between evaluators',
                    'Strong adequate length scores from both evaluators',
                    'Good performance in specialized tasks',
                ],
                'weaknesses': [
                    'Lowest helpfulness rating overall (0.6/2.0)',
                    'Lower pass rate from Anna (40%)',
                    'Inconsistent evaluation on complex tasks',
                ],
            },
        }
    )
    return

@app.cell
def _(pd):
    # Task type data
    task_type_data = pd.DataFrame(
        [
            {'name': 'Simple Extraction', 'arthur': 80, 'anna': 70},
            {'name': 'Complex Analysis', 'arthur': 65, 'anna': 60},
            {'name': 'Regulatory/Legal', 'arthur': 50, 'anna': 40},
            {'name': 'Identification', 'arthur': 90, 'anna': 75},
            {'name': 'Summarization', 'arthur': 70, 'anna': 65},
        ]
    )
    # Platform performance over time data
    trend_data = {
        'Chat GPT': [
            {'task': 1, 'arthur': 6, 'anna': 0},
            {'task': 3, 'arthur': 6, 'anna': 0},
            {'task': 11, 'arthur': 6, 'anna': 0},
            {'task': 13, 'arthur': 6, 'anna': 6},
            {'task': 18, 'arthur': 6, 'anna': 6},
        ],
        'CoPilot': [
            {'task': 1, 'arthur': 6, 'anna': 6},
            {'task': 11, 'arthur': 0, 'anna': 6},
            {'task': 15, 'arthur': 0, 'anna': 0},
            {'task': 18, 'arthur': 6, 'anna': 0},
            {'task': 20, 'arthur': 0, 'anna': 6},
        ],
        'DeepSeek': [
            {'task': 11, 'arthur': 6, 'anna': 6},
            {'task': 13, 'arthur': 6, 'anna': 0},
            {'task': 18, 'arthur': 6, 'anna': 6},
            {'task': 19, 'arthur': 0, 'anna': 6},
        ],
        'GC AI': [
            {'task': 6, 'arthur': 6, 'anna': 0},
            {'task': 13, 'arthur': 0, 'anna': 0},
            {'task': 18, 'arthur': 6, 'anna': 6},
            {'task': 19, 'arthur': 0, 'anna': 0},
            {'task': 20, 'arthur': 6, 'anna': 0},
        ],
        'Notebook LM': [
            {'task': 3, 'arthur': 6, 'anna': 0},
            {'task': 6, 'arthur': 0, 'anna': 0},
            {'task': 11, 'arthur': 0, 'anna': 6},
            {'task': 13, 'arthur': 6, 'anna': 6},
            {'task': 15, 'arthur': 6, 'anna': 6},
            {'task': 19, 'arthur': 6, 'anna': 6},
        ],
        'Vecflow': [
            {'task': 11, 'arthur': 0, 'anna': 6},
            {'task': 13, 'arthur': 6, 'anna': 0},
            {'task': 15, 'arthur': 6, 'anna': 0},
            {'task': 18, 'arthur': 6, 'anna': 6},
            {'task': 19, 'arthur': 0, 'anna': 0},
        ],
    }
    # Map pass/fail values (6 = pass, 0 = fail) to binary for plotting
    mapped_trend_data = {}
    for platform, data in trend_data.items():
        mapped_trend_data[platform] = [
            {'task': item['task'], 'arthur': 1 if item['arthur'] == 6 else 0, 'anna': 1 if item['anna'] == 6 else 0} for item in data
        ]
    return data, mapped_trend_data, platform, task_type_data, trend_data
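
# Added sketch (a hypothetical helper, not in the original notebook): compute
# the share of the listed tasks each evaluator passed for a platform from
# `mapped_trend_data` (1 = pass, 0 = fail). Note this covers only the tasks
# recorded per platform, so it need not match the headline pass-rate metrics,
# which were assigned separately in `platforms_data`.
@app.cell
def _(mapped_trend_data):
    def listed_task_pass_rates(platform_name):
        """Return (arthur_pct, anna_pct) over the tasks listed for a platform."""
        rows = mapped_trend_data[platform_name]
        arthur_pct = 100 * sum(r['arthur'] for r in rows) / len(rows)
        anna_pct = 100 * sum(r['anna'] for r in rows) / len(rows)
        return arthur_pct, anna_pct

    return (listed_task_pass_rates,)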

@app.cell
def _(alt, mapped_trend_data, pd):
    def plot_task_performance_interactive(platform_name):
        """Create an interactive line chart for task performance"""
        # Convert to DataFrame
        data = pd.DataFrame(mapped_trend_data[platform_name])
        # Melt the dataframe for Altair
        data_melted = data.melt(id_vars=['task'], var_name='evaluator', value_name='result')
        # Create a color scale
        color_scale = alt.Scale(domain=['arthur', 'anna'], range=['#4c78a8', '#ff7f0e'])
        # Create the chart
        chart = (
            alt.Chart(data_melted)
            .mark_line(point=True)
            .encode(
                x=alt.X('task:N', title='Task Number'),
                y=alt.Y(
                    'result:N', title='Result', scale=alt.Scale(domain=[0, 1]), axis=alt.Axis(labelExpr="datum.value === 0 ? 'Fail' : 'Pass'")
                ),
                color=alt.Color('evaluator:N', title='Evaluator', scale=color_scale, legend=alt.Legend(title='Evaluator')),
                tooltip=['task', 'evaluator', alt.Tooltip('result', title='Result', format='.0f', formatType='number')],
            )
            .transform_calculate(result_label="datum.result === 0 ? 'Fail' : 'Pass'")
            .properties(width=500, height=300, title=f'{platform_name} Task Performance')
            .configure_title(fontSize=20, anchor='start')
            .configure_axis(labelFontSize=12, titleFontSize=14)
            .configure_point(size=100)
            .interactive()
        )
        return chart

    return (plot_task_performance_interactive,)

@app.cell
def _(alt, task_type_data):
    def plot_task_type_performance_interactive():
        """Create an interactive bar chart for task type performance"""
        # Melt the dataframe for Altair
        task_type_melted = task_type_data.melt(id_vars=['name'], var_name='evaluator', value_name='score')
        # Create a color scale
        color_scale = alt.Scale(domain=['arthur', 'anna'], range=['#4c78a8', '#ff7f0e'])
        # Create the chart
        chart = (
            alt.Chart(task_type_melted)
            .mark_bar()
            .encode(
                x=alt.X('name:N', title='Task Type', axis=alt.Axis(labelAngle=-45)),
                y=alt.Y('score:Q', title='Average Score (%)'),
                color=alt.Color('evaluator:N', title='Evaluator', scale=color_scale),
                tooltip=['name', 'evaluator', alt.Tooltip('score', title='Score', format='.0f')],
            )
            .properties(width=600, height=400, title='Task Type Performance Analysis')
            .configure_title(fontSize=20, anchor='start')
            .configure_axis(labelFontSize=12, titleFontSize=14)
            .interactive()
        )
        return chart

    return (plot_task_type_performance_interactive,)

@app.cell
def _(
    display_platform_evaluation,
    platform_summary,
    plot_platform_radar_interactive,
    plot_task_performance_interactive,
):
    def analyze_platform_interactive(platform_name='DeepSeek'):
        """Create a comprehensive interactive analysis for a single platform"""
        from IPython.display import display, HTML, Markdown

        # Display the platform name
        display(Markdown(f'# AI Platform In-Depth Analysis: {platform_name}'))
        # Create the metrics chart
        display(Markdown('## Performance Metrics'))
        display(plot_platform_radar_interactive(platform_name))
        # Show task performance
        display(Markdown('## Task Performance'))
        display(plot_task_performance_interactive(platform_name))
        # Display strengths and weaknesses
        display(Markdown('## Platform Evaluation'))
        display_platform_evaluation(platform_name)
        # Show platform summary
        display(Markdown('## Platform Summary'))
        platform_summary(platform_name)
        return None

    return (analyze_platform_interactive,)

@app.cell
def _(analyze_platform_interactive):
    # Analyze Vecflow
    analyze_platform_interactive('Vecflow')
    return

@app.cell
def _(analyze_platform_interactive):
    # Analyze Notebook LM
    analyze_platform_interactive('Notebook LM')
    return


@app.cell
def _(analyze_platform_interactive):
    # Analyze GC AI
    analyze_platform_interactive('GC AI')
    return


@app.cell
def _(analyze_platform_interactive):
    # Analyze CoPilot
    analyze_platform_interactive('CoPilot')
    return


@app.cell
def _(analyze_platform_interactive):
    # Analyze Chat GPT
    analyze_platform_interactive('Chat GPT')
    return


@app.cell
def _(analyze_platform_interactive):
    # Analyze DeepSeek
    analyze_platform_interactive('DeepSeek')
    return

@app.cell
def _(compare_all_platforms_interactive):
    # Compare all platforms
    compare_all_platforms_interactive()
    return

@app.cell
def _(
    compare_platforms_interactive,
    pd,
    platforms_data,
    plot_task_type_performance_interactive,
):
    def compare_all_platforms_interactive():
        """Display an interactive comparison of all platforms"""
        from IPython.display import display, Markdown

        # Display the title
        display(Markdown('# AI Platform Comparison'))
        # Show interactive comparison chart
        display(Markdown('## Metrics Comparison'))
        display(compare_platforms_interactive())
        # Show task type performance
        display(Markdown('## Task Type Performance'))
        display(plot_task_type_performance_interactive())
        # Overall rankings
        display(Markdown('## Overall Platform Rankings'))
        # Calculate average metrics for each platform
        rankings = []
        for platform, data in platforms_data.items():
            avg_metrics = sum(metric['value'] for metric in data['metrics']) / len(data['metrics'])
            rankings.append({'Platform': platform, 'Average Score': avg_metrics})
        rankings_df = pd.DataFrame(rankings)
        rankings_df.sort_values('Average Score', ascending=False, inplace=True)
        # Print the rankings in descending order of average score
        for i, (idx, row) in enumerate(rankings_df.iterrows(), 1):
            print(f'{i}. {row["Platform"]} - Average Score: {row["Average Score"]:.2f}')
        return None

    return (compare_all_platforms_interactive,)

@app.cell
def _(alt, pd, platforms_data):
    def compare_platforms_interactive():
        """Create an interactive chart for comparing all platforms"""
        # Create a DataFrame with all platform metrics
        metrics_comparison = []
        for platform, data in platforms_data.items():
            for metric in data['metrics']:
                metrics_comparison.append({'Platform': platform, 'Metric': metric['metric'], 'Value': metric['value']})
        comparison_df = pd.DataFrame(metrics_comparison)
        # Create a grouped bar chart, faceted by metric
        chart = (
            alt.Chart(comparison_df)
            .mark_bar()
            .encode(
                x=alt.X('Platform:N', title='Platform'),
                y=alt.Y('Value:Q', title='Score'),
                color=alt.Color('Platform:N', legend=None),
                column=alt.Column('Metric:N', title=None),
                tooltip=['Platform', 'Metric', 'Value'],
            )
            .properties(width=100, title='Platform Metric Comparison')
            .configure_title(fontSize=20, anchor='start')
            .configure_axis(labelFontSize=12, titleFontSize=14)
            .interactive()
        )
        return chart

    return (compare_platforms_interactive,)

@app.cell
def _(alt, pd, platforms_data):
    def plot_platform_radar_interactive(platform_name):
        """Create an interactive metrics chart (a line-chart stand-in for a radar chart) using Altair"""
        # Get platform metrics data
        metrics = platforms_data[platform_name]['metrics']
        # Convert to long format for Altair
        metrics_df = pd.DataFrame(metrics)
        # Create the base chart
        chart = (
            alt.Chart(metrics_df)
            .mark_line(point=True)
            .encode(
                x=alt.X('metric:N', title=None, sort=None),
                y=alt.Y('value:Q', scale=alt.Scale(domain=[0, 100]), title='Score'),
                color=alt.value('#4c78a8'),
                tooltip=['metric', 'value'],
            )
            .properties(width=500, height=400, title=f'{platform_name} Performance Metrics')
            .configure_title(fontSize=20, anchor='start')
            .configure_axis(labelFontSize=12, titleFontSize=14)
            .configure_point(size=100)
            .interactive()
        )
        return chart

    return (plot_platform_radar_interactive,)

@app.cell
def _(alt):
    alt.renderers.enable('default')
    return

@app.cell
def _(pd, platforms_data, plot_task_type_performance, plt):
    def compare_all_platforms():
        """Display a comparison of all platforms"""
        # Create a DataFrame with all platform metrics for comparison
        metrics_comparison = []
        for platform, data in platforms_data.items():
            # Extract metrics
            platform_metrics = {metric['metric']: metric['value'] for metric in data['metrics']}
            platform_metrics['Platform'] = platform
            metrics_comparison.append(platform_metrics)
        comparison_df = pd.DataFrame(metrics_comparison)
        comparison_df.set_index('Platform', inplace=True)
        # Display the comparison table
        print('# AI Platform Comparison\n')
        print('## Metrics Comparison')
        print(comparison_df)
        # Create a bar chart to compare platforms
        # (DataFrame.plot creates its own figure, so no separate plt.figure call is needed)
        comparison_df.plot(kind='bar', figsize=(14, 8))
        plt.title('Platform Metrics Comparison')
        plt.xlabel('Platform')
        plt.ylabel('Score')
        plt.legend(title='Metrics', bbox_to_anchor=(1.05, 1), loc='upper left')
        plt.tight_layout()
        print('\n## Task Type Performance')
        plot_task_type_performance()
        # Overall rankings
        print('\n## Overall Platform Rankings')
        # Calculate average metrics for each platform
        rankings = []
        for platform, data in platforms_data.items():
            avg_metrics = sum(metric['value'] for metric in data['metrics']) / len(data['metrics'])
            rankings.append({'Platform': platform, 'Average Score': avg_metrics})
        rankings_df = pd.DataFrame(rankings)
        rankings_df.sort_values('Average Score', ascending=False, inplace=True)
        # Display rankings
        for i, (idx, row) in enumerate(rankings_df.iterrows(), 1):
            print(f'{i}. {row["Platform"]} - Average Score: {row["Average Score"]:.2f}')
        return plt.gca()

    return (compare_all_platforms,)

@app.cell
def _(compare_all_platforms):
    # Compare all platforms
    compare_all_platforms()
    return

@app.cell
def _(platforms_data):
    def platform_summary(platform_name):
        """Display a summary of the platform performance"""
        summaries = {
            'DeepSeek': 'DeepSeek shows the strongest overall performance across both evaluators, with a perfect pass rate from Anna and high marks on both helpfulness and adequate length metrics. It consistently delivers high-quality responses across various task types.',
            'Chat GPT': "Chat GPT performs excellently according to Arthur with a perfect pass rate, but shows inconsistency with Anna's evaluations. Its strengths lie in helpfulness and adequate response length, particularly in extraction and summarization tasks.",
            'Notebook LM': 'Notebook LM demonstrates the highest level of evaluator agreement with identical pass rates from Arthur and Anna. It excels in adequate length ratings but scores lower on helpfulness metrics from Arthur.',
            'CoPilot': 'CoPilot shows moderate performance across metrics with slightly higher ratings from Anna than Arthur. It maintains consistency in adequate length but struggles with more complex analysis tasks.',
            'GC AI': 'GC AI exhibits the largest discrepancy between evaluator ratings, with Arthur giving significantly higher scores than Anna across all metrics. It performs well in adequate length according to Arthur but scores poorly in helpfulness from Anna.',
            'Vecflow': 'Vecflow demonstrates perfect agreement on helpfulness ratings between evaluators, though these scores are the lowest across all platforms. It excels in adequate length metrics but shows inconsistent pass rates between evaluators.',
        }
        # Create tags for the platform (metric values are on the 0-100 scale; /50 recovers the 0-2 rating)
        tags = []
        metrics = platforms_data[platform_name]['metrics']
        tags.append(f'📊 {platform_name}')
        if metrics[0]['value'] >= 60:
            tags.append('🟢 High Arthur Pass Rate')
        if metrics[1]['value'] >= 60:
            tags.append('🟢 High Anna Pass Rate')
        if metrics[2]['value'] / 50 >= 1.3:
            tags.append('🟣 Strong Helpfulness (Arthur)')
        if metrics[3]['value'] / 50 >= 1.3:
            tags.append('🟣 Strong Helpfulness (Anna)')
        if metrics[4]['value'] / 50 >= 1.7:
            tags.append('🔵 Excellent Length (Arthur)')
        if metrics[5]['value'] / 50 >= 1.7:
            tags.append('🔵 Excellent Length (Anna)')
        if metrics[0]['value'] == metrics[1]['value']:
            tags.append('🟡 Evaluator Agreement')
        print(f'== {platform_name} Summary ==\n')
        print(summaries[platform_name])
        print('\nTags:')
        print(' '.join(tags))
        return None

    return (platform_summary,)

@app.cell
def _(np, platforms_data, plt):
    def plot_platform_radar(platform_name):
        """Create a radar chart for platform metrics with enhanced styling"""
        metrics = platforms_data[platform_name]['metrics']
        # Extract data
        categories = [m['metric'] for m in metrics]
        values = [m['value'] for m in metrics]
        # Number of categories
        N = len(categories)
        # Create angle for each category
        angles = [n / float(N) * 2 * np.pi for n in range(N)]
        angles += angles[:1]  # Close the loop
        # Add the first value at the end to close the circle
        values += values[:1]
        # Create figure
        fig, ax = plt.subplots(figsize=(10, 6), subplot_kw=dict(polar=True), facecolor='#f8f9fa')
        # Draw the chart
        ax.plot(angles, values, linewidth=2, linestyle='solid', label=platform_name, color='#8884d8')
        ax.fill(angles, values, alpha=0.25, color='#8884d8')
        # Set category labels
        plt.xticks(angles[:-1], categories, size=10, fontweight='bold', color='#444444')
        # Set y-axis limits
        ax.set_ylim(0, 100)
        # Add grid
        ax.grid(color='#dddddd', linestyle='-', linewidth=0.5)
        # Set background color for each level
        ax.set_facecolor('#f8f9fa')
        # Add title with platform-specific color
        platform_colors = {
            'DeepSeek': '#6b5b95',
            'Chat GPT': '#3498db',
            'CoPilot': '#f39c12',
            'GC AI': '#1abc9c',
            'Notebook LM': '#e74c3c',
            'Vecflow': '#9b59b6',
        }
        color = platform_colors.get(platform_name, '#8884d8')
        plt.title(f'{platform_name} Performance Metrics', size=16, fontweight='bold', color=color, pad=20)
        # Add legend
        plt.legend(loc='upper right', bbox_to_anchor=(0.1, 0.1), frameon=True, facecolor='white', edgecolor='#dddddd')
        plt.tight_layout()
        return plt.gca()

    return (plot_platform_radar,)

@app.cell
def _(mapped_trend_data, pd, plt, sns):
    def plot_task_performance(platform_name):
        """Create an enhanced line chart for task performance"""
        # Convert to DataFrame
        data = pd.DataFrame(mapped_trend_data[platform_name])
        # Set a theme
        sns.set_style('whitegrid')
        plt.figure(figsize=(10, 6), facecolor='#f8f9fa')
        # Platform-specific colors
        platform_colors = {
            'DeepSeek': ('#6b5b95', '#d64161'),
            'Chat GPT': ('#3498db', '#1abc9c'),
            'CoPilot': ('#f39c12', '#e67e22'),
            'GC AI': ('#1abc9c', '#16a085'),
            'Notebook LM': ('#e74c3c', '#c0392b'),
            'Vecflow': ('#9b59b6', '#8e44ad'),
        }
        arthur_color, anna_color = platform_colors.get(platform_name, ('#8884d8', '#82ca9d'))
        # Plot lines with enhanced styling
        plt.plot(
            data['task'],
            data['arthur'],
            marker='o',
            markersize=10,
            linestyle='-',
            linewidth=2.5,
            label="Arthur's Evaluation",
            color=arthur_color,
            alpha=0.9,
        )
        plt.plot(
            data['task'],
            data['anna'],
            marker='s',
            markersize=10,
            linestyle='-',
            linewidth=2.5,
            label="Anna's Evaluation",
            color=anna_color,
            alpha=0.9,
        )
        # Customize plot
        plt.title(f'{platform_name} Task Performance', fontsize=16, fontweight='bold')
        plt.xlabel('Task Number', fontsize=12, fontweight='bold')
        plt.ylabel('Result', fontsize=12, fontweight='bold')
        # Set y-axis to show Pass/Fail instead of 1/0
        plt.yticks([0, 1], ['Fail', 'Pass'], fontsize=12)
        # Ensure x-axis shows integer task numbers
        plt.xticks(data['task'], fontsize=11)
        plt.grid(True, linestyle='--', alpha=0.7)
        # Enhanced legend
        plt.legend(
            loc='upper center', bbox_to_anchor=(0.5, -0.15), facecolor='white', edgecolor='#dddddd', shadow=True, ncol=2, fontsize=12
        )
        # Add a border to the plot
        ax = plt.gca()
        for spine in ax.spines.values():
            spine.set_edgecolor('#dddddd')
            spine.set_linewidth(1.5)
        plt.tight_layout()
        return plt.gca()

    return (plot_task_performance,)

@app.cell
def _(platforms_data):
    def display_platform_evaluation(platform_name):
        """Display platform strengths and weaknesses with HTML styling"""
        from IPython.display import HTML, display

        strengths = platforms_data[platform_name]['strengths']
        weaknesses = platforms_data[platform_name]['weaknesses']
        # Platform-specific color
        platform_colors = {
            'DeepSeek': '#6b5b95',
            'Chat GPT': '#3498db',
            'CoPilot': '#f39c12',
            'GC AI': '#1abc9c',
            'Notebook LM': '#e74c3c',
            'Vecflow': '#9b59b6',
        }
        color = platform_colors.get(platform_name, '#8884d8')
        html_output = f"""
        <div style="background-color: #f8f9fa; padding: 20px; border-radius: 10px; border: 1px solid #dddddd; margin: 15px 0;">
            <h2 style="color: {color}; text-align: center; margin-bottom: 20px; border-bottom: 2px solid {color}; padding-bottom: 10px;">
                {platform_name} Evaluation
            </h2>
            <div style="display: flex; flex-wrap: wrap; gap: 20px;">
                <div style="flex: 1; min-width: 300px; background-color: white; border-radius: 8px; padding: 15px; border: 1px solid #eaeaea; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
                    <h3 style="color: #28a745; margin-bottom: 15px; border-bottom: 1px solid #eaeaea; padding-bottom: 8px;">Key Strengths</h3>
                    <ul style="list-style-type: none; padding-left: 5px; margin-bottom: 0;">
        """
        for strength in strengths:
            html_output += f'<li style="margin-bottom: 10px; display: flex; align-items: center;"><span style="color: #28a745; margin-right: 10px; font-size: 18px;">✅</span> {strength}</li>'
        html_output += """
                    </ul>
                </div>
                <div style="flex: 1; min-width: 300px; background-color: white; border-radius: 8px; padding: 15px; border: 1px solid #eaeaea; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
                    <h3 style="color: #dc3545; margin-bottom: 15px; border-bottom: 1px solid #eaeaea; padding-bottom: 8px;">Areas for Improvement</h3>
                    <ul style="list-style-type: none; padding-left: 5px; margin-bottom: 0;">
        """
        for weakness in weaknesses:
            html_output += f'<li style="margin-bottom: 10px; display: flex; align-items: center;"><span style="color: #dc3545; margin-right: 10px; font-size: 18px;">⚠️</span> {weakness}</li>'
        html_output += """
                    </ul>
                </div>
            </div>
        </div>
        """
        display(HTML(html_output))
        return None

    return (display_platform_evaluation,)

@app.cell
def _(np, plt, sns, task_type_data):
    def plot_task_type_performance():
        """Create an enhanced bar chart for task type performance"""
        # Set a theme
        sns.set_style('whitegrid')
        plt.figure(figsize=(12, 6), facecolor='#f8f9fa')
        # Customize colors
        colors = {'arthur': '#6b5b95', 'anna': '#d64161'}
        # Set width of bars
        bar_width = 0.35
        # Set positions of bars on x-axis
        x = np.arange(len(task_type_data))
        # Create bars with enhanced styling
        plt.bar(
            x - bar_width / 2,
            task_type_data['arthur'],
            bar_width,
            label="Arthur's Rating",
            color=colors['arthur'],
            edgecolor='white',
            linewidth=1.5,
            alpha=0.9,
        )
        plt.bar(
            x + bar_width / 2,
            task_type_data['anna'],
            bar_width,
            label="Anna's Rating",
            color=colors['anna'],
            edgecolor='white',
            linewidth=1.5,
            alpha=0.9,
        )
        # Add labels and title with enhanced styling
        plt.xlabel('Task Type', fontsize=12, fontweight='bold')
        plt.ylabel('Average Score (%)', fontsize=12, fontweight='bold')
        plt.title('Task Type Performance Analysis', fontsize=16, fontweight='bold')
        # Add xticks on the middle of the group bars with better formatting
        plt.xticks(x, task_type_data['name'], rotation=30, ha='right', fontsize=11, fontweight='bold')
        # Create enhanced legend
        plt.legend(
            loc='upper center', bbox_to_anchor=(0.5, -0.15), facecolor='white', edgecolor='#dddddd', shadow=True, ncol=2, fontsize=12
        )
        # Add value labels on top of each bar
        for i, v in enumerate(task_type_data['arthur']):
            plt.text(i - bar_width / 2, v + 2, str(v), ha='center', fontsize=9, fontweight='bold')
        for i, v in enumerate(task_type_data['anna']):
            plt.text(i + bar_width / 2, v + 2, str(v), ha='center', fontsize=9, fontweight='bold')
        # Add grid
        plt.grid(True, linestyle='--', alpha=0.7, axis='y')
        # Add a border to the plot
        ax = plt.gca()
        for spine in ax.spines.values():
            spine.set_edgecolor('#dddddd')
            spine.set_linewidth(1.5)
        # Adjust layout
        plt.tight_layout()
        return plt.gca()

    return (plot_task_type_performance,)

@app.cell
def _(
    display_platform_evaluation,
    platform_summary,
    plot_platform_radar,
    plot_task_performance,
):
    def analyze_platform(platform_name='DeepSeek'):
        """Create a comprehensive analysis for a single platform"""
        # Display the platform name
        print(f'# AI Platform In-Depth Analysis: {platform_name}\n')
        # Create the radar chart for metrics
        print('## Performance Metrics')
        plot_platform_radar(platform_name)
        # Show task performance
        print('\n## Task Performance')
        plot_task_performance(platform_name)
        # Display strengths and weaknesses
        print('\n## Platform Evaluation')
        display_platform_evaluation(platform_name)
        # Show platform summary
        print('\n## Platform Summary')
        platform_summary(platform_name)
        return None

    return (analyze_platform,)

@app.cell
def _(platforms_data):
    def platform_selector():
        """Print the available platforms and how to run the analysis helpers"""
        print('Available platforms for analysis:')
        for i, platform in enumerate(platforms_data.keys(), 1):
            print(f'{i}. {platform}')
        print('\nTo analyze a platform, run:')
        print('analyze_platform("platform_name")')
        print('\nTo compare all platforms, run:')
        print('compare_all_platforms()')
        return None

    # Display available platforms
    platform_selector()
    return (platform_selector,)

@app.cell
def _():
    return

@app.cell
def _(plot_platform_radar_interactive):
    # Render the interactive metrics chart for DeepSeek
    plot_platform_radar_interactive('DeepSeek')
    return

@app.cell
def _(compare_all_platforms_interactive):
    # Execute the compare_all_platforms_interactive function
    compare_all_platforms_interactive()
    return

@app.cell
def _(platform_selector):
    # Call platform_selector to display available platforms
    platform_selector()
    return

@app.cell
def _():
    return

@app.cell
def _(pd):
    import json
    from IPython.display import HTML, display

    # Convert the agreement data into a Python structure
    agreement_data = [
        {'platform': 'Chat GPT', 'arthurValue': 1.5, 'annaValue': 1.25, 'category': 'Helpfulness'},
        {'platform': 'CoPilot', 'arthurValue': 1.0, 'annaValue': 1.33, 'category': 'Helpfulness'},
        {'platform': 'DeepSeek', 'arthurValue': 1.33, 'annaValue': 2.0, 'category': 'Helpfulness'},
        {'platform': 'GC AI', 'arthurValue': 1.4, 'annaValue': 0.5, 'category': 'Helpfulness'},
        {'platform': 'Notebook LM', 'arthurValue': 0.8, 'annaValue': 1.2, 'category': 'Helpfulness'},
        {'platform': 'Vecflow', 'arthurValue': 0.6, 'annaValue': 0.6, 'category': 'Helpfulness'},
        {'platform': 'Chat GPT', 'arthurValue': 1.75, 'annaValue': 1.25, 'category': 'Adequate Length'},
        {'platform': 'CoPilot', 'arthurValue': 1.2, 'annaValue': 1.33, 'category': 'Adequate Length'},
        {'platform': 'DeepSeek', 'arthurValue': 2.0, 'annaValue': 1.67, 'category': 'Adequate Length'},
        {'platform': 'GC AI', 'arthurValue': 1.8, 'annaValue': 1.0, 'category': 'Adequate Length'},
        {'platform': 'Notebook LM', 'arthurValue': 1.6, 'annaValue': 2.0, 'category': 'Adequate Length'},
        {'platform': 'Vecflow', 'arthurValue': 1.8, 'annaValue': 1.4, 'category': 'Adequate Length'},
    ]
    # Convert pass/fail agreement data
    pass_fail_agreement = [
        {'platform': 'Chat GPT', 'arthur': 100, 'anna': 40, 'agreement': 'Disagree'},
        {'platform': 'CoPilot', 'arthur': 40, 'anna': 60, 'agreement': 'Disagree'},
        {'platform': 'DeepSeek', 'arthur': 75, 'anna': 100, 'agreement': 'Disagree'},
        {'platform': 'GC AI', 'arthur': 60, 'anna': 40, 'agreement': 'Disagree'},
        {'platform': 'Notebook LM', 'arthur': 60, 'anna': 60, 'agreement': 'Agree'},
        {'platform': 'Vecflow', 'arthur': 60, 'anna': 40, 'agreement': 'Disagree'},
    ]

    # Calculate correlations using pandas for accuracy
    def calculate_correlations():
        helpfulness_data = pd.DataFrame([item for item in agreement_data if item['category'] == 'Helpfulness'])
        adequate_length_data = pd.DataFrame([item for item in agreement_data if item['category'] == 'Adequate Length'])
        pass_fail_data = pd.DataFrame(pass_fail_agreement)
        helpfulness_correlation = helpfulness_data['arthurValue'].corr(helpfulness_data['annaValue'])
        adequate_length_correlation = adequate_length_data['arthurValue'].corr(adequate_length_data['annaValue'])
        pass_rate_correlation = pass_fail_data['arthur'].corr(pass_fail_data['anna'])
        return {
            'helpfulness': round(helpfulness_correlation, 2),
            'adequate_length': round(adequate_length_correlation, 2),
            'pass_rate': round(pass_rate_correlation, 2),
        }

    correlations = calculate_correlations()
    return (
        HTML,
        agreement_data,
        calculate_correlations,
        correlations,
        display,
        json,
        pass_fail_agreement,
    )
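
# Added sketch: alongside the Pearson correlations computed above, the mean
# absolute Arthur-Anna gap per category is a simple complementary agreement
# summary. This cell is an illustrative addition, not part of the original
# notebook.
@app.cell
def _(agreement_data, pd):
    def mean_abs_gap():
        """Mean |Arthur - Anna| per rating category, on the 0-2 scale."""
        df = pd.DataFrame(agreement_data)
        df['gap'] = (df['arthurValue'] - df['annaValue']).abs()
        return df.groupby('category')['gap'].mean().round(2)

    mean_abs_gap()
    return (mean_abs_gap,)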

@app.cell
def _(correlations):
    correlations
    return

@app.cell
def _(
    agree_count,
    agreement_data,
    calculate_average_metrics,
    correlations,
    disagree_count,
    np,
    pass_fail_agreement,
    pd,
    plt,
):
    def _():
        def interactive_evaluator_dashboard():
            """Display an interactive dashboard for evaluator analysis"""
            from IPython.display import display, Markdown, HTML

            # Display header
            display(
                HTML("""
                <div style="background-color: #f8f9fa; padding: 20px; border-radius: 10px; text-align: center; margin-bottom: 20px;">
                    <h1 style="color: #333; margin-bottom: 10px;">Evaluator Comparison Analysis</h1>
                    <p style="font-style: italic; color: #666;">Analyzing differences between Arthur's and Anna's evaluations</p>
                </div>
                """)
            )
            # Display Agreement Section
            display(Markdown('## Agreement Overview'))
            # Create side-by-side visualizations
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 7))
            # Agreement Pie Chart
            labels = ['Agreement', 'Disagreement']
            sizes = [agree_count, disagree_count]
            colors = ['#4CAF50', '#F44336']
            explode = (0.1, 0)
            ax1.pie(
                sizes,
                explode=explode,
                labels=labels,
                colors=colors,
                autopct='%1.1f%%',
                shadow=True,
                startangle=140,
                textprops={'fontsize': 12, 'fontweight': 'bold'},
            )
            ax1.set_title('Evaluator Pass/Fail Agreement', fontsize=16, fontweight='bold')
            # Average Scores Bar Chart
            avg_df = calculate_average_metrics()
            # Set width of bars
            bar_width = 0.35
            x = np.arange(len(avg_df))
            # Create bars
            ax2.bar(
                x - bar_width / 2,
                avg_df['Arthur'],
                width=bar_width,
                label="Arthur's Avg",
                color='#8884d8',
                edgecolor='white',
                linewidth=1.5,
            )
            ax2.bar(
                x + bar_width / 2, avg_df['Anna'], width=bar_width, label="Anna's Avg", color='#82ca9d', edgecolor='white', linewidth=1.5
            )
            # Add data labels
            for i in range(len(x)):
                ax2.text(
                    x[i] - bar_width / 2,
                    avg_df['Arthur'][i] + 0.05,
                    f'{avg_df["Arthur"][i]:.2f}',
                    ha='center',
                    va='bottom',
                    fontweight='bold',
                    fontsize=10,
                )
                ax2.text(
                    x[i] + bar_width / 2,
                    avg_df['Anna'][i] + 0.05,
                    f'{avg_df["Anna"][i]:.2f}',
                    ha='center',
                    va='bottom',
                    fontweight='bold',
                    fontsize=10,
                )
            # Customize plot
            ax2.set_xlabel('Category', fontsize=12, fontweight='bold')
            ax2.set_ylabel('Average Score', fontsize=12, fontweight='bold')
            ax2.set_title('Average Scores by Evaluator', fontsize=16, fontweight='bold')
            ax2.set_xticks(x)
            ax2.set_xticklabels(avg_df['Category'], fontsize=12)
            ax2.set_ylim(0, 2.2)
            ax2.grid(axis='y', linestyle='--', alpha=0.7)
            ax2.legend(loc='lower center', bbox_to_anchor=(0.5, -0.25), ncol=2, fontsize=12)
            plt.tight_layout()
            display(plt.gcf())
            plt.close()
            # Now show correlation analysis
            display(Markdown('## Correlation Analysis'))
            # Create correlations chart
            fig, ax = plt.subplots(figsize=(10, 6))
            metrics = ['Helpfulness', 'Adequate Length', 'Pass Rate']
            corr_values = [correlations['helpfulness'], correlations['adequate_length'], correlations['pass_rate']]
            bars = ax.bar(metrics, corr_values)
            # Colorize bars based on correlation (positive or negative)
            for i, bar in enumerate(bars):
                if corr_values[i] < 0:
                    bar.set_color('#F44336')  # red for negative correlation
                else:
                    bar.set_color('#4CAF50')  # green for positive correlation
            # Add correlation values above/below bars
            for i, v in enumerate(corr_values):
                if v >= 0:
                    ax.text(i, v + 0.05, f'{v:.2f}', ha='center', fontweight='bold')
                else:
                    ax.text(i, v - 0.1, f'{v:.2f}', ha='center', fontweight='bold')
            # Add reference line at y=0
            ax.axhline(y=0, color='black', linestyle='-', alpha=0.3)
            # Set y-axis limits to show the full range -1 to 1
            ax.set_ylim(-1.1, 1.1)
            ax.set_title('Evaluator Correlation Analysis', fontsize=14, fontweight='bold')
            ax.set_ylabel('Correlation Coefficient', fontsize=12)
            ax.text(
                1,
                -0.9,
                'Range: -1 to 1, where 1 is perfect positive correlation,\n-1 is perfect negative correlation, and 0 is no correlation',
                fontsize=8,
                ha='center',
                style='italic',
            )
            plt.tight_layout()
            display(plt.gcf())
            plt.close()
            # Display scatter plots
            display(Markdown('## Score Comparison Scatter Plots'))
            # Create a 1x2 grid for helpfulness and adequate length scatter plots
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 7))
            # Helpfulness Scatter Plot
            helpfulness_data = [item for item in agreement_data if item['category'] == 'Helpfulness']
            x1 = [item['arthurValue'] for item in helpfulness_data]
            y1 = [item['annaValue'] for item in helpfulness_data]
            platforms1 = [item['platform'] for item in helpfulness_data]
            ax1.scatter(x1, y1, c='#8884d8', s=100, alpha=0.7)
            # Add platform labels
            for i, platform in enumerate(platforms1):
                ax1.annotate(platform, (x1[i], y1[i]), textcoords='offset points', xytext=(0, 10), ha='center')
            # Add axis labels
            ax1.set_xlabel("Arthur's Rating", fontsize=12)
            ax1.set_ylabel("Anna's Rating", fontsize=12)
            ax1.set_title('Helpfulness Correlation', fontsize=14, fontweight='bold')
            # Set axis limits
            ax1.set_xlim(0, 2)
            ax1.set_ylim(0, 2)
            # Add perfect correlation line
            ax1.plot([0, 2], [0, 2], 'k--', alpha=0.3)
            # Add correlation value text
            ax1.text(0.1, 1.8, f'Correlation: {correlations["helpfulness"]}', fontsize=12, bbox=dict(facecolor='white', alpha=0.5))
            ax1.grid(True, linestyle='--', alpha=0.3)
            # Adequate Length Scatter Plot
            adequate_length_data = [item for item in agreement_data if item['category'] == 'Adequate Length']
            x2 = [item['arthurValue'] for item in adequate_length_data]
            y2 = [item['annaValue'] for item in adequate_length_data]
            platforms2 = [item['platform'] for item in adequate_length_data]
            ax2.scatter(x2, y2, c='#82ca9d', s=100, alpha=0.7)
            # Add platform labels
            for i, platform in enumerate(platforms2):
                ax2.annotate(platform, (x2[i], y2[i]), textcoords='offset points', xytext=(0, 10), ha='center')
            # Add axis labels
            ax2.set_xlabel("Arthur's Rating", fontsize=12)
            ax2.set_ylabel("Anna's Rating", fontsize=12)
            ax2.set_title('Adequate Length Correlation', fontsize=14, fontweight='bold')
            # Set axis limits
            ax2.set_xlim(0, 2)
            ax2.set_ylim(0, 2)
            # Add perfect correlation line
            ax2.plot([0, 2], [0, 2], 'k--', alpha=0.3)
            # Add correlation value text
            ax2.text(0.1, 1.8, f'Correlation: {correlations["adequate_length"]}', fontsize=12, bbox=dict(facecolor='white', alpha=0.5))
            ax2.grid(True, linestyle='--', alpha=0.3)
            plt.tight_layout()
            display(plt.gcf())
            plt.close()
            # Pass Rate Correlation Scatter Plot
            display(Markdown('## Pass Rate Comparison'))
            plt.figure(figsize=(10, 6))
            x = [item['arthur'] for item in pass_fail_agreement]
            y = [item['anna'] for item in pass_fail_agreement]
            platforms = [item['platform'] for item in pass_fail_agreement]
            colors = ['#4CAF50' if item['agreement'] == 'Agree' else '#F44336' for item in pass_fail_agreement]
            plt.scatter(x, y, c=colors, s=100, alpha=0.7)
            # Add platform labels
            for i, platform in enumerate(platforms):
                plt.annotate(platform, (x[i], y[i]), textcoords='offset points', xytext=(0, 10), ha='center')
            # Add axis labels
            plt.xlabel("Arthur's Pass Rate (%)", fontsize=12)
            plt.ylabel("Anna's Pass Rate (%)", fontsize=12)
            plt.title('Pass Rate Correlation', fontsize=14, fontweight='bold')
            # Set axis limits
            plt.xlim(30, 105)
            plt.ylim(30, 105)
            # Add perfect correlation line
            plt.plot([30, 105], [30, 105], 'k--', alpha=0.3)
            # Add correlation value text
            plt.text(35, 95, f'Correlation: {correlations["pass_rate"]}', fontsize=12, bbox=dict(facecolor='white', alpha=0.5))
            # Add legend
            from matplotlib.lines import Line2D

            legend_elements = [
                Line2D([0], [0], marker='o', color='w', markerfacecolor='#4CAF50', markersize=10, label='Agreement'),
                Line2D([0], [0], marker='o', color='w', markerfacecolor='#F44336', markersize=10, label='Disagreement'),
            ]
            plt.legend(handles=legend_elements, loc='upper left')
            plt.grid(True, linestyle='--', alpha=0.3)
            plt.tight_layout()
            display(plt.gcf())
            plt.close()
            # Platform-specific differences
            display(Markdown('## Platform-specific Evaluator Differences'))
            # Build one row per platform with Arthur-minus-Anna differences
            platform_differences = []
            for platform in set(item['platform'] for item in agreement_data):
                helpfulness = next(
                    (item for item in agreement_data if item['platform'] == platform and item['category'] == 'Helpfulness'), None
                )
                adequate_length = next(
                    (item for item in agreement_data if item['platform'] == platform and item['category'] == 'Adequate Length'), None
                )
                pass_fail = next((item for item in pass_fail_agreement if item['platform'] == platform), None)
                if helpfulness and adequate_length and pass_fail:
                    platform_differences.append(
                        {
                            'Platform': platform,
                            'Helpfulness Diff': helpfulness['arthurValue'] - helpfulness['annaValue'],
                            'Adequate Length Diff': adequate_length['arthurValue'] - adequate_length['annaValue'],
                            'Pass Rate Diff': pass_fail['arthur'] - pass_fail['anna'],
                            'Agreement': pass_fail['agreement'],
                        }
                    )
            platform_diff_df = pd.DataFrame(platform_differences)
            display(platform_diff_df)
            return platform_diff_df

        return interactive_evaluator_dashboard()

    _()
    return

@app.cell
def _(correlations, plt):
    # Creating Correlation Analysis Chart
    fig, ax = plt.subplots(figsize=(10, 6))
    metrics = ['Helpfulness', 'Adequate Length', 'Pass Rate']
    corr_values = [correlations['helpfulness'], correlations['adequate_length'], correlations['pass_rate']]
    bars = ax.bar(metrics, corr_values, color=['#8884d8', '#82ca9d', '#ff7300'])
    # Colorize bars based on correlation (positive or negative)
    for i, bar in enumerate(bars):
        if corr_values[i] < 0:
            bar.set_color('#F44336')  # red for negative correlation
        else:
            bar.set_color('#4CAF50')  # green for positive correlation
    # Add correlation values above/below bars
    for i, v in enumerate(corr_values):
        if v >= 0:
            ax.text(i, v + 0.05, f'{v:.2f}', ha='center', fontweight='bold')
        else:
            ax.text(i, v - 0.1, f'{v:.2f}', ha='center', fontweight='bold')
    # Add reference line at y=0
    ax.axhline(y=0, color='black', linestyle='-', alpha=0.3)
    # Set y-axis limits to show the full range -1 to 1
    ax.set_ylim(-1.1, 1.1)
    # Add labels and title
    ax.set_title('Evaluator Correlation Analysis', fontsize=14, fontweight='bold')
    ax.set_ylabel('Correlation Coefficient', fontsize=12)
    ax.text(
        1,
        -0.9,
        'Range: -1 to 1, where 1 is perfect positive correlation,\n-1 is perfect negative correlation, and 0 is no correlation',
        fontsize=8,
        ha='center',
        style='italic',
    )
    plt.tight_layout()
    return ax, bar, bars, corr_values, fig, i, metrics, v

@app.cell
def _(agreement_data, correlations, plt):
    def _():
        # Create Helpfulness Correlation Scatter Plot
        helpfulness_data = [item for item in agreement_data if item['category'] == 'Helpfulness']
        fig, ax = plt.subplots(figsize=(8, 6))
        x = [item['arthurValue'] for item in helpfulness_data]
        y = [item['annaValue'] for item in helpfulness_data]
        platforms = [item['platform'] for item in helpfulness_data]
        ax.scatter(x, y, c='#8884d8', s=100, alpha=0.7)
        # Add platform labels
        for i, platform in enumerate(platforms):
            ax.annotate(platform, (x[i], y[i]), textcoords='offset points', xytext=(0, 10), ha='center')
        # Add axis labels
        ax.set_xlabel("Arthur's Rating", fontsize=12)
        ax.set_ylabel("Anna's Rating", fontsize=12)
        ax.set_title('Helpfulness Correlation', fontsize=14, fontweight='bold')
        # Set axis limits
        ax.set_xlim(0, 2)
        ax.set_ylim(0, 2)
        # Add perfect correlation line
        ax.plot([0, 2], [0, 2], 'k--', alpha=0.3)
        # Add correlation value text
        ax.text(0.1, 1.8, f'Correlation: {correlations["helpfulness"]}', fontsize=12, bbox=dict(facecolor='white', alpha=0.5))
        plt.grid(True, linestyle='--', alpha=0.3)
        return plt.tight_layout()

    _()
    return

@app.cell
def _(agreement_data, pass_fail_agreement, pd):
    def _():
        # Create a DataFrame to show platform-specific differences
        platform_differences = []
        for platform in set(item['platform'] for item in agreement_data):
            helpfulness = next((item for item in agreement_data if item['platform'] == platform and item['category'] == 'Helpfulness'), None)
            adequate_length = next(
                (item for item in agreement_data if item['platform'] == platform and item['category'] == 'Adequate Length'), None
            )
            pass_fail = next((item for item in pass_fail_agreement if item['platform'] == platform), None)
            if helpfulness and adequate_length and pass_fail:
                # Collect one row per platform (Arthur's score minus Anna's)
                platform_differences.append(
                    {
                        'Platform': platform,
                        'Helpfulness Diff': helpfulness['arthurValue'] - helpfulness['annaValue'],
                        'Adequate Length Diff': adequate_length['arthurValue'] - adequate_length['annaValue'],
                        'Pass Rate Diff': pass_fail['arthur'] - pass_fail['anna'],
                        'Agreement': pass_fail['agreement'],
                    }
                )
        platform_diff_df = pd.DataFrame(platform_differences)
        # Round the numeric differences for display
        platform_diff_df['Helpfulness Diff'] = platform_diff_df['Helpfulness Diff'].round(1)
        platform_diff_df['Adequate Length Diff'] = platform_diff_df['Adequate Length Diff'].round(1)
        platform_diff_df['Pass Rate Diff'] = platform_diff_df['Pass Rate Diff'].astype(int)

        def style_diff(val):
            if val > 0:
                return f'Arthur +{abs(val)}'
            elif val < 0:
                return f'Anna +{abs(val)}'
            else:
                return 'Equal'

        # Apply styling and return the table for display
        styled_platform_diff = platform_diff_df.copy()
        styled_platform_diff['Helpfulness'] = styled_platform_diff['Helpfulness Diff'].apply(style_diff)
        styled_platform_diff['Adequate Length'] = styled_platform_diff['Adequate Length Diff'].apply(style_diff)
        styled_platform_diff['Pass Rate'] = styled_platform_diff['Pass Rate Diff'].apply(style_diff)
        display_cols = ['Platform', 'Helpfulness', 'Adequate Length', 'Pass Rate', 'Agreement']
        display_df = styled_platform_diff[display_cols]
        return display_df

    _()
    return

@app.cell
def _(calculate_average_metrics, np, plt):
    def plot_average_scores():
        """Plot the average scores for each category by evaluator"""
        # Get average data
        avg_df = calculate_average_metrics()
        # Set up plot
        plt.figure(figsize=(10, 6))
        # Set width of bars
        bar_width = 0.35
        x = np.arange(len(avg_df))
        # Create bars
        plt.bar(
            x - bar_width / 2,
            avg_df['Arthur'],
            width=bar_width,
            label="Arthur's Avg. Score",
            color='#8884d8',
            alpha=0.8,
            edgecolor='white',
            linewidth=1.5,
        )
        plt.bar(
            x + bar_width / 2,
            avg_df['Anna'],
            width=bar_width,
            label="Anna's Avg. Score",
            color='#82ca9d',
            alpha=0.8,
            edgecolor='white',
            linewidth=1.5,
        )
        # Add data labels
        for i in range(len(x)):
            plt.text(
                x[i] - bar_width / 2,
                avg_df['Arthur'][i] + 0.05,
                f'{avg_df["Arthur"][i]:.2f}',
                ha='center',
                va='bottom',
                color='#333',
                fontweight='bold',
            )
            plt.text(
                x[i] + bar_width / 2,
                avg_df['Anna'][i] + 0.05,
                f'{avg_df["Anna"][i]:.2f}',
                ha='center',
                va='bottom',
                color='#333',
                fontweight='bold',
            )
        # Customize plot
        plt.xlabel('Evaluation Category', fontsize=12, fontweight='bold')
        plt.ylabel('Average Score (0-2 scale)', fontsize=12, fontweight='bold')
        plt.title('Average Scores by Evaluator', fontsize=14, fontweight='bold')
        plt.xticks(x, avg_df['Category'], fontsize=11)
        plt.ylim(0, 2.2)  # Set reasonable y-axis limit
        plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), shadow=True, ncol=2)
        plt.grid(axis='y', linestyle='--', alpha=0.7)
        # Add a border to the plot
        ax = plt.gca()
        for spine in ax.spines.values():
            spine.set_edgecolor('#dddddd')
            spine.set_linewidth(1.5)
        plt.tight_layout()
        return plt.gca()

    return (plot_average_scores,)

@app.cell
def _(agreement_data, pass_fail_agreement, pd):
    def calculate_average_metrics():
        """Calculate average metrics for each evaluator and category"""
        # Process helpfulness data
        helpfulness_data = [item for item in agreement_data if item['category'] == 'Helpfulness']
        arthur_helpfulness = sum(item['arthurValue'] for item in helpfulness_data) / len(helpfulness_data)
        anna_helpfulness = sum(item['annaValue'] for item in helpfulness_data) / len(helpfulness_data)
        # Process adequate length data
        adequate_length_data = [item for item in agreement_data if item['category'] == 'Adequate Length']
        arthur_adequate = sum(item['arthurValue'] for item in adequate_length_data) / len(adequate_length_data)
        anna_adequate = sum(item['annaValue'] for item in adequate_length_data) / len(adequate_length_data)
        # Create DataFrame with results
        avg_df = pd.DataFrame(
            {
                'Category': ['Helpfulness', 'Adequate Length'],
                'Arthur': [arthur_helpfulness, arthur_adequate],
                'Anna': [anna_helpfulness, anna_adequate],
            }
        )
        return avg_df

    # Count agreement vs disagreement
    agree_count = sum(1 for item in pass_fail_agreement if item['agreement'] == 'Agree')
    disagree_count = sum(1 for item in pass_fail_agreement if item['agreement'] == 'Disagree')
    return agree_count, calculate_average_metrics, disagree_count

@app.cell
def _(plot_average_scores):
    plot_average_scores()
    return

@app.cell
def _(agree_count, calculate_average_metrics, disagree_count, np, plt):
    def interactive_evaluator_dashboard():
        """Display an interactive dashboard for evaluator analysis"""
        from IPython.display import display, Markdown, HTML

        # Display header
        display(
            HTML("""
            <div style="background-color: #f8f9fa; padding: 20px; border-radius: 10px; text-align: center; margin-bottom: 20px;">
                <h1 style="color: #333; margin-bottom: 10px;">Evaluator Comparison Analysis</h1>
                <p style="font-style: italic; color: #666;">Analyzing differences between Arthur's and Anna's evaluations</p>
            </div>
            """)
        )
        # Display Agreement Section
        display(Markdown('## Agreement Overview'))
        # Create side-by-side visualizations
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 7))
        # Agreement Pie Chart
        labels = ['Agreement', 'Disagreement']
        sizes = [agree_count, disagree_count]
        colors = ['#4CAF50', '#F44336']
        explode = (0.1, 0)
        ax1.pie(
            sizes,
            explode=explode,
            labels=labels,
            colors=colors,
            autopct='%1.1f%%',
            shadow=True,
            startangle=140,
            textprops={'fontsize': 12, 'fontweight': 'bold'},
        )
        ax1.set_title('Evaluator Pass/Fail Agreement', fontsize=16, fontweight='bold')
        # Average Scores Bar Chart
        avg_df = calculate_average_metrics()
        # Set width of bars
        bar_width = 0.35
        x = np.arange(len(avg_df))
        # Create bars
        ax2.bar(x - bar_width / 2, avg_df['Arthur'], width=bar_width, label="Arthur's Avg", color='#8884d8', edgecolor='white', linewidth=1.5)
        ax2.bar(x + bar_width / 2, avg_df['Anna'], width=bar_width, label="Anna's Avg", color='#82ca9d', edgecolor='white', linewidth=1.5)
        # Add data labels
        for i in range(len(x)):
            ax2.text(
                x[i] - bar_width / 2,
                avg_df['Arthur'][i] + 0.05,
                f'{avg_df["Arthur"][i]:.2f}',
                ha='center',
                va='bottom',
                fontweight='bold',
                fontsize=10,
            )
            ax2.text(
                x[i] + bar_width / 2,
                avg_df['Anna'][i] + 0.05,
                f'{avg_df["Anna"][i]:.2f}',
                ha='center',
                va='bottom',
                fontweight='bold',
                fontsize=10,
            )
        # Customize plot
        ax2.set_xlabel('Category', fontsize=12, fontweight='bold')
        ax2.set_ylabel('Average Score', fontsize=12, fontweight='bold')
        ax2.set_title('Average Scores by Evaluator', fontsize=16, fontweight='bold')
        ax2.set_xticks(x)
        ax2.set_xticklabels(avg_df['Category'], fontsize=12)
        ax2.set_ylim(0, 2.2)
        ax2.grid(axis='y', linestyle='--', alpha=0.7)
        ax2.legend(loc='lower center', bbox_to_anchor=(0.5, -0.25), ncol=2, fontsize=12)
        plt.tight_layout()
        display(fig)
        plt.close(fig)
        return None

    return (interactive_evaluator_dashboard,)

@app.cell
def _(interactive_evaluator_dashboard):
    interactive_evaluator_dashboard()
    return

if __name__ == "__main__":
    app.run()