Spaces:
Running
Running
import gradio as gr | |
import numpy as np | |
import matplotlib.pyplot as plt | |
from tqdm.auto import tqdm | |
import pandas as pd | |
from enum import Enum | |
from datetime import datetime, timedelta | |
plt.style.use('seaborn-v0_8-whitegrid') | |
# Define content modality types | |
class ContentModality(Enum): | |
TEXT = 1 | |
IMAGE = 2 | |
AUDIO = 3 | |
VIDEO = 4 | |
INTERACTIVE = 5 | |
MIXED = 6 | |
# Define columns for FSRS algorithm (from app.py) | |
columns = ["difficulty", "stability", "retrievability", "delta_t", | |
"reps", "lapses", "last_date", "due", "ivl", "cost", "rand"] | |
col = {key: i for i, key in enumerate(columns)} | |
first_rating_prob = np.array([0.15, 0.2, 0.6, 0.05]) | |
def moving_average(data, window_size=7): | |
"""Calculate moving average with the specified window size""" | |
weights = np.ones(window_size) / window_size | |
return np.convolve(data, weights, mode='valid') | |
# Spaced Repetition Simulation (from app.py) | |
def simulate_fsrs(w, request_retention=0.9, deck_size=10000, learn_span=100, | |
max_cost_perday=200, max_ivl=36500, recall_cost=10, | |
forget_cost=30, learn_cost=10, progress=None): | |
card_table = np.zeros((len(columns), deck_size)) | |
card_table[col["due"]] = learn_span | |
card_table[col["difficulty"]] = 1e-10 | |
card_table[col["stability"]] = 1e-10 | |
review_cnt_per_day = np.zeros(learn_span) | |
learn_cnt_per_day = np.zeros(learn_span) | |
memorized_cnt_per_day = np.zeros(learn_span) | |
def stability_after_success(s, r, d, response): | |
hard_penalty = np.where(response == 1, w[15], 1) | |
easy_bonus = np.where(response == 3, w[16], 1) | |
return s * (1 + np.exp(w[8]) * (11 - d) * np.power(s, -w[9]) * ( | |
np.exp((1 - r) * w[10]) - 1) * hard_penalty * easy_bonus) | |
def stability_after_failure(s, r, d): | |
return np.maximum(0.1, np.minimum( | |
w[11] * np.power(d, -w[12]) * (np.power(s + 1, w[13]) - 1) * np.exp((1 - r) * w[14]), s)) | |
iterator = tqdm(range(learn_span)) if progress is None else range(learn_span) | |
for today in iterator: | |
if progress is not None: | |
progress((today / learn_span) * 0.5) # Use first half of progress for FSRS | |
has_learned = card_table[col["stability"]] > 1e-10 | |
card_table[col["delta_t"]][has_learned] = today - \ | |
card_table[col["last_date"]][has_learned] | |
card_table[col["retrievability"]][has_learned] = np.power( | |
1 + card_table[col["delta_t"]][has_learned] / (9 * card_table[col["stability"]][has_learned]), -1) | |
card_table[col["cost"]] = 0 | |
need_review = card_table[col["due"]] <= today | |
card_table[col["rand"]][need_review] = np.random.rand( | |
np.sum(need_review)) | |
forget = card_table[col["rand"]] > card_table[col["retrievability"]] | |
card_table[col["cost"]][need_review & forget] = forget_cost | |
card_table[col["cost"]][need_review & ~forget] = recall_cost | |
true_review = need_review & ( | |
np.cumsum(card_table[col["cost"]]) <= max_cost_perday) | |
card_table[col["last_date"]][true_review] = today | |
card_table[col["lapses"]][true_review & forget] += 1 | |
card_table[col["reps"]][true_review & ~forget] += 1 | |
card_table[col["stability"]][true_review & forget] = stability_after_failure( | |
card_table[col["stability"]][true_review & forget], card_table[col["retrievability"]][true_review & forget], | |
card_table[col["difficulty"]][true_review & forget]) | |
review_ratings = np.random.choice([1, 2, 3], np.sum(true_review & ~forget), p=[0.3, 0.6, 0.1]) | |
card_table[col["stability"]][true_review & ~forget] = stability_after_success( | |
card_table[col["stability"]][true_review & ~forget], | |
card_table[col["retrievability"]][true_review & ~forget], | |
card_table[col["difficulty"]][true_review & ~forget], review_ratings) | |
card_table[col["difficulty"]][true_review & forget] = np.clip( | |
card_table[col["difficulty"]][true_review & forget] + 2 * w[6], 1, 10) | |
need_learn = card_table[col["due"]] == learn_span | |
card_table[col["cost"]][need_learn] = learn_cost | |
true_learn = need_learn & ( | |
np.cumsum(card_table[col["cost"]]) <= max_cost_perday) | |
card_table[col["last_date"]][true_learn] = today | |
first_ratings = np.random.choice(4, np.sum(true_learn), p=first_rating_prob) | |
card_table[col["stability"]][true_learn] = np.choose( | |
first_ratings, w[:4]) | |
card_table[col["difficulty"]][true_learn] = w[4] - \ | |
w[5] * (first_ratings - 3) | |
card_table[col["ivl"]][true_review | true_learn] = np.clip(np.round( | |
9 * card_table[col["stability"]][true_review | true_learn] * (1 / request_retention - 1), 0), 1, max_ivl) | |
card_table[col["due"]][true_review | true_learn] = today + \ | |
card_table[col["ivl"]][true_review | true_learn] | |
review_cnt_per_day[today] = np.sum(true_review) | |
learn_cnt_per_day[today] = np.sum(true_learn) | |
memorized_cnt_per_day[today] = card_table[col["retrievability"]].sum() | |
return card_table, review_cnt_per_day, learn_cnt_per_day, memorized_cnt_per_day | |
# Multimodal Learning Simulation | |
def simulate_multimodal_srs( | |
baseline_retention=0.9, | |
modality_weights=[1.0, 1.2, 0.9, 1.3, 1.4, 1.1], | |
learning_days=100, | |
cards_per_day=20, | |
initial_ease=2.5, | |
max_ease=3.5, | |
min_ease=1.3, | |
learning_rate=0.05, | |
max_cost_perday=200, | |
progress=None | |
): | |
"""Simulate the adaptive multimodal spaced repetition system over time.""" | |
# Initialize tracking arrays | |
total_cards = min(cards_per_day * learning_days, 10000) # Cap to reasonable size | |
reviews_per_day = np.zeros(learning_days) | |
retention_per_day = np.zeros(learning_days) | |
modality_usage = {mod: np.zeros(learning_days) for mod in ContentModality} | |
modality_success = {mod: np.zeros(learning_days) for mod in ContentModality} | |
# Card state tracking | |
card_ease = np.ones(total_cards) * initial_ease | |
card_interval = np.ones(total_cards) | |
card_due_day = np.zeros(total_cards) | |
card_reps = np.zeros(total_cards) | |
# When each card is introduced | |
card_intro_day = np.zeros(total_cards) | |
for i in range(total_cards): | |
card_intro_day[i] = i // cards_per_day | |
# System's belief about user preferences (starts neutral) | |
believed_modality_preference = np.ones(len(ContentModality)) | |
# User's true preferences (based on input weights) | |
true_modality_preference = np.array(modality_weights) | |
# Run the simulation | |
iterator = tqdm(range(learning_days)) if progress is None else range(learning_days) | |
for day in iterator: | |
if progress is not None: | |
progress(0.5 + (day / learning_days) * 0.5) # Use second half of progress for multimodal | |
# Find cards due today | |
due_mask = (card_due_day <= day) & (card_intro_day <= day) | |
due_cards = np.where(due_mask)[0] | |
# Track daily cost to stay within max_cost_perday | |
daily_cost = 0 | |
reviews_today = 0 | |
correct_today = 0 | |
# Randomize review order | |
if len(due_cards) > 0: | |
np.random.shuffle(due_cards) | |
# Process each due card | |
for card_id in due_cards: | |
# Check if we still have time budget | |
if daily_cost >= max_cost_perday: | |
break | |
reviews_today += 1 | |
# Choose modality based on current beliefs | |
modality_idx = np.random.choice( | |
len(ContentModality), | |
p=believed_modality_preference / believed_modality_preference.sum() | |
) | |
modality = ContentModality(modality_idx + 1) | |
# Track modality usage | |
modality_usage[modality][day] += 1 | |
# Calculate recall probability based on interval and modality | |
recall_prob = np.power(1 + card_interval[card_id] / (9 * card_ease[card_id]), -1) | |
mod_factor = true_modality_preference[modality.value - 1] | |
recall_prob = min(0.99, recall_prob * mod_factor) | |
# Simulate if user remembers card | |
remembered = np.random.random() < recall_prob | |
if remembered: | |
# Success - increase ease factor | |
card_ease[card_id] = min(max_ease, card_ease[card_id] + 0.1) | |
correct_today += 1 | |
modality_success[modality][day] += 1 | |
daily_cost += 10 # Review cost | |
# Update interval using SM-2 algorithm with modality | |
if card_reps[card_id] == 0: | |
card_interval[card_id] = 1 | |
elif card_reps[card_id] == 1: | |
card_interval[card_id] = 6 | |
else: | |
card_interval[card_id] = card_interval[card_id] * card_ease[card_id] | |
card_reps[card_id] += 1 | |
else: | |
# Failure - decrease ease factor | |
card_ease[card_id] = max(min_ease, card_ease[card_id] - 0.2) | |
card_interval[card_id] = 1 | |
card_reps[card_id] = 0 | |
daily_cost += 30 # Relearn cost | |
# Update due date | |
card_due_day[card_id] = day + max(1, int(card_interval[card_id])) | |
# Update belief about modality effectiveness | |
update_vector = np.zeros(len(ContentModality)) | |
update_vector[modality.value - 1] = learning_rate * (1 if remembered else -1) | |
believed_modality_preference += update_vector | |
# Ensure beliefs are positive | |
believed_modality_preference = np.maximum(0.1, believed_modality_preference) | |
# Add new cards if we have budget left | |
new_cards_today = 0 | |
for i in range(total_cards): | |
if card_intro_day[i] == day: | |
if daily_cost + 10 <= max_cost_perday: # Check if we can afford to learn | |
daily_cost += 10 # Learn cost | |
new_cards_today += 1 | |
else: | |
# Postpone introduction if no time left today | |
card_intro_day[i] += 1 | |
# Calculate daily stats | |
if reviews_today > 0: | |
retention_per_day[day] = correct_today / reviews_today | |
else: | |
retention_per_day[day] = 0 | |
reviews_per_day[day] = reviews_today | |
# Calculate effectiveness per modality | |
modality_effectiveness = {} | |
for mod in ContentModality: | |
usage = modality_usage[mod] | |
success = modality_success[mod] | |
effectiveness = np.zeros(learning_days) | |
for i in range(learning_days): | |
if usage[i] > 0: | |
effectiveness[i] = success[i] / usage[i] | |
modality_effectiveness[mod] = effectiveness | |
# Calculate average retention rate at the end | |
final_retention = np.mean(retention_per_day[max(0, learning_days - 10):]) | |
return { | |
'reviews_per_day': reviews_per_day, | |
'retention_per_day': retention_per_day, | |
'modality_usage': modality_usage, | |
'modality_effectiveness': modality_effectiveness, | |
'final_modality_beliefs': believed_modality_preference, | |
'true_modality_preference': true_modality_preference, | |
'final_retention': final_retention | |
} | |
def run_combined_simulation( | |
# FSRS parameters | |
fsrs_weights, | |
retrievability, | |
stability, | |
difficulty, | |
# Multimodal parameters | |
text_weight, | |
image_weight, | |
audio_weight, | |
video_weight, | |
interactive_weight, | |
mixed_weight, | |
# Shared parameters | |
target_retention, | |
learning_time, | |
learning_days, | |
deck_size, | |
max_ivl, | |
recall_cost, | |
forget_cost, | |
learn_cost, | |
learning_rate, | |
progress=gr.Progress() | |
): | |
"""Run both simulations and generate combined output""" | |
np.random.seed(42) # For reproducibility | |
# Parse FSRS weights | |
weights_str = ",".join([fsrs_weights, retrievability, stability, difficulty]).replace('[', '').replace(']', '') | |
w = list(map(lambda x: float(x.strip()), weights_str.split(','))) | |
# Calculate max cost per day in seconds | |
max_cost_perday = int(learning_time) * 60 | |
# Run FSRS simulation | |
(card_table, | |
fsrs_review_cnt, | |
fsrs_learn_cnt, | |
fsrs_memorized_cnt) = simulate_fsrs(w, | |
request_retention=float(target_retention), | |
deck_size=int(deck_size), | |
learn_span=int(learning_days), | |
max_cost_perday=max_cost_perday, | |
max_ivl=int(max_ivl), | |
recall_cost=int(recall_cost), | |
forget_cost=int(forget_cost), | |
learn_cost=int(learn_cost), | |
progress=progress) | |
# Run multimodal simulation | |
modality_weights = [ | |
float(text_weight), | |
float(image_weight), | |
float(audio_weight), | |
float(video_weight), | |
float(interactive_weight), | |
float(mixed_weight) | |
] | |
multi_results = simulate_multimodal_srs( | |
baseline_retention=float(target_retention), | |
modality_weights=modality_weights, | |
learning_days=int(learning_days), | |
cards_per_day=int(deck_size) // int(learning_days), | |
initial_ease=2.5, | |
learning_rate=float(learning_rate), | |
max_cost_perday=max_cost_perday, | |
progress=progress | |
) | |
# Create visualization plots | |
plots = create_combined_plots( | |
fsrs_review_cnt, | |
fsrs_learn_cnt, | |
fsrs_memorized_cnt, | |
multi_results, | |
int(learning_days) | |
) | |
# Generate recommendations | |
recommendations = generate_recommendations( | |
fsrs_review_cnt, | |
multi_results, | |
int(learning_days), | |
target_retention, | |
modality_weights | |
) | |
return plots + [recommendations] | |
def create_combined_plots(fsrs_review_cnt, fsrs_learn_cnt, fsrs_memorized_cnt, multi_results, learning_days): | |
"""Create visualization plots from both simulation results""" | |
# Ensure smooth window size is reasonable | |
smooth_window = min(7, learning_days // 10) | |
if smooth_window < 2: | |
smooth_window = 2 | |
# Plot 1: Review Counts Comparison | |
fig1 = plt.figure(figsize=(10, 6)) | |
ax = fig1.add_subplot(111) | |
if len(fsrs_review_cnt) > smooth_window: | |
ax.plot(moving_average(fsrs_review_cnt, smooth_window), 'b-', | |
label='Standard SRS Reviews') | |
else: | |
ax.plot(fsrs_review_cnt, 'b-', label='Standard SRS Reviews') | |
if len(multi_results['reviews_per_day']) > smooth_window: | |
ax.plot(moving_average(multi_results['reviews_per_day'], smooth_window), 'r-', | |
label='Multimodal SRS Reviews') | |
else: | |
ax.plot(multi_results['reviews_per_day'], 'r-', label='Multimodal SRS Reviews') | |
ax.set_xlabel('Day') | |
ax.set_ylabel('Number of Reviews') | |
ax.set_title('Review Counts: Standard vs Multimodal SRS') | |
ax.legend() | |
# Plot 2: Retention & Memorization | |
fig2 = plt.figure(figsize=(10, 6)) | |
ax1 = fig2.add_subplot(111) | |
if len(multi_results['retention_per_day']) > smooth_window: | |
ax1.plot(moving_average(multi_results['retention_per_day'], smooth_window), 'g-', | |
label='Multimodal Retention Rate') | |
else: | |
ax1.plot(multi_results['retention_per_day'], 'g-', label='Multimodal Retention Rate') | |
ax1.set_xlabel('Day') | |
ax1.set_ylabel('Retention Rate') | |
ax1.set_ylim(0, 1.0) | |
ax1.legend(loc='upper left') | |
ax2 = ax1.twinx() | |
ax2.plot(fsrs_memorized_cnt, 'b--', label='Standard SRS Cumulative Memorized') | |
ax2.set_ylabel('Cumulative Memorized Items') | |
ax2.legend(loc='upper right') | |
ax1.set_title('Retention Rate & Memorized Items') | |
# Plot 3: Modality Effectiveness | |
fig3 = plt.figure(figsize=(10, 6)) | |
ax = fig3.add_subplot(111) | |
for mod in ContentModality: | |
effectiveness = multi_results['modality_effectiveness'][mod] | |
if len(effectiveness) > smooth_window: | |
smooth_eff = moving_average(effectiveness, smooth_window) | |
ax.plot(range(len(smooth_eff)), smooth_eff, label=mod.name) | |
else: | |
ax.plot(effectiveness, label=mod.name) | |
ax.set_xlabel('Day') | |
ax.set_ylabel('Success Rate') | |
ax.set_ylim(0, 1.0) | |
ax.set_title('Modality Effectiveness Over Time') | |
ax.legend() | |
# Plot 4: Modality Usage Over Time | |
fig4 = plt.figure(figsize=(10, 6)) | |
ax = fig4.add_subplot(111) | |
modality_data = [] | |
mod_labels = [] | |
for mod in ContentModality: | |
usage_data = multi_results['modality_usage'][mod] | |
if len(usage_data) > smooth_window: | |
modality_data.append(moving_average(usage_data, smooth_window)) | |
else: | |
modality_data.append(usage_data) | |
mod_labels.append(mod.name) | |
modality_data = np.array(modality_data) | |
# Create stacked area plot | |
x = range(len(modality_data[0])) | |
ax.stackplot(x, modality_data, labels=mod_labels) | |
ax.set_xlabel('Day') | |
ax.set_ylabel('Number of Reviews') | |
ax.set_title('Modality Distribution Over Time') | |
ax.legend() | |
return [fig1, fig2, fig3, fig4] | |
def generate_recommendations(fsrs_review_cnt, multi_results, learning_days, target_retention, modality_weights): | |
"""Generate personalized recommendations based on simulation results""" | |
# Find most effective modalities | |
modality_avg_effectiveness = {} | |
for mod in ContentModality: | |
effectiveness = multi_results['modality_effectiveness'][mod] | |
# Calculate average of last 25% of days to get mature effectiveness | |
start_idx = max(0, int(learning_days * 0.75)) | |
avg_eff = np.mean(effectiveness[start_idx:]) if len(effectiveness) > start_idx else np.mean(effectiveness) | |
modality_avg_effectiveness[mod] = avg_eff | |
# Sort modalities by effectiveness | |
sorted_modalities = sorted(modality_avg_effectiveness.items(), key=lambda x: x[1], reverse=True) | |
# Analyze review patterns | |
avg_reviews_std = np.mean(fsrs_review_cnt) | |
peak_reviews_std = np.max(fsrs_review_cnt) | |
avg_reviews_multi = np.mean(multi_results['reviews_per_day']) | |
# Calculate efficiency gain | |
std_retention = np.mean(fsrs_review_cnt[-10:]) / np.mean(fsrs_review_cnt[:10]) if len(fsrs_review_cnt) > 10 else 1 | |
multi_retention = multi_results['final_retention'] | |
efficiency_gain = (multi_retention / float(target_retention)) / (avg_reviews_multi / avg_reviews_std) | |
# Generate recommendations | |
top_modalities = [mod.name for mod, _ in sorted_modalities[:3]] | |
# Dynamic time period calculations based on learning_days | |
total_period = learning_days | |
# Scale intervals based on learning period length | |
if total_period <= 30: # Short learning period | |
initial_interval = (1, 1) | |
second_interval = (1, 2) | |
third_interval = (2, 3) | |
long_term_start = "Week 2+" | |
elif total_period <= 90: # Medium learning period | |
initial_interval = (1, 2) | |
second_interval = (2, 4) | |
third_interval = (4, 7) | |
long_term_start = "Week 4+" | |
else: # Long learning period | |
initial_interval = (1, 3) | |
second_interval = (3, 6) | |
third_interval = (6, 10) | |
long_term_start = "Month 2+" | |
# Calculate period durations (as percentage of total learning period) | |
initial_period = max(1, int(total_period * 0.1)) # 10% of learning period | |
second_period = max(1, int(total_period * 0.15)) # 15% of learning period | |
third_period = max(1, int(total_period * 0.25)) # 25% of learning period | |
# Format period text based on learning days | |
if total_period < 14: | |
period_unit = "days" | |
initial_text = f"Days 1-{initial_period}" | |
second_text = f"Days {initial_period + 1}-{initial_period + second_period}" | |
third_text = f"Days {initial_period + second_period + 1}-{initial_period + second_period + third_period}" | |
elif total_period < 60: | |
period_unit = "weeks" | |
initial_text = f"Week 1" | |
second_text = f"Week 2" | |
third_text = f"Weeks 3-4" | |
else: | |
period_unit = "months" | |
initial_text = f"Month 1" | |
second_text = f"Month 2" | |
third_text = f"Month 3" | |
recommendation = f""" | |
# Learning Optimization Recommendations | |
## Target Retention Analysis | |
- Target retention rate: {float(target_retention):.1%} | |
- Achieved retention with multimodal approach: {multi_retention:.1%} | |
- Estimated learning efficiency gain: {efficiency_gain:.2f}x | |
## Optimal Modality Recommendations | |
Based on the simulation, the most effective learning modalities for you are: | |
1. **{top_modalities[0]}** (Primary) - Use for initial learning and difficult content | |
2. **{top_modalities[1]}** (Secondary) - Use for reinforcement and review | |
3. **{top_modalities[2]}** (Supplementary) - Use for variety and to prevent fatigue | |
## Review Schedule Optimization | |
- Optimal workload per day: {int(min(20, avg_reviews_std / 3))} | |
- Recommended review sessions: {2 if avg_reviews_std > 30 else 1} per day | |
## Spaced Repetition Strategy | |
- **{initial_text}:** Focus on using {top_modalities[0]} modality with shorter intervals ({initial_interval[0]}-{initial_interval[1]} {period_unit}) | |
- **{second_text}:** Introduce {top_modalities[1]} modality and extend intervals ({second_interval[0]}-{second_interval[1]} {period_unit}) | |
- **{third_text}:** Begin mixing in {top_modalities[2]} for variety and extend intervals ({third_interval[0]}-{third_interval[1]} {period_unit}) | |
- **{long_term_start}:** Prioritize tough content in {top_modalities[0]} format, and maintain variety with other modalities | |
## Estimated Results | |
Following this personalized approach should help you: | |
- Reduce total review time by approximately {min(75, int(100 * (1 - 1 / efficiency_gain)))}% | |
- Reach your target retention rate of {float(target_retention):.1%} or higher | |
- Maintain knowledge for longer periods with less review | |
""" | |
return recommendation | |
# Create the Gradio interface | |
title = """ | |
# CS6460-Ed Tech: Comprehensive Multimodal Spaced Repetition Learning Dashboard | |
This dashboard combines two powerful learning optimization approaches: | |
1. **Free Spaced Repetition Scheduler (FSRS)** - An advanced algorithm for optimal review timing | |
2. **Multimodal Learning System** - A system that adapts content presentation to your learning preferences | |
## Parameter Settings | |
- **Preset Parameters**: These are pre-calibrated values based on research data that define the underlying models | |
- FSRS Model Parameters: Define the mathematical model for spaced repetition intervals | |
- Multimodal Weights: Define the effectiveness of different learning modalities | |
- **Those preset parameters are for user personal learning patterns, since all of them are customized parameters, I leave default values here for now.** | |
- **Customizable Settings**: These are parameters you can adjust based on your specific learning scenario | |
- Learning Period & Time: How long and how much time per day you plan to study | |
- Target Retention: The memory retention rate you aim to achieve | |
- Knowledge Load: How much material you need to learn | |
## How to Use This Dashboard | |
1. **Configure Settings** (Parameter Settings tab): | |
- Adjust the preset parameters if you have specific data about your learning preferences | |
- Set your customizable settings based on your actual study plan and goals | |
- Click "Run Simulation" to process your configuration | |
2. **Review Analysis** (Analysis tab): | |
- Compare standard vs. multimodal review patterns | |
- Examine retention rates over time | |
- Understand which modalities are most effective for your learning style | |
- See how modality usage evolves as the system adapts to your preferences | |
3. **Apply Recommendations** (Recommendations tab): | |
- Review the personalized learning strategy based on simulation results | |
- Follow the suggested spaced repetition schedule and modality mix | |
- Apply the recommendations to your actual study plan | |
Adjust the parameters below to see how different settings affect your learning efficiency, | |
and get personalized recommendations for optimizing your study approach. | |
""" | |
with gr.Blocks() as demo: | |
gr.Markdown(title) | |
with gr.Tab("Parameter Settings"): | |
with gr.Row(): | |
with gr.Column(): | |
gr.Markdown("### Spaced Repetition (FSRS) Settings") | |
fsrs_weights = gr.Textbox( | |
label="Model Super-Parameter", | |
value="0.4, 0.6, 2.4, 5.8, 4.93, 0.94, 0.86, 0.01, 1.49, 0.14, 0.94, 2.18, 0.05, 0.34" | |
) | |
retrievability = gr.Textbox(label="Retrievability", value="0.9") | |
stability = gr.Textbox(label="Stability", value="0.95") | |
difficulty = gr.Textbox(label="Difficulty", value="1.06") | |
with gr.Column(): | |
gr.Markdown("### Multimodal Learning Settings") | |
text_weight = gr.Slider(minimum=0.5, maximum=2.0, value=1.0, step=0.1, label="Text Effectiveness") | |
image_weight = gr.Slider(minimum=0.5, maximum=2.0, value=1.2, step=0.1, label="Image Effectiveness") | |
audio_weight = gr.Slider(minimum=0.5, maximum=2.0, value=0.9, step=0.1, label="Audio Effectiveness") | |
video_weight = gr.Slider(minimum=0.5, maximum=2.0, value=1.3, step=0.1, label="Video Effectiveness") | |
interactive_weight = gr.Slider(minimum=0.5, maximum=2.0, value=1.4, step=0.1, | |
label="Interactive Effectiveness") | |
mixed_weight = gr.Slider(minimum=0.5, maximum=2.0, value=1.1, step=0.1, label="Mixed Effectiveness") | |
with gr.Row(): | |
with gr.Column(): | |
gr.Markdown("### Shared Learning Parameters (Set Customized Parameters Here)") | |
target_retention = gr.Slider( | |
label="Target Recall Rate", | |
minimum=0.7, | |
maximum=0.99, | |
value=0.9, | |
step=0.01 | |
) | |
learning_time = gr.Slider( | |
label="Learning Time Per Day (minutes)", | |
minimum=5, | |
maximum=120, | |
value=30, | |
step=5 | |
) | |
learning_days = gr.Slider( | |
label="Learning Period (days)", | |
minimum=30, | |
maximum=365, | |
value=100, | |
step=5 | |
) | |
deck_size = gr.Slider( | |
label="Knowledge Load", | |
minimum=100, | |
maximum=10000, | |
value=1000, | |
step=100 | |
) | |
with gr.Column(): | |
max_ivl = gr.Slider( | |
label="Maximum Interval (days)", | |
minimum=1, | |
maximum=365, | |
value=36, | |
step=1 | |
) | |
recall_cost = gr.Slider( | |
label="Review Cost (seconds)", | |
minimum=1, | |
maximum=60, | |
value=10, | |
step=1 | |
) | |
forget_cost = gr.Slider( | |
label="Relearn Cost (seconds)", | |
minimum=1, | |
maximum=120, | |
value=30, | |
step=1 | |
) | |
learn_cost = gr.Slider( | |
label="Learn Cost (seconds)", | |
minimum=1, | |
maximum=60, | |
value=10, | |
step=1 | |
) | |
learning_rate = gr.Slider( | |
label="Learning Rate", | |
minimum=0.01, | |
maximum=0.2, | |
value=0.05, | |
step=0.01 | |
) | |
run_btn = gr.Button("Run Simulation", variant="primary") | |
with gr.Tab("Analysis"): | |
with gr.Row(): | |
plot1 = gr.Plot(label="Review Counts: Standard vs Multimodal") | |
plot2 = gr.Plot(label="Retention Rate & Memorized Items") | |
with gr.Row(): | |
plot3 = gr.Plot(label="Modality Effectiveness Over Time") | |
plot4 = gr.Plot(label="Modality Distribution Over Time") | |
with gr.Tab("Recommendations"): | |
recommendations = gr.Markdown(label="Personalized Learning Recommendations") | |
# Connect the button to the function | |
run_btn.click( | |
fn=run_combined_simulation, | |
inputs=[ | |
fsrs_weights, retrievability, stability, difficulty, | |
text_weight, image_weight, audio_weight, video_weight, interactive_weight, mixed_weight, | |
target_retention, learning_time, learning_days, deck_size, max_ivl, | |
recall_cost, forget_cost, learn_cost, learning_rate | |
], | |
outputs=[plot1, plot2, plot3, plot4, recommendations] | |
) | |
if __name__ == "__main__": | |
demo.launch(show_error=True,share=True) |