{
  "name": "Summarization Fine-tuning Dataset",
  "description": "A dataset for fine-tuning small language models on summarization tasks",
  "format": "alpaca",
  "statistics": {
    "total_examples": 2000,
    "train_examples": 1600,
    "val_examples": 200,
    "test_examples": 200,
    "dataset_distribution": {
      "xsum": {
        "count": 2000,
        "percentage": 100.0
      }
    }
  },
  "configuration": {
    "max_tokens": 2000,
    "tokenizer": "gpt2",
    "seed": 42
  }
}