import argparse
import logging
import os
import sys
import time
import tensorflow as tf
from datasets import load_dataset
from tqdm import tqdm
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
from transformers.modeling_tf_utils import keras
from transformers.utils import is_sagemaker_dp_enabled
if os.environ.get("SDP_ENABLED") or is_sagemaker_dp_enabled():
SDP_ENABLED = True
os.environ["SAGEMAKER_INSTANCE_TYPE"] = "p3dn.24xlarge"
import smdistributed.dataparallel.tensorflow as sdp
else:
SDP_ENABLED = False
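# Manual training loop used in place of `model.fit` so that, when SageMaker data
# parallelism is enabled, the gradient tape can be wrapped in
# `sdp.DistributedGradientTape` and the model/optimizer variables broadcast from
# rank 0 after the first step. Note the loop makes a single pass over
# `train_dataset` (the `epochs` argument is accepted but not used).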
def fit(model, loss, opt, train_dataset, epochs, train_batch_size, max_steps=None):
pbar = tqdm(train_dataset)
for i, batch in enumerate(pbar):
with tf.GradientTape() as tape:
inputs, targets = batch
outputs = model(inputs, training=True)
loss_value = loss(targets, outputs.logits)
if SDP_ENABLED:
tape = sdp.DistributedGradientTape(tape, sparse_as_dense=True)
grads = tape.gradient(loss_value, model.trainable_variables)
opt.apply_gradients(zip(grads, model.trainable_variables))
pbar.set_description(f"Loss: {loss_value:.4f}")
if SDP_ENABLED and i == 0:
sdp.broadcast_variables(model.variables, root_rank=0)
sdp.broadcast_variables(opt.variables(), root_rank=0)
if max_steps and i >= max_steps:
break
train_results = {"loss": loss_value.numpy()}
return train_results
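# Builds the train/test `tf.data.Dataset` objects: tokenizes the IMDB splits to the
# tokenizer's maximum length, converts the relevant columns to dense tensors and,
# when data parallelism is enabled, shards each dataset so every worker only
# iterates over its own slice before batching.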
def get_datasets(tokenizer, train_batch_size, eval_batch_size):
# Load dataset
train_dataset, test_dataset = load_dataset("imdb", split=["train", "test"])
# Preprocess train dataset
train_dataset = train_dataset.map(
lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True
)
train_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"])
train_features = {
x: train_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length])
for x in ["input_ids", "attention_mask"]
}
tf_train_dataset = tf.data.Dataset.from_tensor_slices((train_features, train_dataset["label"]))
# Preprocess test dataset
test_dataset = test_dataset.map(
lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True
)
test_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"])
test_features = {
x: test_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length])
for x in ["input_ids", "attention_mask"]
}
tf_test_dataset = tf.data.Dataset.from_tensor_slices((test_features, test_dataset["label"]))
if SDP_ENABLED:
tf_train_dataset = tf_train_dataset.shard(sdp.size(), sdp.rank())
tf_test_dataset = tf_test_dataset.shard(sdp.size(), sdp.rank())
tf_train_dataset = tf_train_dataset.batch(train_batch_size, drop_remainder=True)
tf_test_dataset = tf_test_dataset.batch(eval_batch_size, drop_remainder=True)
return tf_train_dataset, tf_test_dataset
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Hyperparameters sent by the client are passed as command-line arguments to the script.
parser.add_argument("--epochs", type=int, default=3)
parser.add_argument("--per_device_train_batch_size", type=int, default=16)
parser.add_argument("--per_device_eval_batch_size", type=int, default=8)
parser.add_argument("--model_name_or_path", type=str)
parser.add_argument("--learning_rate", type=str, default=5e-5)
parser.add_argument("--do_train", type=bool, default=True)
parser.add_argument("--do_eval", type=bool, default=True)
parser.add_argument("--output_dir", type=str)
parser.add_argument("--max_steps", type=int, default=None)
# Data, model, and output directories
parser.add_argument("--output_data_dir", type=str, default=os.environ["SM_OUTPUT_DATA_DIR"])
parser.add_argument("--model_dir", type=str, default=os.environ["SM_MODEL_DIR"])
parser.add_argument("--n_gpus", type=str, default=os.environ["SM_NUM_GPUS"])
args, _ = parser.parse_known_args()
# Set up logging
logger = logging.getLogger(__name__)
logging.basicConfig(
level=logging.getLevelName("INFO"),
handlers=[logging.StreamHandler(sys.stdout)],
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
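# When SageMaker data parallelism is enabled, initialize the library, enable GPU
# memory growth, and pin this process to its local GPU so each worker uses a
# distinct device.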
if SDP_ENABLED:
sdp.init()
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[sdp.local_rank()], "GPU")
# Load model and tokenizer
model = TFAutoModelForSequenceClassification.from_pretrained(args.model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
# get datasets
tf_train_dataset, tf_test_dataset = get_datasets(
tokenizer=tokenizer,
train_batch_size=args.per_device_train_batch_size,
eval_batch_size=args.per_device_eval_batch_size,
)
# define optimizer and loss
optimizer = keras.optimizers.Adam(learning_rate=args.learning_rate)
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metrics = [keras.metrics.SparseCategoricalAccuracy()]
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
# Training
if args.do_train:
# train_results = model.fit(tf_train_dataset, epochs=args.epochs, batch_size=args.train_batch_size)
start_train_time = time.time()
train_results = fit(
model,
loss,
optimizer,
tf_train_dataset,
args.epochs,
args.per_device_train_batch_size,
max_steps=args.max_steps,
)
end_train_time = time.time() - start_train_time
logger.info("*** Train ***")
logger.info(f"train_runtime = {end_train_time}")
output_eval_file = os.path.join(args.output_dir, "train_results.txt")
if not SDP_ENABLED or sdp.rank() == 0:
with open(output_eval_file, "w") as writer:
logger.info("***** Train results *****")
logger.info(train_results)
for key, value in train_results.items():
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
# Evaluation
if args.do_eval and (not SDP_ENABLED or sdp.rank() == 0):
result = model.evaluate(tf_test_dataset, batch_size=args.per_device_eval_batch_size, return_dict=True)
logger.info("*** Evaluate ***")
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
logger.info(result)
for key, value in result.items():
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
# Save result
if SDP_ENABLED:
if sdp.rank() == 0:
model.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
else:
model.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
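# Illustrative invocation (hypothetical values; in practice SageMaker launches this
# script inside a training job and sets the SM_* environment variables itself):
#
#   SM_OUTPUT_DATA_DIR=/tmp/data SM_MODEL_DIR=/tmp/model SM_NUM_GPUS=1 \
#   python run_tf_dist.py --model_name_or_path distilbert-base-uncased \
#       --output_dir /tmp/model --epochs 1 --max_steps 10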
# --- end of file: transformers/tests/sagemaker/scripts/tensorflow/run_tf_dist.py ---
# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import copy
import inspect
import json
import os
import random
import tempfile
import unittest
from importlib import import_module
from math import isnan
from typing import List, Tuple
from datasets import Dataset
from transformers import is_tf_available, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import ( # noqa: F401
CaptureLogger,
_tf_gpu_memory_limit,
is_pt_tf_cross_test,
require_tf,
require_tf2onnx,
slow,
torch_device,
)
from transformers.utils import CONFIG_NAME, GENERATION_CONFIG_NAME, logging
from transformers.utils.generic import ModelOutput
logger = logging.get_logger(__name__)
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TFAutoModel,
TFAutoModelForSequenceClassification,
TFSharedEmbeddings,
)
from transformers.generation import (
TFBeamSampleDecoderOnlyOutput,
TFBeamSampleEncoderDecoderOutput,
TFBeamSearchDecoderOnlyOutput,
TFBeamSearchEncoderDecoderOutput,
TFGreedySearchDecoderOnlyOutput,
TFGreedySearchEncoderDecoderOutput,
TFSampleDecoderOnlyOutput,
TFSampleEncoderDecoderOutput,
)
from transformers.modeling_tf_utils import keras
tf.config.experimental.enable_tensor_float_32_execution(False)
if _tf_gpu_memory_limit is not None:
gpus = tf.config.list_physical_devices("GPU")
for gpu in gpus:
# Restrict TensorFlow to only allocate x GB of memory on the GPUs
try:
tf.config.set_logical_device_configuration(
gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)]
)
logical_gpus = tf.config.list_logical_devices("GPU")
print("Logical GPUs", logical_gpus)
except RuntimeError as e:
# Virtual devices must be set before GPUs have been initialized
print(e)
if is_torch_available():
import torch
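# Returns a copy of `config` with every `*_range`/`*_std` attribute set to 0.0, so
# weight initialization is deterministic (used below to avoid NaNs in the
# head-masking test).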
def _config_zero_init(config):
configs_no_init = copy.deepcopy(config)
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key:
setattr(configs_no_init, key, 0.0)
return configs_no_init
@require_tf
class TFModelTesterMixin:
model_tester = None
all_model_classes = ()
all_generative_model_classes = ()
test_mismatched_shapes = True
test_resize_embeddings = True
test_head_masking = True
is_encoder_decoder = False
has_attentions = True
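# Adapts the common inputs to a specific model class: tiles the inputs for
# multiple-choice models and, when `return_labels=True`, adds dummy labels with the
# shape and dtype expected by the task that the model class belongs to.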
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
inputs_dict = copy.deepcopy(inputs_dict)
if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
inputs_dict = {
k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
if isinstance(v, tf.Tensor) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in [
*get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING),
*get_values(TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING),
]:
inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in [
*get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
*get_values(TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
]:
inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING):
inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in [
*get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
*get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING),
*get_values(TF_MODEL_FOR_MASKED_LM_MAPPING),
*get_values(TF_MODEL_FOR_PRETRAINING_MAPPING),
*get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING),
*get_values(TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING),
] and "labels" in dict(inspect.signature(model_class.call).parameters):
inputs_dict["labels"] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32
)
elif model_class in get_values(TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING):
num_patches = self.model_tester.image_size // self.model_tester.patch_size
inputs_dict["bool_masked_pos"] = tf.zeros(
(self.model_tester.batch_size, num_patches**2), dtype=tf.int32
)
elif model_class in get_values(TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING):
batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape
inputs_dict["labels"] = tf.zeros((self.model_tester.batch_size, height, width), dtype=tf.int32)
elif model_class.__name__.endswith("ForCTC"):
# When we have enough CTC models for an AutoClass, we should use their mapping instead of name checks
inputs_dict["labels"] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32
)
return inputs_dict
def test_initialization(self):
pass
def test_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=False)
# the config file (and the generation config file, if it can generate) should be saved
self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME)))
self.assertEqual(
model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME))
)
model = model_class.from_pretrained(tmpdirname)
after_outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assert_outputs_same(after_outputs, outputs)
def test_save_load_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
model_config = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(model_config)
new_model = model_class.from_config(model.get_config())
# make sure it also accepts a normal config
_ = model_class.from_config(model.config)
_ = new_model(self._prepare_for_class(inputs_dict, model_class)) # Build model
new_model.set_weights(model.get_weights())
after_outputs = new_model(self._prepare_for_class(inputs_dict, model_class))
self.assert_outputs_same(after_outputs, outputs)
@slow
def test_saved_model_creation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = False
config.output_attentions = False
if hasattr(config, "use_cache"):
config.use_cache = False
model_class = self.all_model_classes[0]
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
model(class_inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=True)
saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
self.assertTrue(os.path.exists(saved_model_dir))
def test_prepare_serving_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = self.has_attentions
for model_class in self.all_model_classes:
model = model_class(config)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(inputs)
serving_outputs = model.serving_output(outputs)
for k, v in serving_outputs.items():
# Check that we have one of three possible outputs: None, tuple of tensors or a tensor
if isinstance(v, tuple):
self.assertTrue(all(isinstance(elem, tf.Tensor) for elem in v))
elif v is not None:
self.assertIsInstance(v, tf.Tensor)
else:
self.assertIsNone(v)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model.config.is_encoder_decoder:
expected_arg_names = [
"input_ids",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
expected_arg_names.extend(["decoder_position_ids"] if "decoder_position_ids" in arg_names else [])
expected_arg_names.extend(
["head_mask", "decoder_head_mask"] if "head_mask" and "decoder_head_mask" in arg_names else []
)
expected_arg_names.extend(
["cross_attn_head_mask", "encoder_outputs"]
if "cross_attn_head_mask" in arg_names
else ["encoder_outputs"]
)
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
else:
expected_arg_names = ["input_ids"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_onnx_compliancy(self):
if not self.test_onnx:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"ReadVariableOp",
"ResourceGather",
"TruncatedNormal",
"VarHandleOp",
"VarIsInitializedOp",
]
onnx_ops = []
with open(os.path.join(".", "utils", "tf_ops", "onnx.json")) as f:
onnx_opsets = json.load(f)["opsets"]
for i in range(1, self.onnx_min_opset + 1):
onnx_ops.extend(onnx_opsets[str(i)])
for model_class in self.all_model_classes:
model_op_names = set()
with tf.Graph().as_default() as g:
model = model_class(config)
model.build_in_name_scope()
for op in g.get_operations():
model_op_names.add(op.node_def.op)
model_op_names = sorted(model_op_names)
incompatible_ops = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(op)
self.assertEqual(len(incompatible_ops), 0, incompatible_ops)
# `tf2onnx` issue page: https://github.com/onnx/tensorflow-onnx/issues/2172
# TODO: undo skip once a fix is done in `tf2onnx`
@unittest.skip("`tf2onnx` broke with TF 2.13")
@require_tf2onnx
@slow
def test_onnx_runtime_optimize(self):
if not self.test_onnx:
return
import onnxruntime
import tf2onnx
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:2]:
model = model_class(config)
model.build_in_name_scope()
onnx_model_proto, _ = tf2onnx.convert.from_keras(model, opset=self.onnx_min_opset)
onnxruntime.InferenceSession(onnx_model_proto.SerializeToString())
def test_keras_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
tf_main_layer_classes = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(module)
if module_member_name.endswith("MainLayer")
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
for module_member in (getattr(module, module_member_name),)
if isinstance(module_member, type)
and keras.layers.Layer in module_member.__bases__
and getattr(module_member, "_keras_serializable", False)
}
for main_layer_class in tf_main_layer_classes:
# T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
if "T5" in main_layer_class.__name__:
# Take the same values as in TFT5ModelTester for this shared layer
shared = TFSharedEmbeddings(99, 32, name="shared")
config.use_cache = inputs_dict.pop("use_cache", None)
main_layer = main_layer_class(config, embed_tokens=shared)
else:
main_layer = main_layer_class(config)
symbolic_inputs = {
name: keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
model = keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
outputs = model(inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
filepath = os.path.join(tmpdirname, "keras_model.h5")
model.save(filepath)
if "T5" in main_layer_class.__name__:
model = keras.models.load_model(
filepath,
custom_objects={
main_layer_class.__name__: main_layer_class,
"TFSharedEmbeddings": TFSharedEmbeddings,
},
)
else:
model = keras.models.load_model(
filepath, custom_objects={main_layer_class.__name__: main_layer_class}
)
assert isinstance(model, keras.Model)
after_outputs = model(inputs_dict)
self.assert_outputs_same(after_outputs, outputs)
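# Compares the first output tensor of two forward passes: shapes must match and,
# after dropping NaN entries, the maximum absolute difference must not exceed 1e-5.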
def assert_outputs_same(self, after_outputs, outputs):
# Make sure we don't have nans
if isinstance(after_outputs, tf.Tensor):
out_1 = after_outputs.numpy()
elif isinstance(after_outputs, dict):
out_1 = after_outputs[list(after_outputs.keys())[0]].numpy()
else:
out_1 = after_outputs[0].numpy()
out_2 = outputs[0].numpy()
self.assertEqual(out_1.shape, out_2.shape)
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
# Don't copy this method to model specific test file!
# TODO: remove this method once the issues are all fixed!
def _make_attention_mask_non_null(self, inputs_dict):
"""Make sure no sequence has all zeros as attention mask"""
for k in ["attention_mask", "encoder_attention_mask", "decoder_attention_mask"]:
if k in inputs_dict:
attention_mask = inputs_dict[k]
# Make sure no all 0s attention masks - to avoid failure at this moment.
# Put `1` at the beginning of sequences to make it still work when combining causal attention masks.
# TODO: remove this line once a fix regarding large negative values for attention mask is done.
attention_mask = tf.concat(
[tf.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], axis=-1
)
# Here we make the first sequence with all 0s as attention mask.
# Currently, this will fail for `TFWav2Vec2Model`. This is caused by the different large negative
# values, like `-1e4`, `-1e9`, `-1e30` and `-inf`, used for the attention mask across models/frameworks.
# TODO: enable this block once the large negative values thing is cleaned up.
# (see https://github.com/huggingface/transformers/issues/14859)
# attention_mask = tf.concat(
# [
# tf.zeros_like(attention_mask[:1], dtype=tf.int32),
# tf.cast(attention_mask[1:], dtype=tf.int32)
# ],
# axis=0
# )
inputs_dict[k] = attention_mask
# Don't copy this method to model specific test file!
# TODO: remove this method once the issues are all fixed!
def _postprocessing_to_ignore_test_cases(self, tf_outputs, pt_outputs, model_class):
"""For temporarily ignoring some failed test cases (issues to be fixed)"""
tf_keys = {k for k, v in tf_outputs.items() if v is not None}
pt_keys = {k for k, v in pt_outputs.items() if v is not None}
key_differences = tf_keys.symmetric_difference(pt_keys)
if model_class.__name__ in [
"TFFlaubertWithLMHeadModel",
"TFFunnelForPreTraining",
"TFElectraForPreTraining",
"TFXLMWithLMHeadModel",
]:
for k in key_differences:
if k in ["loss", "losses"]:
tf_keys.discard(k)
pt_keys.discard(k)
elif model_class.__name__.startswith("TFGPT2"):
# `TFGPT2` has `past_key_values` as a tensor while `GPT2` has it as a tuple.
tf_keys.discard("past_key_values")
pt_keys.discard("past_key_values")
# create new outputs from the remaining fields
new_tf_outputs = type(tf_outputs)(**{k: tf_outputs[k] for k in tf_keys})
new_pt_outputs = type(pt_outputs)(**{k: pt_outputs[k] for k in pt_keys})
return new_tf_outputs, new_pt_outputs
def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
"""Check the outputs from PyTorch and TensorFlow models are close enough. Checks are done in a recursive way.
Args:
model_class: The class of the model that is currently testing. For example, `TFBertModel`,
TFBertForMaskedLM`, `TFBertForSequenceClassification`, etc. Mainly used for providing more informative
error messages.
name (`str`): The name of the output. For example, `output.hidden_states`, `output.attentions`, etc.
attributes (`Tuple[str]`): The names of the output's element if the output is a tuple/list with each element
being a named field in the output.
"""
self.assertEqual(type(name), str)
if attributes is not None:
self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`")
# Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`).
if isinstance(tf_outputs, ModelOutput):
self.assertTrue(
isinstance(pt_outputs, ModelOutput),
f"{name}: `pt_outputs` should an instance of `ModelOutput` when `tf_outputs` is",
)
# Don't copy this block to model specific test file!
# TODO: remove this method and this line after issues are fixed
tf_outputs, pt_outputs = self._postprocessing_to_ignore_test_cases(tf_outputs, pt_outputs, model_class)
tf_keys = [k for k, v in tf_outputs.items() if v is not None]
pt_keys = [k for k, v in pt_outputs.items() if v is not None]
self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch")
# convert to the case of `tuple`
# appending each key to the current (string) `names`
attributes = tuple([f"{name}.{k}" for k in tf_keys])
self.check_pt_tf_outputs(
tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes
)
# Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.)
elif type(tf_outputs) in [tuple, list]:
self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch")
self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch")
if attributes is not None:
# case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`)
self.assertEqual(
len(attributes),
len(tf_outputs),
f"{name}: The tuple `names` should have the same length as `tf_outputs`",
)
else:
# case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `names`
attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))])
for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes):
self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr)
elif isinstance(tf_outputs, tf.Tensor):
self.assertTrue(
isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should be a tensor when `tf_outputs` is"
)
tf_outputs = tf_outputs.numpy()
pt_outputs = pt_outputs.detach().to("cpu").numpy()
self.assertEqual(
tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch"
)
# deal with NumPy's scalars to make replacing nan values by 0 work.
if np.isscalar(tf_outputs):
tf_outputs = np.array([tf_outputs])
pt_outputs = np.array([pt_outputs])
tf_nans = np.isnan(tf_outputs)
pt_nans = np.isnan(pt_outputs)
pt_outputs[tf_nans] = 0
tf_outputs[tf_nans] = 0
pt_outputs[pt_nans] = 0
tf_outputs[pt_nans] = 0
max_diff = np.amax(np.abs(tf_outputs - pt_outputs))
self.assertLessEqual(max_diff, tol, f"{name}: Difference between torch and tf is {max_diff} (>= {tol}).")
else:
raise ValueError(
"`tf_outputs` should be an instance of `tf.Tensor`, a `tuple`, or an instance of `tf.Tensor`. Got"
f" {type(tf_outputs)} instead."
)
def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict):
pt_inputs_dict = {}
for name, key in tf_inputs_dict.items():
if isinstance(key, bool):
pt_inputs_dict[name] = key
elif name == "input_values":
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
elif name == "pixel_values":
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
elif name == "input_features":
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
# other general float inputs
elif tf_inputs_dict[name].dtype.is_floating:
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
else:
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)
return pt_inputs_dict
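# Runs the same inputs through the TF and PyTorch models (PyTorch in eval mode on
# `torch_device`), reduces the TF loss to a scalar to match PyTorch, and compares
# the outputs recursively with `check_pt_tf_outputs`.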
def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
pt_inputs_dict = self.prepare_pt_inputs_from_tf_inputs(tf_inputs_dict)
# send pytorch inputs to the correct device
pt_inputs_dict = {
k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items()
}
# send pytorch model to the correct device
pt_model.to(torch_device)
# Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
pt_model.eval()
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs_dict)
tf_outputs = tf_model(tf_inputs_dict)
# tf models returned loss is usually a tensor rather than a scalar.
# (see `hf_compute_loss`: it uses `keras.losses.Reduction.NONE`)
# Change it here to a scalar to match PyTorch models' loss
tf_loss = getattr(tf_outputs, "loss", None)
if tf_loss is not None:
tf_outputs.loss = tf.math.reduce_mean(tf_loss)
self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(tf_model))
@is_pt_tf_cross_test
def test_pt_tf_model_equivalence(self, allow_missing_keys=False):
import transformers
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Output all for aggressive testing
config.output_hidden_states = True
config.output_attentions = self.has_attentions
# Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency
# of the masking values `-1e4`, `-1e9`, `-1e30`, `-inf` across models.
# TODO: Use a uniform value for all models, make sure all tests pass without this processing, and remove it.
self._make_attention_mask_non_null(inputs_dict)
pt_model_class_name = model_class.__name__[2:] # Skip the "TF" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
tf_model = model_class(config)
pt_model = pt_model_class(config)
tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
tf_inputs_dict_with_labels = self._prepare_for_class(
inputs_dict,
model_class,
# Not all models accept "labels" in the forward pass (yet :) )
return_labels=True if "labels" in inspect.signature(model_class.call).parameters.keys() else False,
)
# For some models (e.g. base models), there is no label returned.
# Set the input dict to `None` to avoid checking outputs twice for the same input dicts.
if not set(tf_inputs_dict_with_labels.keys()).symmetric_difference(tf_inputs_dict.keys()):
tf_inputs_dict_with_labels = None
# Check we can load pt model in tf and vice-versa with model => model functions
tf_model = transformers.load_pytorch_model_in_tf2_model(
tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys
)
pt_model = transformers.load_tf2_model_in_pytorch_model(
pt_model, tf_model, allow_missing_keys=allow_missing_keys
)
# Original test: check without `labels`
self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
# check with `labels`
if tf_inputs_dict_with_labels:
self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict_with_labels)
# Check we can load pt model in tf and vice-versa with checkpoint => model functions
with tempfile.TemporaryDirectory() as tmpdirname:
pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
torch.save(pt_model.state_dict(), pt_checkpoint_path)
tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(
tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys
)
tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
tf_model.save_weights(tf_checkpoint_path)
pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(
pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys
)
# Original test: check without `labels`
self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
# check with `labels`
if tf_inputs_dict_with_labels:
self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict_with_labels)
@slow
def test_compile_tf_model(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:2]:
# Prepare our model
model = model_class(config)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
functional_inputs = {
key: keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
outputs_dict = model(functional_inputs)
hidden_states = outputs_dict[0]
# Compile extended model
functional_model = keras.Model(inputs=functional_inputs, outputs=hidden_states)
model_out = functional_model.predict(model.dummy_inputs) # Check we can pass inputs with the Keras API
self.assertTrue(model_out is not None)
with tempfile.TemporaryDirectory() as tmpdirname:
functional_model.save(tmpdirname) # Ensure we can save/export the whole functional model
def test_keyword_and_dict_args(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs_dict = model(inputs)
inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
outputs_keywords = model(**inputs_keywords)
output_dict = outputs_dict[0].numpy()
output_keywords = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)
def test_attention_outputs(self):
if not self.has_attentions:
self.skipTest(reason="Model does not output attentions")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
def check_decoder_attentions_output(outputs):
out_len = len(outputs)
self.assertEqual(min(out_len % 2, out_len % 5), 0) # differentiation due to newly added cross_attentions
decoder_attentions = outputs.decoder_attentions
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
def check_encoder_attentions_output(outputs):
attentions = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
config.output_hidden_states = False
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
out_len = len(outputs)
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
if self.is_encoder_decoder:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_decoder_attentions_output(outputs)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
self.assertEqual(model.config.output_hidden_states, True)
check_encoder_attentions_output(outputs)
def test_headmasking(self):
if not self.test_head_masking:
return
random.Random().seed(42)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
random.Random().seed()
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
# Prepare head_mask
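# The mask disables the first head in the first layer, disables all but the last
# head in the last layer, and leaves every head active in the intermediate layers.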
def prepare_layer_head_mask(i, attention_heads, num_hidden_layers):
if i == 0:
return tf.concat(
(tf.zeros(1, dtype=tf.float32), tf.ones(attention_heads - 1, dtype=tf.float32)), 0
)
elif i == num_hidden_layers - 1:
return tf.concat(
(tf.zeros(attention_heads - 1, dtype=tf.float32), tf.ones(1, dtype=tf.float32)), 0
)
else:
return tf.ones(attention_heads, dtype=tf.float32)
head_mask = tf.stack(
[
prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers)
for i in range(config.num_hidden_layers)
],
0,
)
inputs = self._prepare_for_class(inputs_dict, model_class).copy()
inputs["head_mask"] = head_mask
if model.config.is_encoder_decoder:
signature = inspect.signature(model.call)
arg_names = [*signature.parameters.keys()]
if "decoder_head_mask" in arg_names: # necessary diferentiation because of T5 model
inputs["decoder_head_mask"] = head_mask
if "cross_attn_head_mask" in arg_names:
inputs["cross_attn_head_mask"] = head_mask
outputs = model(**inputs, return_dict=True)
def check_attentions_validity(attentions):
# Remove Nan
for t in attentions:
self.assertLess(
(tf.math.reduce_sum(tf.cast(tf.math.is_nan(t), tf.float32))).numpy(), (tf.size(t) / 4).numpy()
) # Check we don't have more than 25% nans (arbitrary)
attentions = [
tf.where(tf.math.is_nan(t), 0.0, t) for t in attentions
] # remove them (the test is less complete)
self.assertAlmostEqual(tf.math.reduce_sum(attentions[0][..., 0, :, :]).numpy(), 0.0)
self.assertNotEqual(tf.math.reduce_sum(attentions[0][..., -1, :, :]).numpy(), 0.0)
if len(attentions) > 2:  # encoder-decoder models have only 2 layers in each module
self.assertNotEqual(tf.math.reduce_sum(attentions[1][..., 0, :, :]).numpy(), 0.0)
self.assertAlmostEqual(tf.math.reduce_sum(attentions[-1][..., -2, :, :]).numpy(), 0.0)
self.assertNotEqual(tf.math.reduce_sum(attentions[-1][..., -1, :, :]).numpy(), 0.0)
if model.config.is_encoder_decoder:
check_attentions_validity(outputs.encoder_attentions)
check_attentions_validity(outputs.decoder_attentions)
if "cross_attn_head_mask" in arg_names:
check_attentions_validity(outputs.cross_attentions)
else:
check_attentions_validity(outputs.attentions)
def test_hidden_states_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_hidden_states_output(config, inputs_dict, model_class):
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
if model.config.is_encoder_decoder:
encoder_hidden_states = outputs.encoder_hidden_states
decoder_hidden_states = outputs.decoder_hidden_states
self.assertEqual(config.output_attentions, False)
self.assertEqual(len(encoder_hidden_states), expected_num_layers)
self.assertListEqual(
list(encoder_hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
self.assertEqual(len(decoder_hidden_states), expected_num_layers)
self.assertListEqual(
list(decoder_hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
else:
hidden_states = outputs.hidden_states
self.assertEqual(config.output_attentions, False)
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(config, inputs_dict, model_class)
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(config, inputs_dict, model_class)
def test_model_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
text_in_text_out_models = (
get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING)
+ get_values(TF_MODEL_FOR_MASKED_LM_MAPPING)
+ get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
)
speech_in_text_out_models = get_values(TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING)
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), keras.layers.Layer)
legacy_text_in_text_out = model.get_lm_head() is not None
if model_class in text_in_text_out_models or legacy_text_in_text_out:
out_embeddings = model.get_output_embeddings()
self.assertIsInstance(out_embeddings, keras.layers.Layer)
bias = model.get_bias()
if bias is not None:
self.assertIsInstance(bias, dict)
for _, v in bias.items():
self.assertIsInstance(v, tf.Variable)
elif model_class in speech_in_text_out_models:
out_embeddings = model.get_output_embeddings()
self.assertIsInstance(out_embeddings, keras.layers.Layer)
bias = model.get_bias()
self.assertIsNone(bias)
else:
out_embeddings = model.get_output_embeddings()
assert out_embeddings is None
bias = model.get_bias()
self.assertIsNone(bias)
def test_determinism(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
first, second = (
model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
)
out_1 = first.numpy()
out_2 = second.numpy()
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(tuple_object, dict_object)),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
),
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
if self.has_attentions:
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
# Not all models accept "labels" in the forward pass (yet :) )
if "labels" in inspect.signature(model.call).parameters.keys():
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
if self.has_attentions:
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(
model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
)
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
inputs = copy.deepcopy(inputs_dict)
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids)
else:
inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids)
inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids)
inputs = self._prepare_for_class(inputs, model_class)
model(inputs)
def test_numpy_arrays_inputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def prepare_numpy_arrays(inputs_dict):
inputs_np_dict = {}
for k, v in inputs_dict.items():
if tf.is_tensor(v):
inputs_np_dict[k] = v.numpy()
else:
inputs_np_dict[k] = np.array(v)
return inputs_np_dict
for model_class in self.all_model_classes:
model = model_class(config)
inputs = self._prepare_for_class(inputs_dict, model_class)
inputs_np = prepare_numpy_arrays(inputs)
output_for_dict_input = model(inputs_np)
output_for_kw_input = model(**inputs_np)
self.assert_outputs_same(output_for_dict_input, output_for_kw_input)
def test_valid_input_signature_and_dummies(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
call_args = inspect.signature(model.call).parameters
for key in model.input_signature:
self.assertIn(key, call_args)
for key in model.dummy_inputs:
self.assertIn(key, call_args)
def test_resize_token_embeddings(self):
# TODO (joao): after the embeddings refactor is complete, rework this test so as to rely exclusively on
# keras.layers.Embedding
if not self.test_resize_embeddings:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
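# Helper that returns the word-embedding weights: for a plain `keras.layers.Embedding`
# the model is built first so the weights exist; otherwise the model's own
# `_get_word_embedding_weight` is used.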
def _get_word_embedding_weight(model, embedding_layer):
if isinstance(embedding_layer, keras.layers.Embedding):
# builds the embeddings layer
model.build_in_name_scope()
return embedding_layer.embeddings
else:
return model._get_word_embedding_weight(embedding_layer)
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
# build the embeddings
model = model_class(config=copy.deepcopy(config)) # `resize_token_embeddings` mutates `config`
old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
old_bias = model.get_bias()
old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
# reshape the embeddings
model.resize_token_embeddings(size)
new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
new_bias = model.get_bias()
new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
# check that the resized embeddings size matches the desired size.
assert_size = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0], assert_size)
# check that weights remain the same after resizing
models_equal = True
for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_bias is not None and new_bias is not None:
for old_weight, new_weight in zip(old_bias.values(), new_bias.values()):
self.assertEqual(new_weight.shape[-1], assert_size)
models_equal = True
for p1, p2 in zip(tf.squeeze(old_weight), tf.squeeze(new_weight)):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0], assert_size)
self.assertEqual(new_output_embeddings.shape[1], old_output_embeddings.shape[1])
models_equal = True
for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
# TODO (Joao): this test is not slow, but it's tagged as such to keep track of failures on the scheduled CI runs,
# while passing push CI. Fix the underlying issues and remove the tag.
@slow
def test_save_load_after_resize_token_embeddings(self):
if not self.test_resize_embeddings:
return
config, original_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# create a model with resized (expanded) embeddings
new_tokens_size = 10
old_total_size = config.vocab_size
new_total_size = old_total_size + new_tokens_size
model = model_class(config=copy.deepcopy(config)) # `resize_token_embeddings` mutates `config`
model.build_in_name_scope()
model.resize_token_embeddings(new_total_size)
# fetch the output for an input exclusively made of new members of the vocabulary
inputs_dict = copy.deepcopy(original_inputs_dict)
ids_feat_name = None
if "input_ids" in inputs_dict:
ids_feat_name = "input_ids"
elif "decoder_input_ids" in inputs_dict:
ids_feat_name = "decoder_input_ids"
else:
assert False, "No input ids feature found in the inputs dict"
new_vocab_input_ids = ids_tensor(inputs_dict[ids_feat_name].shape, new_tokens_size)
new_vocab_input_ids += old_total_size
inputs_dict[ids_feat_name] = new_vocab_input_ids
if "input_ids" in inputs_dict:
inputs_dict["input_ids"] = new_vocab_input_ids
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"] = new_vocab_input_ids
prepared_inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**prepared_inputs)
# save and load the model
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=False)
model = model_class.from_pretrained(tmpdirname)
restored_model_outputs = model(**prepared_inputs)
# check that the output for the restored model is the same
self.assert_outputs_same(restored_model_outputs, outputs)
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
reason="This test always passes on CPU.",
)
def test_embeddings_out_of_bounds_raise_exception(self):
# TF embeddings layers don't raise an exception when an index is out of bounds on GPU, so we manually raise it.
# This test should only fail on GPU for models where we haven't added the safety check.
if not self.test_resize_embeddings:
return
config, original_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config=config)
inputs_dict = copy.deepcopy(original_inputs_dict)
if "input_ids" in inputs_dict:
inputs_dict["input_ids"] = inputs_dict["input_ids"] * int(1e9)
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"] = inputs_dict["decoder_input_ids"] * int(1e9)
prepared_inputs = self._prepare_for_class(inputs_dict, model_class)
with self.assertRaises(tf.errors.InvalidArgumentError):
model(**prepared_inputs)
def test_lm_head_model_random_no_beam_search_generate(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict.get("input_ids", None)
# iterate over all generative models
for model_class in self.all_generative_model_classes:
model = model_class(config)
if config.bos_token_id is None:
# if bos token id is not defined model needs input_ids
with self.assertRaises(ValueError):
model.generate(do_sample=True, max_length=5)
# num_return_sequences = 1
self._check_generated_ids(model.generate(input_ids, do_sample=True))
elif model_class.__name__ not in ["TFSpeech2TextForConditionalGeneration"]:
# Models with non-text inputs won't work here; num_return_sequences = 1
self._check_generated_ids(model.generate(do_sample=True, max_length=5))
with self.assertRaises(ValueError):
# generating multiple sequences when no beam search generation
# is not allowed as it would always generate the same sequences
model.generate(input_ids, do_sample=False, num_return_sequences=2)
# num_return_sequences > 1, sample
self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2))
# check bad words tokens language generation
# create list of 1-seq bad token and list of 2-seq of bad tokens
bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
output_tokens = model.generate(
input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2
)
# only count generated tokens
generated_ids = output_tokens[:, input_ids.shape[-1] :]
self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))
def test_lm_head_model_no_beam_search_generate_dict_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict.get("input_ids", None)
if input_ids is None:
input_ids = inputs_dict.get("input_features", None)
# iterate over all generative models
for model_class in self.all_generative_model_classes:
model = model_class(config)
output_greedy = model.generate(
input_ids,
do_sample=False,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
output_sample = model.generate(
input_ids,
do_sample=True,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_greedy, TFGreedySearchEncoderDecoderOutput)
self.assertIsInstance(output_sample, TFSampleEncoderDecoderOutput)
else:
self.assertIsInstance(output_greedy, TFGreedySearchDecoderOnlyOutput)
self.assertIsInstance(output_sample, TFSampleDecoderOnlyOutput)
def test_lm_head_model_random_beam_search_generate(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict.get("input_ids", None)
for model_class in self.all_generative_model_classes:
model = model_class(config)
if config.bos_token_id is None:
# if bos token id is not defined model needs input_ids, num_return_sequences = 1
self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2))
else:
# num_return_sequences = 1
self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2))
with self.assertRaises(ValueError):
# generating more return sequences than there are beams is not possible
model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2)
# num_return_sequences > 1, sample
self._check_generated_ids(
model.generate(
input_ids,
do_sample=True,
num_beams=2,
num_return_sequences=2,
)
)
# num_return_sequences > 1, greedy
self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2))
# check bad words tokens language generation
# create list of 1-seq bad token and list of 2-seq of bad tokens
bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
output_tokens = model.generate(
input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2
)
# only count generated tokens
generated_ids = output_tokens[:, input_ids.shape[-1] :]
self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))
def test_lm_head_model_beam_search_generate_dict_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict.get("input_ids", None)
if input_ids is None:
input_ids = inputs_dict.get("input_features", None)
# iterate over all generative models
for model_class in self.all_generative_model_classes:
model = model_class(config)
output_beam_search = model.generate(
input_ids,
num_beams=2,
do_sample=False,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
output_beam_sample = model.generate(
input_ids,
num_beams=2,
do_sample=True,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_beam_search, TFBeamSearchEncoderDecoderOutput)
self.assertIsInstance(output_beam_sample, TFBeamSampleEncoderDecoderOutput)
else:
self.assertIsInstance(output_beam_search, TFBeamSearchDecoderOnlyOutput)
self.assertIsInstance(output_beam_sample, TFBeamSampleDecoderOnlyOutput)
def test_loss_computation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
# The number of elements in the loss should be the same as the number of elements in the label
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
added_label_names = sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)
if not added_label_names:
continue # This test is only for models with easily-separable labels
added_label = prepared_for_class[added_label_names[0]]
expected_loss_size = added_label.shape.as_list()[:1]
            # Test that the model correctly computes the loss with kwargs
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
possible_input_names = {"input_ids", "pixel_values", "input_features", "input_values"}
input_name = possible_input_names.intersection(set(prepared_for_class)).pop()
model_input = prepared_for_class.pop(input_name)
outputs = model(model_input, **prepared_for_class)
if not isinstance(outputs, ModelOutput) or not hasattr(outputs, "loss"):
continue
loss = outputs.loss
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
            # Test that the model correctly computes the loss when we mask some positions
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
possible_input_names = {"input_ids", "pixel_values", "input_features", "input_values"}
input_name = possible_input_names.intersection(set(prepared_for_class)).pop()
model_input = prepared_for_class.pop(input_name)
if "labels" in prepared_for_class:
labels = prepared_for_class["labels"].numpy()
if len(labels.shape) > 1 and labels.shape[1] != 1:
labels[0] = -100
prepared_for_class["labels"] = tf.convert_to_tensor(labels)
loss = model(model_input, **prepared_for_class)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
self.assertTrue(not np.any(np.isnan(loss.numpy())))
            # Test that the model correctly computes the loss with a dict
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
loss = model(prepared_for_class)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
            # Test that the model correctly computes the loss with a tuple
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
# Get keys that were added with the _prepare_for_class function
label_keys = prepared_for_class.keys() - inputs_dict.keys()
signature = inspect.signature(model.call).parameters
signature_names = list(signature.keys())
# Create a dictionary holding the location of the tensors in the tuple
tuple_index_mapping = {0: input_name}
for label_key in label_keys:
label_key_index = signature_names.index(label_key)
tuple_index_mapping[label_key_index] = label_key
sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
# Initialize a list with their default values, update the values and convert to a tuple
list_input = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default)
for index, value in sorted_tuple_index_mapping:
list_input[index] = prepared_for_class[value]
tuple_input = tuple(list_input)
# Send to model
loss = model(tuple_input[:-1])[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
def check_keras_fit_results(self, val_loss1, val_loss2, atol=1e-2, rtol=1e-3):
self.assertTrue(np.allclose(val_loss1, val_loss2, atol=atol, rtol=rtol))
@slow
def test_keras_fit(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
            # Test that the model correctly computes the loss with kwargs
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
# We also remove "return_loss" as this is covered by the train_step when using fit()
prepared_for_class = {
key: val
for key, val in prepared_for_class.items()
if key not in ("head_mask", "decoder_head_mask", "cross_attn_head_mask", "return_loss")
}
if "labels" in prepared_for_class and "decoder_input_ids" in prepared_for_class:
del prepared_for_class["decoder_input_ids"]
accuracy_classes = [
"ForPreTraining",
"ForCausalLM",
"ForMaskedLM",
"ForQuestionAnswering",
"ForMultipleChoice",
"ForSequenceClassification",
"ForTokenClassification",
"ForNextSentencePrediction",
"LMHeadModel",
]
for accuracy_class in accuracy_classes:
if model.__class__.__name__.endswith(accuracy_class):
metrics = [keras.metrics.SparseCategoricalAccuracy()]
break
else:
metrics = []
if hasattr(self.model_tester, "batch_size"):
sample_weight = tf.convert_to_tensor([0.5] * self.model_tester.batch_size, dtype=tf.float32)
else:
sample_weight = None
# Build the model so we can get some constant weights and check outputs
outputs = model(prepared_for_class)
if getattr(outputs, "loss", None) is None:
continue
model_weights = model.get_weights()
# Run eagerly to save some expensive compilation times
model.compile(optimizer=keras.optimizers.SGD(0.0), run_eagerly=True, metrics=metrics)
# Make sure the model fits without crashing regardless of where we pass the labels
history1 = model.fit(
prepared_for_class,
validation_data=prepared_for_class,
sample_weight=sample_weight,
steps_per_epoch=1,
validation_steps=1,
shuffle=False,
)
val_loss1 = history1.history["val_loss"][0]
self.assertTrue(not isnan(val_loss1))
accuracy1 = {key: val[0] for key, val in history1.history.items() if key.endswith("accuracy")}
possible_label_cols = {
"labels",
"label",
"label_ids",
"start_positions",
"start_position",
"end_positions",
"end_position",
"next_sentence_label",
}
label_names = possible_label_cols.intersection(set(prepared_for_class))
if len(label_names) == 0:
# The next tests only make sense for models with separate inputs and labels, and do not make
# sense for models that don't clearly distinguish between the two (e.g. CLIP)
return
labels = {key: val for key, val in prepared_for_class.items() if key in label_names}
inputs_minus_labels = {key: val for key, val in prepared_for_class.items() if key not in label_names}
self.assertGreater(len(inputs_minus_labels), 0)
# We reinitialize the model here even though our learning rate was zero
# because BatchNorm updates weights by means other than gradient descent.
model.set_weights(model_weights)
history2 = model.fit(
inputs_minus_labels,
labels,
validation_data=(inputs_minus_labels, labels),
sample_weight=sample_weight,
steps_per_epoch=1,
validation_steps=1,
shuffle=False,
)
val_loss2 = history2.history["val_loss"][0]
self.assertTrue(not isnan(val_loss2))
accuracy2 = {key: val[0] for key, val in history2.history.items() if key.endswith("accuracy")}
self.check_keras_fit_results(val_loss1, val_loss2)
self.assertEqual(history1.history.keys(), history2.history.keys())
for key in history1.history.keys():
if not key.startswith("val_"):
self.assertTrue("val_" + key in history1.history.keys(), "Outputs differ in train/test step!")
if metrics:
self.assertTrue(len(accuracy1) == len(accuracy2) > 0, "Missing metrics!")
def test_int_support(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
prepared_for_class = self._prepare_for_class(
inputs_dict.copy(),
model_class,
                return_labels="labels" in inspect.signature(model_class.call).parameters,
)
if not any(
tensor.dtype.is_integer for tensor in prepared_for_class.values() if isinstance(tensor, tf.Tensor)
):
return # No integer inputs means no need for this test
prepared_for_class = {
key: tf.cast(tensor, tf.int64) if isinstance(tensor, tf.Tensor) and tensor.dtype.is_integer else tensor
for key, tensor in prepared_for_class.items()
}
model = model_class(config)
model(**prepared_for_class) # No assertion, we're just checking this doesn't throw an error
int32_prepared_for_class = {
key: tf.cast(tensor, tf.int32) if isinstance(tensor, tf.Tensor) and tensor.dtype.is_integer else tensor
for key, tensor in prepared_for_class.items()
}
model(**int32_prepared_for_class) # No assertion, we're just checking this doesn't throw an error
# After testing that the model accepts all int inputs, confirm that its dummies are int32
for key, tensor in model.dummy_inputs.items():
self.assertTrue(
isinstance(tensor, tf.Tensor) or keras.backend.is_keras_tensor(tensor),
"Dummy inputs should be tf.Tensor!",
)
if tensor.dtype.is_integer:
self.assertTrue(tensor.dtype == tf.int32, "Integer dummy inputs should be tf.int32!")
# Also confirm that the input_signature uses int32
for key, tensor_spec in model.input_signature.items():
if tensor_spec.dtype.is_integer:
self.assertTrue(tensor_spec.dtype == tf.int32, "Input signatures should use tf.int32 for ints!")
def test_generate_with_headmasking(self):
attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_generative_model_classes:
model = model_class(config)
# We want to test only encoder-decoder models
if not config.is_encoder_decoder:
continue
head_masking = {
"head_mask": tf.zeros((config.encoder_layers, config.encoder_attention_heads)),
"decoder_head_mask": tf.zeros((config.decoder_layers, config.decoder_attention_heads)),
"cross_attn_head_mask": tf.zeros((config.decoder_layers, config.decoder_attention_heads)),
}
signature = inspect.signature(model.call)
if set(head_masking.keys()) < {*signature.parameters.keys()}:
continue
for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
out = model.generate(
inputs_dict["input_ids"],
num_beams=1,
                    max_length=inputs_dict["input_ids"].shape[-1] + 5,
output_attentions=True,
return_dict_in_generate=True,
**{name: mask},
)
# We check the state of decoder_attentions and cross_attentions just from the last step
attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([tf.reduce_sum(w).numpy() for w in attn_weights]), 0.0)
def test_load_with_mismatched_shapes(self):
if not self.test_mismatched_shapes:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
if model_class not in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
continue
with self.subTest(msg=f"Testing {model_class}"):
with tempfile.TemporaryDirectory() as tmp_dir:
model = model_class(config)
inputs = self._prepare_for_class(inputs_dict, model_class)
_ = model(**inputs)
model.save_pretrained(tmp_dir)
# Fails when we don't set ignore_mismatched_sizes=True
with self.assertRaises(ValueError):
new_model = TFAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42)
with self.assertRaises(ValueError):
new_model_without_prefix = TFAutoModel.from_pretrained(tmp_dir, vocab_size=10)
logger = logging.get_logger("transformers.modeling_tf_utils")
with CaptureLogger(logger) as cl:
new_model = TFAutoModelForSequenceClassification.from_pretrained(
tmp_dir, num_labels=42, ignore_mismatched_sizes=True
)
self.assertIn("the shapes did not match", cl.out)
logits = new_model(**inputs).logits
self.assertEqual(logits.shape[1], 42)
with CaptureLogger(logger) as cl:
new_model_without_prefix = TFAutoModel.from_pretrained(
tmp_dir, vocab_size=10, ignore_mismatched_sizes=True
)
self.assertIn("the shapes did not match", cl.out)
                    # Although TF models always have a prefix pointing to `MainLayer`,
                    # we still add this "without prefix" test to keep consistency between TF and PT tests.
input_ids = ids_tensor((2, 8), 10)
if self.is_encoder_decoder:
new_model_without_prefix(input_ids, decoder_input_ids=input_ids)
else:
new_model_without_prefix(input_ids)
def test_model_main_input_name(self):
for model_class in self.all_model_classes:
model_signature = inspect.signature(getattr(model_class, "call"))
# The main input is the name of the argument after `self`
observed_main_input_name = list(model_signature.parameters.keys())[1]
self.assertEqual(model_class.main_input_name, observed_main_input_name)
def test_dataset_conversion(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class, return_labels=False)
if "labels" in tf_inputs_dict:
return # This is some kinda funky decoder model that needs labels in its forward pass
tf_inputs_dict = {
key: val
for key, val in tf_inputs_dict.items()
if "head_mask" not in key and isinstance(val, tf.Tensor)
}
tf_inputs_dict["extra_unwanted_column"] = list(tf_inputs_dict.values())[0] # Use a random other tensor
input_dataset = Dataset.from_dict(tf_inputs_dict)
tf_dataset = model.prepare_tf_dataset(
input_dataset, batch_size=len(input_dataset), drop_remainder=False, shuffle=False
)
test_batch = next(iter(tf_dataset))
if isinstance(test_batch, tf.Tensor):
self.assertEqual(len(test_batch), len(input_dataset)) # Assert we didn't lose any data
elif isinstance(test_batch, dict):
# Assert we discarded the unwanted extra column but kept everything else
self.assertEqual(len(test_batch), len(input_dataset.features) - 1)
self.assertNotIn("extra_unwanted_column", test_batch)
for tensor in test_batch.values():
self.assertTrue(isinstance(tensor, tf.Tensor))
self.assertEqual(len(tensor), len(input_dataset)) # Assert we didn't lose any data
model(test_batch, training=False)
if "labels" in inspect.signature(model_class.call).parameters.keys():
tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
if "labels" not in tf_inputs_dict:
return # This model isn't giving us labels after all, don't try training with it
tf_inputs_dict = {key: val for key, val in tf_inputs_dict.items() if "head_mask" not in key}
tf_inputs_dict["extra_unwanted_column"] = list(tf_inputs_dict.values())[0] # Use a random other tensor
input_dataset = Dataset.from_dict(tf_inputs_dict)
tf_dataset = model.prepare_tf_dataset(
input_dataset, batch_size=len(input_dataset), drop_remainder=False, shuffle=False
)
test_batch, test_batch_labels = next(iter(tf_dataset))
self.assertGreater(len(test_batch_labels), 0) # Assert the labels are present
feature_columns = 1 if isinstance(test_batch, tf.Tensor) else len(test_batch)
label_columns = 1 if isinstance(test_batch_labels, tf.Tensor) else len(test_batch_labels)
# Assert we discarded the unwanted extra column but kept everything else
self.assertEqual(feature_columns + label_columns, len(input_dataset.features) - 1)
if isinstance(test_batch, dict):
self.assertNotIn("extra_unwanted_column", test_batch)
if isinstance(test_batch_labels, dict):
self.assertNotIn("extra_unwanted_column", test_batch_labels)
model.compile(optimizer="sgd", run_eagerly=True)
model.train_on_batch(test_batch, test_batch_labels)
def _test_xla_generate(self, **generate_kwargs):
def _generate_and_check_results(model, inputs_dict):
if "input_ids" in inputs_dict:
inputs = inputs_dict["input_ids"]
# make sure there are no pad tokens in prompt, which may trigger unwanted behavior
if model.generation_config.pad_token_id is not None:
if config.pad_token_id == 0:
new_pad_token = model.generation_config.pad_token_id + 1
else:
new_pad_token = model.generation_config.pad_token_id - 1
else:
new_pad_token = None
inputs = tf.where(inputs != model.generation_config.pad_token_id, inputs, new_pad_token)
elif "input_features" in inputs_dict:
inputs = inputs_dict["input_features"]
else:
raise ValueError("No valid generate input found in inputs_dict")
generated = model.generate(inputs, **generate_kwargs).numpy()
generate_xla = tf.function(model.generate, jit_compile=True)
generated_xla = generate_xla(inputs, **generate_kwargs).numpy()
            # Due to numerical instability, fail the test only if more than 10% of the input sequences give
            # different outputs between the XLA and non-XLA versions. If there are fewer than 10 examples, be
            # strict and do not allow any difference (see the worked example after this helper).
diff = [[], []]
for _generated, _generated_xla in zip(generated.tolist(), generated_xla.tolist()):
if _generated != _generated_xla:
diff[0].append(_generated)
diff[1].append(_generated_xla)
ratio = len(diff[0]) / len(generated)
if ratio > 0.1 or (len(diff[0]) > 0 and len(generated) < 10):
self.assertListEqual(diff[0], diff[1])
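        # Worked example of the tolerance above (illustrative numbers only): with 32 generated
        # sequences, up to 3 may differ between the XLA and non-XLA runs (3 / 32 < 0.1); with a
        # batch of 8 sequences (< 10 examples), any single mismatch fails the test.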
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.eos_token_id = None # Generate until max length
config.do_sample = False
# fix config for models with additional sequence-length limiting settings
for var_name in ["max_position_embeddings", "max_target_positions"]:
attr = getattr(config, var_name, None)
if attr is not None and attr < generate_kwargs["max_new_tokens"]:
try:
setattr(config, var_name, generate_kwargs["max_new_tokens"])
except NotImplementedError:
# xlnet will raise an exception when trying to set
# max_position_embeddings.
pass
model = model_class(config)
if model.supports_xla_generation:
_generate_and_check_results(model, inputs_dict)
else:
with self.assertRaises(ValueError):
_generate_and_check_results(model, inputs_dict)
def test_xla_generate_fast(self):
"""
Basic quick test for generate-compatible classes that confirms that XLA-generated tokens are the same as their
        non-XLA counterparts.
Either the model supports XLA generation and passes the inner test, or it raises an appropriate exception
"""
self._test_xla_generate(num_beams=1, num_return_sequences=1, max_new_tokens=3)
@slow
def test_xla_generate_contrastive(self):
"""
Slow and challenging version of `test_xla_generate_fast` for contrastive search -- contrastive search directly
manipulates the model cache and other outputs, and this test ensures that they are in a valid format that is
also supported by XLA.
Either the model supports XLA generation and passes the inner test, or it raises an appropriate exception
"""
self._test_xla_generate(num_beams=1, num_return_sequences=1, max_new_tokens=16, penalty_alpha=0.5, top_k=4)
@slow
def test_xla_generate_slow(self):
"""
Slow and challenging version of `test_xla_generate_fast` -- this test asks for several long sequences using
beam search, with and without XLA. The two outputs should match, and a failure in this test indicates that the
model may need further analysis if it is to be used for XLA generation.
Either the model supports XLA generation and passes the inner test, or it raises an appropriate exception
"""
self._test_xla_generate(num_beams=8, num_return_sequences=2, max_new_tokens=128)
def _generate_random_bad_tokens(self, num_bad_tokens, model):
# special tokens cannot be bad tokens
special_tokens = []
if model.config.bos_token_id is not None:
special_tokens.append(model.config.bos_token_id)
if model.config.pad_token_id is not None:
special_tokens.append(model.config.pad_token_id)
if model.config.eos_token_id is not None:
special_tokens.append(model.config.eos_token_id)
# create random bad tokens that are not special tokens
bad_tokens = []
while len(bad_tokens) < num_bad_tokens:
token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0]
if token not in special_tokens:
bad_tokens.append(token)
return bad_tokens
def _check_generated_ids(self, output_ids):
for token_id in output_ids[0].numpy().tolist():
self.assertGreaterEqual(token_id, 0)
self.assertLess(token_id, self.model_tester.vocab_size)
def _check_match_tokens(self, generated_ids, bad_words_ids):
# for all bad word tokens
for bad_word_ids in bad_words_ids:
# for all slices in batch
for generated_ids_slice in generated_ids:
# for all word idx
for i in range(len(bad_word_ids), len(generated_ids_slice)):
# if tokens match
if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids:
return True
return False
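    # Illustrative behaviour of _check_match_tokens (comment sketch, not executed): with
    # generated_ids = [[5, 3, 9, 2]] and bad_words_ids = [[3, 9]], the window
    # generated_ids[0][1:3] == [3, 9] matches at i == 3 and the helper returns True,
    # which is why the bad-words generation test asserts that it returns False.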
def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None):
"""Creates a random int32 tensor of the shape within the vocab size."""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32)
return output
def random_attention_mask(shape, rng=None, name=None, dtype=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng, name=name, dtype=dtype)
# make sure that at least one token is attended to for each batch
attn_mask = tf.concat([attn_mask[:, :-1], tf.ones_like(attn_mask[:, -1:], dtype=dtype)], axis=-1)
return attn_mask
def floats_tensor(shape, scale=1.0, rng=None, name=None, dtype=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.random() * scale)
return tf.reshape(tf.constant(values, dtype=dtype if dtype is not None else tf.float32), shape=shape)
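# Illustrative usage of the helpers above (hypothetical shapes; guarded so that importing
# this module never runs the demo).
if __name__ == "__main__":
    demo_ids = ids_tensor((2, 7), vocab_size=99)  # int32 tensor with values in [0, 98]
    demo_mask = random_attention_mask((2, 7))  # 0/1 mask whose last column is forced to 1
    demo_floats = floats_tensor((2, 3, 8, 8), scale=0.5)  # float32 tensor with values in [0, 0.5)
    print(demo_ids.shape, demo_mask.shape, demo_floats.shape)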
| transformers/tests/test_modeling_tf_common.py/0 | {
"file_path": "transformers/tests/test_modeling_tf_common.py",
"repo_id": "transformers",
"token_count": 43519
} | 417 |
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pathlib import Path
from transformers import is_vision_available, load_tool
from transformers.testing_utils import get_tests_dir
from .test_tools_common import ToolTesterMixin
if is_vision_available():
from PIL import Image
class ImageSegmentationToolTester(unittest.TestCase, ToolTesterMixin):
def setUp(self):
self.tool = load_tool("image-segmentation")
self.tool.setup()
self.remote_tool = load_tool("image-segmentation", remote=True)
def test_exact_match_arg(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.tool(image, "cat")
self.assertTrue(isinstance(result, Image.Image))
def test_exact_match_arg_remote(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.remote_tool(image, "cat")
self.assertTrue(isinstance(result, Image.Image))
def test_exact_match_kwarg(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.tool(image=image, label="cat")
self.assertTrue(isinstance(result, Image.Image))
def test_exact_match_kwarg_remote(self):
image = Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png")
result = self.remote_tool(image=image, label="cat")
self.assertTrue(isinstance(result, Image.Image))
| transformers/tests/tools/test_image_segmentation.py/0 | {
"file_path": "transformers/tests/tools/test_image_segmentation.py",
"repo_id": "transformers",
"token_count": 742
} | 418 |
# coding=utf-8
# Copyright 2018 the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
import numpy as np
from transformers.data.data_collator import default_data_collator
from transformers.testing_utils import require_accelerate, require_torch
from transformers.trainer_utils import RemoveColumnsCollator, find_executable_batch_size
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import IterableDataset
from transformers.modeling_outputs import SequenceClassifierOutput
from transformers.tokenization_utils_base import BatchEncoding
from transformers.trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
IterableDatasetShard,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
ShardSampler,
get_parameter_names,
numpy_pad_and_concatenate,
torch_pad_and_concatenate,
)
class TstLayer(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.linear1 = nn.Linear(hidden_size, hidden_size)
self.ln1 = nn.LayerNorm(hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.ln2 = nn.LayerNorm(hidden_size)
self.bias = nn.Parameter(torch.zeros(hidden_size))
def forward(self, x):
h = self.ln1(nn.functional.relu(self.linear1(x)))
h = nn.functional.relu(self.linear2(x))
return self.ln2(x + h + self.bias)
class RandomIterableDataset(IterableDataset):
# For testing, an iterable dataset of random length
def __init__(self, p_stop=0.01, max_length=1000):
self.p_stop = p_stop
self.max_length = max_length
self.generator = torch.Generator()
def __iter__(self):
count = 0
stop = False
while not stop and count < self.max_length:
yield count
count += 1
number = torch.rand(1, generator=self.generator).item()
stop = number < self.p_stop
@require_torch
class TrainerUtilsTest(unittest.TestCase):
def test_distributed_tensor_gatherer(self):
# Simulate a result with a dataset of size 21, 4 processes and chunks of lengths 2, 3, 1
world_size = 4
num_samples = 21
input_indices = [
[0, 1, 6, 7, 12, 13, 18, 19],
[2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1],
[5, 11, 17, 2],
]
predictions = np.random.normal(size=(num_samples, 13))
gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
for indices in input_indices:
gatherer.add_arrays(predictions[indices])
result = gatherer.finalize()
self.assertTrue(np.array_equal(result, predictions))
# With nested tensors
gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
for indices in input_indices:
gatherer.add_arrays([predictions[indices], [predictions[indices], predictions[indices]]])
result = gatherer.finalize()
self.assertTrue(isinstance(result, list))
self.assertEqual(len(result), 2)
self.assertTrue(isinstance(result[1], list))
self.assertEqual(len(result[1]), 2)
self.assertTrue(np.array_equal(result[0], predictions))
self.assertTrue(np.array_equal(result[1][0], predictions))
self.assertTrue(np.array_equal(result[1][1], predictions))
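        # How the chunk indices above arise (illustrative): 21 samples split across 4 processes
        # are padded to 24 slots by wrapping around to the start of the dataset, which is why
        # indices 0, 1 and 2 reappear at the end of the later chunks.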
def test_distributed_tensor_gatherer_different_shapes(self):
# Simulate a result with a dataset of size 21, 4 processes and chunks of lengths 2, 3, 1
world_size = 4
num_samples = 21
input_indices = [
[0, 1, 6, 7, 12, 13, 18, 19],
[2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1],
[5, 11, 17, 2],
]
sequence_lengths = [8, 10, 13]
predictions = np.random.normal(size=(num_samples, 13))
gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
for indices, seq_length in zip(input_indices, sequence_lengths):
gatherer.add_arrays(predictions[indices, :seq_length])
result = gatherer.finalize()
# Remove the extra samples added at the end for a round multiple of num processes.
actual_indices = [input_indices[0], input_indices[1][:-2], input_indices[2][:-1]]
for indices, seq_length in zip(actual_indices, sequence_lengths):
self.assertTrue(np.array_equal(result[indices, :seq_length], predictions[indices, :seq_length]))
# With nested tensors
predictions = np.random.normal(size=(num_samples, 13))
gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
for indices, seq_length in zip(input_indices, sequence_lengths):
gatherer.add_arrays([predictions[indices, :seq_length], predictions[indices]])
result = gatherer.finalize()
for indices, seq_length in zip(actual_indices, sequence_lengths):
self.assertTrue(np.array_equal(result[0][indices, :seq_length], predictions[indices, :seq_length]))
self.assertTrue(np.array_equal(result[1], predictions))
# Check if works if varying seq_length is second
gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
for indices, seq_length in zip(input_indices, sequence_lengths):
gatherer.add_arrays([predictions[indices], predictions[indices, :seq_length]])
result = gatherer.finalize()
self.assertTrue(np.array_equal(result[0], predictions))
for indices, seq_length in zip(actual_indices, sequence_lengths):
self.assertTrue(np.array_equal(result[1][indices, :seq_length], predictions[indices, :seq_length]))
def test_label_smoothing(self):
epsilon = 0.1
num_labels = 12
random_logits = torch.randn(4, 5, num_labels)
random_labels = torch.randint(0, num_labels, (4, 5))
loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1))
model_output = SequenceClassifierOutput(logits=random_logits)
label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels)
log_probs = -nn.functional.log_softmax(random_logits, dim=-1)
expected_loss = (1 - epsilon) * loss + epsilon * log_probs.mean()
self.assertTrue(torch.allclose(label_smoothed_loss, expected_loss))
# With a few -100 labels
random_labels[0, 1] = -100
random_labels[2, 1] = -100
random_labels[2, 3] = -100
loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1))
model_output = SequenceClassifierOutput(logits=random_logits)
label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels)
log_probs = -nn.functional.log_softmax(random_logits, dim=-1)
# Mask the log probs with the -100 labels
log_probs[0, 1] = 0.0
log_probs[2, 1] = 0.0
log_probs[2, 3] = 0.0
expected_loss = (1 - epsilon) * loss + epsilon * log_probs.sum() / (num_labels * 17)
self.assertTrue(torch.allclose(label_smoothed_loss, expected_loss))
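        # Note on the denominator above: the batch holds 4 * 5 = 20 positions, 3 of which are
        # masked with -100, leaving 17 active positions, so the smoothing term averages over
        # num_labels * 17 elements.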
def test_group_by_length(self):
# Get some inputs of random lengths
lengths = torch.randint(0, 25, (100,)).tolist()
# Put one bigger than the others to check it ends up in first position
lengths[32] = 50
indices = list(LengthGroupedSampler(4, lengths=lengths))
# The biggest element should be first
self.assertEqual(lengths[indices[0]], 50)
# The indices should be a permutation of range(100)
self.assertEqual(sorted(indices), list(range(100)))
def test_group_by_length_with_dict(self):
# Get some inputs of random lengths
data = []
for _ in range(6):
input_ids = torch.randint(0, 25, (100,)).tolist()
data.append({"input_ids": input_ids})
# Put one bigger than the others to check it ends up in first position
data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist()
indices = list(LengthGroupedSampler(4, dataset=data))
# The biggest element should be first
self.assertEqual(len(data[indices[0]]["input_ids"]), 105)
# The indices should be a permutation of range(6)
self.assertEqual(sorted(indices), list(range(6)))
def test_group_by_length_with_batch_encoding(self):
# Get some inputs of random lengths
data = []
for _ in range(6):
input_ids = torch.randint(0, 25, (100,)).tolist()
data.append(BatchEncoding({"input_ids": input_ids}))
# Put one bigger than the others to check it ends up in first position
data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist()
indices = list(LengthGroupedSampler(4, dataset=data))
# The biggest element should be first
self.assertEqual(len(data[indices[0]]["input_ids"]), 105)
# The indices should be a permutation of range(6)
self.assertEqual(sorted(indices), list(range(6)))
def test_distributed_length_grouped(self):
# Get some inputs of random lengths
lengths = torch.randint(0, 25, (100,)).tolist()
# Put one bigger than the others to check it ends up in first position
lengths[32] = 50
indices_process_0 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=0, lengths=lengths))
indices_process_1 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=1, lengths=lengths))
# The biggest element should be first
self.assertEqual(lengths[indices_process_0[0]], 50)
# The indices should be a permutation of range(100)
self.assertEqual(sorted(indices_process_0 + indices_process_1), list(range(100)))
def test_get_parameter_names(self):
model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)]))
# fmt: off
self.assertEqual(
get_parameter_names(model, [nn.LayerNorm]),
['0.linear1.weight', '0.linear1.bias', '0.linear2.weight', '0.linear2.bias', '0.bias', '1.0.linear1.weight', '1.0.linear1.bias', '1.0.linear2.weight', '1.0.linear2.bias', '1.0.bias', '1.1.linear1.weight', '1.1.linear1.bias', '1.1.linear2.weight', '1.1.linear2.bias', '1.1.bias']
)
# fmt: on
def test_distributed_sampler_with_loop(self):
batch_size = 16
for length in [23, 64, 123]:
dataset = list(range(length))
shard1 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=0)
shard2 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=1)
# Set seeds
shard1.set_epoch(0)
shard2.set_epoch(0)
# Sample
samples1 = list(shard1)
samples2 = list(shard2)
self.assertTrue(len(samples1) % batch_size == 0)
self.assertTrue(len(samples2) % batch_size == 0)
total = []
for sample1, sample2 in zip(samples1, samples2):
total += [sample1, sample2]
self.assertEqual(set(total[:length]), set(dataset))
self.assertEqual(set(total[length:]), set(total[: (len(total) - length)]))
def test_sequential_distributed_sampler(self):
batch_size = 16
for length in [23, 64, 123]:
dataset = list(range(length))
shard1 = SequentialDistributedSampler(dataset, num_replicas=2, rank=0)
shard2 = SequentialDistributedSampler(dataset, num_replicas=2, rank=1)
# Sample
samples1 = list(shard1)
samples2 = list(shard2)
total = samples1 + samples2
self.assertListEqual(total[:length], dataset)
self.assertListEqual(total[length:], dataset[: (len(total) - length)])
# With a batch_size passed
shard1 = SequentialDistributedSampler(dataset, num_replicas=2, rank=0, batch_size=batch_size)
shard2 = SequentialDistributedSampler(dataset, num_replicas=2, rank=1, batch_size=batch_size)
# Sample
samples1 = list(shard1)
samples2 = list(shard2)
self.assertTrue(len(samples1) % batch_size == 0)
self.assertTrue(len(samples2) % batch_size == 0)
total = samples1 + samples2
self.assertListEqual(total[:length], dataset)
self.assertListEqual(total[length:], dataset[: (len(total) - length)])
def check_iterable_dataset_shard(self, dataset, batch_size, drop_last, num_processes=2, epoch=0):
# Set the seed for the base dataset to get the proper reference.
dataset.generator.manual_seed(epoch)
reference = list(dataset)
shards = [
IterableDatasetShard(
dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i
)
for i in range(num_processes)
]
for shard in shards:
shard.set_epoch(epoch)
shard_lists = [list(shard) for shard in shards]
for shard in shard_lists:
# All shards have a number of samples that is a round multiple of batch size
self.assertTrue(len(shard) % batch_size == 0)
# All shards have the same number of samples
self.assertEqual(len(shard), len(shard_lists[0]))
for shard in shards:
# All shards know the total number of samples
self.assertEqual(shard.num_examples, len(reference))
observed = []
for idx in range(0, len(shard_lists[0]), batch_size):
for shard in shard_lists:
observed += shard[idx : idx + batch_size]
# If drop_last is False we loop through samples at the beginning to have a size that is a round multiple of
# batch_size
if not drop_last:
while len(reference) < len(observed):
reference += reference
self.assertListEqual(observed, reference[: len(observed)])
# Check equivalence between IterableDataset and ShardSampler
dataset.generator.manual_seed(epoch)
reference = list(dataset)
sampler_shards = [
ShardSampler(
reference, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i
)
for i in range(num_processes)
]
for shard, sampler_shard in zip(shard_lists, sampler_shards):
self.assertListEqual(shard, list(sampler_shard))
def test_iterable_dataset_shard(self):
dataset = RandomIterableDataset()
self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=2, epoch=0)
self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=2, epoch=0)
self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=3, epoch=42)
self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=3, epoch=42)
def test_iterable_dataset_shard_with_length(self):
sampler_shards = [
IterableDatasetShard(list(range(100)), batch_size=4, drop_last=True, num_processes=2, process_index=i)
for i in range(2)
]
        # Build expected shards: each process will have batches of size 4 until there are not enough elements to
        # form two full batches (so we stop at 96 = (100 // (4 * 2)) * (4 * 2))
expected_shards = [[], []]
current_shard = 0
for i in range(0, 96, 4):
expected_shards[current_shard].extend(list(range(i, i + 4)))
current_shard = 1 - current_shard
self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards)
self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards])
sampler_shards = [
IterableDatasetShard(list(range(100)), batch_size=4, drop_last=False, num_processes=2, process_index=i)
for i in range(2)
]
        # When drop_last=False, the two last batches are completed by looping back to the beginning.
expected_shards[0].extend(list(range(96, 100)))
expected_shards[1].extend(list(range(0, 4)))
self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards)
self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards])
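        # Illustrative shard pattern from the construction above: process 0 collects
        # [0-3, 8-11, ..., 88-91] and process 1 collects [4-7, 12-15, ..., 92-95]; with
        # drop_last=False the leftovers 96-99 go to process 0 and process 1 wraps back to 0-3.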
def check_shard_sampler(self, dataset, batch_size, drop_last, num_processes=2):
shards = [
ShardSampler(
dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i
)
for i in range(num_processes)
]
shard_lists = [list(shard) for shard in shards]
for shard in shard_lists:
# All shards have a number of samples that is a round multiple of batch size
self.assertTrue(len(shard) % batch_size == 0)
# All shards have the same number of samples
self.assertEqual(len(shard), len(shard_lists[0]))
observed = []
for idx in range(0, len(shard_lists[0]), batch_size):
for shard in shard_lists:
observed += shard[idx : idx + batch_size]
# If drop_last is False we loop through samples at the beginning to have a size that is a round multiple of
# batch_size
reference = copy.copy(dataset)
if not drop_last:
while len(reference) < len(observed):
reference += reference
self.assertListEqual(observed, reference[: len(observed)])
def test_shard_sampler(self):
for n_elements in [64, 123]:
dataset = list(range(n_elements))
self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=2)
self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=2)
self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=3)
self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=3)
@require_accelerate
def test_executable_batch_size(self):
batch_sizes = []
@find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=True)
def mock_training_loop_function(batch_size):
nonlocal batch_sizes
batch_sizes.append(batch_size)
if batch_size > 16:
raise RuntimeError("CUDA out of memory.")
mock_training_loop_function()
self.assertEqual(batch_sizes, [64, 32, 16])
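        # Behaviour assumed from the decorator (sketch): on a CUDA out-of-memory RuntimeError the
        # wrapped function is retried with the batch size halved, so the recorded sizes go
        # 64 -> 32 -> 16 and stop at the first size that does not raise.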
@require_accelerate
def test_executable_batch_size_no_search(self):
batch_sizes = []
@find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=False)
def mock_training_loop_function(batch_size):
nonlocal batch_sizes
batch_sizes.append(batch_size)
mock_training_loop_function()
self.assertEqual(batch_sizes, [64])
@require_accelerate
def test_executable_batch_size_with_error(self):
@find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=False)
def mock_training_loop_function(batch_size):
raise RuntimeError("CUDA out of memory.")
with self.assertRaises(RuntimeError) as cm:
mock_training_loop_function()
self.assertEqual("CUDA out of memory", cm.args[0])
def test_pad_and_concatenate_with_1d(self):
"""Tests whether pad_and_concatenate works with scalars."""
array1 = 1.0
array2 = 2.0
result = numpy_pad_and_concatenate(array1, array2)
self.assertTrue(np.array_equal(np.array([1.0, 2.0]), result))
tensor1 = torch.tensor(1.0)
tensor2 = torch.tensor(2.0)
result = torch_pad_and_concatenate(tensor1, tensor2)
self.assertTrue(torch.equal(result, torch.Tensor([1.0, 2.0])))
def test_remove_columns_collator(self):
class MockLogger:
def __init__(self) -> None:
self.called = 0
def info(self, msg):
self.called += 1
self.last_msg = msg
data_batch = [
{"col1": 1, "col2": 2, "col3": 3},
{"col1": 1, "col2": 2, "col3": 3},
]
logger = MockLogger()
remove_columns_collator = RemoveColumnsCollator(
default_data_collator, ["col1", "col2"], logger, "model", "training"
)
self.assertNotIn("col3", remove_columns_collator(data_batch))
# check that the logging message is printed out only once
remove_columns_collator(data_batch)
remove_columns_collator(data_batch)
self.assertEqual(logger.called, 1)
self.assertIn("col3", logger.last_msg)
| transformers/tests/trainer/test_trainer_utils.py/0 | {
"file_path": "transformers/tests/trainer/test_trainer_utils.py",
"repo_id": "transformers",
"token_count": 9624
} | 419 |
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import datasets
import numpy as np
import pytest
from huggingface_hub.file_download import http_get
from requests import ConnectTimeout, ReadTimeout
from tests.pipelines.test_pipelines_document_question_answering import INVOICE_URL
from transformers import is_torch_available, is_vision_available
from transformers.image_utils import ChannelDimension, get_channel_dimension_axis, make_list_of_images
from transformers.testing_utils import is_flaky, require_torch, require_vision
if is_torch_available():
import torch
if is_vision_available():
import PIL.Image
from transformers import ImageFeatureExtractionMixin
from transformers.image_utils import get_image_size, infer_channel_dimension_format, load_image
def get_random_image(height, width):
random_array = np.random.randint(0, 256, (height, width, 3), dtype=np.uint8)
return PIL.Image.fromarray(random_array)
@require_vision
class ImageFeatureExtractionTester(unittest.TestCase):
def test_conversion_image_to_array(self):
feature_extractor = ImageFeatureExtractionMixin()
image = get_random_image(16, 32)
# Conversion with defaults (rescale + channel first)
array1 = feature_extractor.to_numpy_array(image)
self.assertTrue(array1.dtype, np.float32)
self.assertEqual(array1.shape, (3, 16, 32))
# Conversion with rescale and not channel first
array2 = feature_extractor.to_numpy_array(image, channel_first=False)
self.assertTrue(array2.dtype, np.float32)
self.assertEqual(array2.shape, (16, 32, 3))
self.assertTrue(np.array_equal(array1, array2.transpose(2, 0, 1)))
# Conversion with no rescale and channel first
array3 = feature_extractor.to_numpy_array(image, rescale=False)
self.assertTrue(array3.dtype, np.uint8)
self.assertEqual(array3.shape, (3, 16, 32))
self.assertTrue(np.array_equal(array1, array3.astype(np.float32) * (1 / 255.0)))
# Conversion with no rescale and not channel first
array4 = feature_extractor.to_numpy_array(image, rescale=False, channel_first=False)
self.assertTrue(array4.dtype, np.uint8)
self.assertEqual(array4.shape, (16, 32, 3))
self.assertTrue(np.array_equal(array2, array4.astype(np.float32) * (1 / 255.0)))
def test_conversion_array_to_array(self):
feature_extractor = ImageFeatureExtractionMixin()
array = np.random.randint(0, 256, (16, 32, 3), dtype=np.uint8)
# By default, rescale (for an array of ints) and channel permute
array1 = feature_extractor.to_numpy_array(array)
self.assertTrue(array1.dtype, np.float32)
self.assertEqual(array1.shape, (3, 16, 32))
self.assertTrue(np.array_equal(array1, array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0)))
# Same with no permute
array2 = feature_extractor.to_numpy_array(array, channel_first=False)
self.assertTrue(array2.dtype, np.float32)
self.assertEqual(array2.shape, (16, 32, 3))
self.assertTrue(np.array_equal(array2, array.astype(np.float32) * (1 / 255.0)))
# Force rescale to False
array3 = feature_extractor.to_numpy_array(array, rescale=False)
self.assertTrue(array3.dtype, np.uint8)
self.assertEqual(array3.shape, (3, 16, 32))
self.assertTrue(np.array_equal(array3, array.transpose(2, 0, 1)))
# Force rescale to False and no channel permute
array4 = feature_extractor.to_numpy_array(array, rescale=False, channel_first=False)
self.assertTrue(array4.dtype, np.uint8)
self.assertEqual(array4.shape, (16, 32, 3))
self.assertTrue(np.array_equal(array4, array))
# Now test the default rescale for a float array (defaults to False)
array5 = feature_extractor.to_numpy_array(array2)
self.assertTrue(array5.dtype, np.float32)
self.assertEqual(array5.shape, (3, 16, 32))
self.assertTrue(np.array_equal(array5, array1))
def test_make_list_of_images_numpy(self):
# Test a single image is converted to a list of 1 image
images = np.random.randint(0, 256, (16, 32, 3))
images_list = make_list_of_images(images)
self.assertEqual(len(images_list), 1)
self.assertTrue(np.array_equal(images_list[0], images))
self.assertIsInstance(images_list, list)
# Test a batch of images is converted to a list of images
images = np.random.randint(0, 256, (4, 16, 32, 3))
images_list = make_list_of_images(images)
self.assertEqual(len(images_list), 4)
self.assertTrue(np.array_equal(images_list[0], images[0]))
self.assertIsInstance(images_list, list)
# Test a list of images is not modified
images = [np.random.randint(0, 256, (16, 32, 3)) for _ in range(4)]
images_list = make_list_of_images(images)
self.assertEqual(len(images_list), 4)
self.assertTrue(np.array_equal(images_list[0], images[0]))
self.assertIsInstance(images_list, list)
# Test batched masks with no channel dimension are converted to a list of masks
masks = np.random.randint(0, 2, (4, 16, 32))
masks_list = make_list_of_images(masks, expected_ndims=2)
self.assertEqual(len(masks_list), 4)
self.assertTrue(np.array_equal(masks_list[0], masks[0]))
self.assertIsInstance(masks_list, list)
@require_torch
def test_make_list_of_images_torch(self):
# Test a single image is converted to a list of 1 image
images = torch.randint(0, 256, (16, 32, 3))
images_list = make_list_of_images(images)
self.assertEqual(len(images_list), 1)
self.assertTrue(np.array_equal(images_list[0], images))
self.assertIsInstance(images_list, list)
# Test a batch of images is converted to a list of images
images = torch.randint(0, 256, (4, 16, 32, 3))
images_list = make_list_of_images(images)
self.assertEqual(len(images_list), 4)
self.assertTrue(np.array_equal(images_list[0], images[0]))
self.assertIsInstance(images_list, list)
# Test a list of images is left unchanged
images = [torch.randint(0, 256, (16, 32, 3)) for _ in range(4)]
images_list = make_list_of_images(images)
self.assertEqual(len(images_list), 4)
self.assertTrue(np.array_equal(images_list[0], images[0]))
self.assertIsInstance(images_list, list)
@require_torch
def test_conversion_torch_to_array(self):
feature_extractor = ImageFeatureExtractionMixin()
tensor = torch.randint(0, 256, (16, 32, 3))
array = tensor.numpy()
# By default, rescale (for a tensor of ints) and channel permute
array1 = feature_extractor.to_numpy_array(array)
self.assertTrue(array1.dtype, np.float32)
self.assertEqual(array1.shape, (3, 16, 32))
self.assertTrue(np.array_equal(array1, array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0)))
# Same with no permute
array2 = feature_extractor.to_numpy_array(array, channel_first=False)
self.assertTrue(array2.dtype, np.float32)
self.assertEqual(array2.shape, (16, 32, 3))
self.assertTrue(np.array_equal(array2, array.astype(np.float32) * (1 / 255.0)))
# Force rescale to False
array3 = feature_extractor.to_numpy_array(array, rescale=False)
self.assertTrue(array3.dtype, np.uint8)
self.assertEqual(array3.shape, (3, 16, 32))
self.assertTrue(np.array_equal(array3, array.transpose(2, 0, 1)))
# Force rescale to False and no channel permute
array4 = feature_extractor.to_numpy_array(array, rescale=False, channel_first=False)
self.assertTrue(array4.dtype, np.uint8)
self.assertEqual(array4.shape, (16, 32, 3))
self.assertTrue(np.array_equal(array4, array))
# Now test the default rescale for a float tensor (defaults to False)
array5 = feature_extractor.to_numpy_array(array2)
self.assertTrue(array5.dtype, np.float32)
self.assertEqual(array5.shape, (3, 16, 32))
self.assertTrue(np.array_equal(array5, array1))
def test_conversion_image_to_image(self):
feature_extractor = ImageFeatureExtractionMixin()
image = get_random_image(16, 32)
# On an image, `to_pil_image1` is a noop.
image1 = feature_extractor.to_pil_image(image)
self.assertTrue(isinstance(image, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image), np.array(image1)))
def test_conversion_array_to_image(self):
feature_extractor = ImageFeatureExtractionMixin()
array = np.random.randint(0, 256, (16, 32, 3), dtype=np.uint8)
# By default, no rescale (for an array of ints)
image1 = feature_extractor.to_pil_image(array)
self.assertTrue(isinstance(image1, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image1), array))
# If the array is channel-first, proper reordering of the channels is done.
image2 = feature_extractor.to_pil_image(array.transpose(2, 0, 1))
self.assertTrue(isinstance(image2, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image2), array))
# If the array has floating type, it's rescaled by default.
image3 = feature_extractor.to_pil_image(array.astype(np.float32) * (1 / 255.0))
self.assertTrue(isinstance(image3, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image3), array))
# You can override the default to rescale.
image4 = feature_extractor.to_pil_image(array.astype(np.float32), rescale=False)
self.assertTrue(isinstance(image4, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image4), array))
# And with floats + channel first.
image5 = feature_extractor.to_pil_image(array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0))
self.assertTrue(isinstance(image5, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image5), array))
@require_torch
def test_conversion_tensor_to_image(self):
feature_extractor = ImageFeatureExtractionMixin()
tensor = torch.randint(0, 256, (16, 32, 3))
array = tensor.numpy()
# By default, no rescale (for a tensor of ints)
image1 = feature_extractor.to_pil_image(tensor)
self.assertTrue(isinstance(image1, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image1), array))
# If the tensor is channel-first, proper reordering of the channels is done.
image2 = feature_extractor.to_pil_image(tensor.permute(2, 0, 1))
self.assertTrue(isinstance(image2, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image2), array))
# If the tensor has floating type, it's rescaled by default.
image3 = feature_extractor.to_pil_image(tensor.float() / 255.0)
self.assertTrue(isinstance(image3, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image3), array))
# You can override the default to rescale.
image4 = feature_extractor.to_pil_image(tensor.float(), rescale=False)
self.assertTrue(isinstance(image4, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image4), array))
# And with floats + channel first.
image5 = feature_extractor.to_pil_image(tensor.permute(2, 0, 1).float() * (1 / 255.0))
self.assertTrue(isinstance(image5, PIL.Image.Image))
self.assertTrue(np.array_equal(np.array(image5), array))
def test_resize_image_and_array(self):
feature_extractor = ImageFeatureExtractionMixin()
image = get_random_image(16, 32)
array = np.array(image)
# Size can be an int or a tuple of ints.
resized_image = feature_extractor.resize(image, 8)
self.assertTrue(isinstance(resized_image, PIL.Image.Image))
self.assertEqual(resized_image.size, (8, 8))
resized_image1 = feature_extractor.resize(image, (8, 16))
self.assertTrue(isinstance(resized_image1, PIL.Image.Image))
self.assertEqual(resized_image1.size, (8, 16))
# Passing an array converts it to a PIL Image.
resized_image2 = feature_extractor.resize(array, 8)
self.assertTrue(isinstance(resized_image2, PIL.Image.Image))
self.assertEqual(resized_image2.size, (8, 8))
self.assertTrue(np.array_equal(np.array(resized_image), np.array(resized_image2)))
resized_image3 = feature_extractor.resize(image, (8, 16))
self.assertTrue(isinstance(resized_image3, PIL.Image.Image))
self.assertEqual(resized_image3.size, (8, 16))
self.assertTrue(np.array_equal(np.array(resized_image1), np.array(resized_image3)))
def test_resize_image_and_array_non_default_to_square(self):
feature_extractor = ImageFeatureExtractionMixin()
heights_widths = [
# height, width
# square image
(28, 28),
(27, 27),
# rectangular image: h < w
(28, 34),
(29, 35),
# rectangular image: h > w
(34, 28),
(35, 29),
]
# single integer or single integer in tuple/list
sizes = [22, 27, 28, 36, [22], (27,)]
for (height, width), size in zip(heights_widths, sizes):
for max_size in (None, 37, 1000):
image = get_random_image(height, width)
array = np.array(image)
size = size[0] if isinstance(size, (list, tuple)) else size
# Size can be an int or a tuple of ints.
                # If size is an int, the smaller edge of the image will be matched to this number,
                # i.e., if height > width, the image will be rescaled to (size * height / width, size).
                # A worked numeric example follows after this loop.
if height < width:
exp_w, exp_h = (int(size * width / height), size)
if max_size is not None and max_size < exp_w:
exp_w, exp_h = max_size, int(max_size * exp_h / exp_w)
elif width < height:
exp_w, exp_h = (size, int(size * height / width))
if max_size is not None and max_size < exp_h:
exp_w, exp_h = int(max_size * exp_w / exp_h), max_size
else:
exp_w, exp_h = (size, size)
if max_size is not None and max_size < size:
exp_w, exp_h = max_size, max_size
resized_image = feature_extractor.resize(image, size=size, default_to_square=False, max_size=max_size)
self.assertTrue(isinstance(resized_image, PIL.Image.Image))
self.assertEqual(resized_image.size, (exp_w, exp_h))
# Passing an array converts it to a PIL Image.
resized_image2 = feature_extractor.resize(array, size=size, default_to_square=False, max_size=max_size)
self.assertTrue(isinstance(resized_image2, PIL.Image.Image))
self.assertEqual(resized_image2.size, (exp_w, exp_h))
self.assertTrue(np.array_equal(np.array(resized_image), np.array(resized_image2)))
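        # Worked example of the expectation above: for height=29, width=35, size=36 the smaller
        # edge (height) is matched to 36, giving (exp_w, exp_h) = (int(36 * 35 / 29), 36) = (43, 36);
        # with max_size=37 this is capped to (37, int(37 * 36 / 43)) = (37, 30).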
@require_torch
def test_resize_tensor(self):
feature_extractor = ImageFeatureExtractionMixin()
tensor = torch.randint(0, 256, (16, 32, 3))
array = tensor.numpy()
# Size can be an int or a tuple of ints.
resized_image = feature_extractor.resize(tensor, 8)
self.assertTrue(isinstance(resized_image, PIL.Image.Image))
self.assertEqual(resized_image.size, (8, 8))
resized_image1 = feature_extractor.resize(tensor, (8, 16))
self.assertTrue(isinstance(resized_image1, PIL.Image.Image))
self.assertEqual(resized_image1.size, (8, 16))
# Check we get the same results as with NumPy arrays.
resized_image2 = feature_extractor.resize(array, 8)
self.assertTrue(np.array_equal(np.array(resized_image), np.array(resized_image2)))
resized_image3 = feature_extractor.resize(array, (8, 16))
self.assertTrue(np.array_equal(np.array(resized_image1), np.array(resized_image3)))
def test_normalize_image(self):
feature_extractor = ImageFeatureExtractionMixin()
image = get_random_image(16, 32)
array = np.array(image)
mean = [0.1, 0.5, 0.9]
std = [0.2, 0.4, 0.6]
# PIL Image are converted to NumPy arrays for the normalization
normalized_image = feature_extractor.normalize(image, mean, std)
self.assertTrue(isinstance(normalized_image, np.ndarray))
self.assertEqual(normalized_image.shape, (3, 16, 32))
# During the conversion rescale and channel first will be applied.
expected = array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0)
np_mean = np.array(mean).astype(np.float32)[:, None, None]
np_std = np.array(std).astype(np.float32)[:, None, None]
expected = (expected - np_mean) / np_std
self.assertTrue(np.array_equal(normalized_image, expected))
def test_normalize_array(self):
feature_extractor = ImageFeatureExtractionMixin()
array = np.random.random((16, 32, 3))
mean = [0.1, 0.5, 0.9]
std = [0.2, 0.4, 0.6]
# mean and std can be passed as lists or NumPy arrays.
expected = (array - np.array(mean)) / np.array(std)
normalized_array = feature_extractor.normalize(array, mean, std)
self.assertTrue(np.array_equal(normalized_array, expected))
normalized_array = feature_extractor.normalize(array, np.array(mean), np.array(std))
self.assertTrue(np.array_equal(normalized_array, expected))
        # Normalize automatically detects whether channel-first or channel-last format is used.
array = np.random.random((3, 16, 32))
expected = (array - np.array(mean)[:, None, None]) / np.array(std)[:, None, None]
normalized_array = feature_extractor.normalize(array, mean, std)
self.assertTrue(np.array_equal(normalized_array, expected))
normalized_array = feature_extractor.normalize(array, np.array(mean), np.array(std))
self.assertTrue(np.array_equal(normalized_array, expected))
@require_torch
def test_normalize_tensor(self):
feature_extractor = ImageFeatureExtractionMixin()
tensor = torch.rand(16, 32, 3)
mean = [0.1, 0.5, 0.9]
std = [0.2, 0.4, 0.6]
# mean and std can be passed as lists or tensors.
expected = (tensor - torch.tensor(mean)) / torch.tensor(std)
normalized_tensor = feature_extractor.normalize(tensor, mean, std)
self.assertTrue(torch.equal(normalized_tensor, expected))
normalized_tensor = feature_extractor.normalize(tensor, torch.tensor(mean), torch.tensor(std))
self.assertTrue(torch.equal(normalized_tensor, expected))
        # Normalize automatically detects whether channel-first or channel-last format is used.
tensor = torch.rand(3, 16, 32)
expected = (tensor - torch.tensor(mean)[:, None, None]) / torch.tensor(std)[:, None, None]
normalized_tensor = feature_extractor.normalize(tensor, mean, std)
self.assertTrue(torch.equal(normalized_tensor, expected))
normalized_tensor = feature_extractor.normalize(tensor, torch.tensor(mean), torch.tensor(std))
self.assertTrue(torch.equal(normalized_tensor, expected))
def test_center_crop_image(self):
feature_extractor = ImageFeatureExtractionMixin()
image = get_random_image(16, 32)
# Test various crop sizes: bigger on all dimensions, on one of the dimensions only and on both dimensions.
crop_sizes = [8, (8, 64), 20, (32, 64)]
for size in crop_sizes:
cropped_image = feature_extractor.center_crop(image, size)
self.assertTrue(isinstance(cropped_image, PIL.Image.Image))
# PIL Image.size is transposed compared to NumPy or PyTorch (width first instead of height first).
expected_size = (size, size) if isinstance(size, int) else (size[1], size[0])
self.assertEqual(cropped_image.size, expected_size)
def test_center_crop_array(self):
feature_extractor = ImageFeatureExtractionMixin()
image = get_random_image(16, 32)
array = feature_extractor.to_numpy_array(image)
# Test various crop sizes: bigger on all dimensions, on one of the dimensions only and on both dimensions.
crop_sizes = [8, (8, 64), 20, (32, 64)]
for size in crop_sizes:
cropped_array = feature_extractor.center_crop(array, size)
self.assertTrue(isinstance(cropped_array, np.ndarray))
expected_size = (size, size) if isinstance(size, int) else size
self.assertEqual(cropped_array.shape[-2:], expected_size)
# Check result is consistent with PIL.Image.crop
cropped_image = feature_extractor.center_crop(image, size)
self.assertTrue(np.array_equal(cropped_array, feature_extractor.to_numpy_array(cropped_image)))
@require_torch
def test_center_crop_tensor(self):
feature_extractor = ImageFeatureExtractionMixin()
image = get_random_image(16, 32)
array = feature_extractor.to_numpy_array(image)
tensor = torch.tensor(array)
# Test various crop sizes: bigger on all dimensions, on one of the dimensions only and on both dimensions.
crop_sizes = [8, (8, 64), 20, (32, 64)]
for size in crop_sizes:
cropped_tensor = feature_extractor.center_crop(tensor, size)
self.assertTrue(isinstance(cropped_tensor, torch.Tensor))
expected_size = (size, size) if isinstance(size, int) else size
self.assertEqual(cropped_tensor.shape[-2:], expected_size)
# Check result is consistent with PIL.Image.crop
cropped_image = feature_extractor.center_crop(image, size)
self.assertTrue(torch.equal(cropped_tensor, torch.tensor(feature_extractor.to_numpy_array(cropped_image))))
@require_vision
class LoadImageTester(unittest.TestCase):
def test_load_img_url(self):
img = load_image(INVOICE_URL)
img_arr = np.array(img)
self.assertEqual(img_arr.shape, (1061, 750, 3))
@is_flaky()
def test_load_img_url_timeout(self):
with self.assertRaises((ReadTimeout, ConnectTimeout)):
load_image(INVOICE_URL, timeout=0.001)
def test_load_img_local(self):
img = load_image("./tests/fixtures/tests_samples/COCO/000000039769.png")
img_arr = np.array(img)
self.assertEqual(
img_arr.shape,
(480, 640, 3),
)
def test_load_img_base64_prefix(self):
try:
tmp_file = tempfile.mktemp()
with open(tmp_file, "wb") as f:
http_get(
"https://huggingface.co/datasets/hf-internal-testing/dummy-base64-images/raw/main/image_0.txt", f
)
with open(tmp_file, encoding="utf-8") as b64:
img = load_image(b64.read())
img_arr = np.array(img)
finally:
os.remove(tmp_file)
self.assertEqual(img_arr.shape, (64, 32, 3))
def test_load_img_base64(self):
try:
tmp_file = tempfile.mktemp()
with open(tmp_file, "wb") as f:
http_get(
"https://huggingface.co/datasets/hf-internal-testing/dummy-base64-images/raw/main/image_1.txt", f
)
with open(tmp_file, encoding="utf-8") as b64:
img = load_image(b64.read())
img_arr = np.array(img)
finally:
os.remove(tmp_file)
self.assertEqual(img_arr.shape, (64, 32, 3))
def test_load_img_rgba(self):
# we use revision="refs/pr/1" until the PR is merged
# https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")
img = load_image(dataset[0]["image"]) # img with mode RGBA
img_arr = np.array(img)
self.assertEqual(
img_arr.shape,
(512, 512, 3),
)
def test_load_img_la(self):
# we use revision="refs/pr/1" until the PR is merged
# https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")
img = load_image(dataset[1]["image"]) # img with mode LA
img_arr = np.array(img)
self.assertEqual(
img_arr.shape,
(512, 768, 3),
)
def test_load_img_l(self):
# we use revision="refs/pr/1" until the PR is merged
# https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")
img = load_image(dataset[2]["image"]) # img with mode L
img_arr = np.array(img)
self.assertEqual(
img_arr.shape,
(381, 225, 3),
)
def test_load_img_exif_transpose(self):
# we use revision="refs/pr/1" until the PR is merged
# https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")
img_without_exif_transpose = dataset[3]["image"]
img_arr_without_exif_transpose = np.array(img_without_exif_transpose)
self.assertEqual(
img_arr_without_exif_transpose.shape,
(333, 500, 3),
)
img_with_exif_transpose = load_image(dataset[3]["image"])
img_arr_with_exif_transpose = np.array(img_with_exif_transpose)
self.assertEqual(
img_arr_with_exif_transpose.shape,
(500, 333, 3),
)
class UtilFunctionTester(unittest.TestCase):
def test_get_image_size(self):
# Test we can infer the size and channel dimension of an image.
image = np.random.randint(0, 256, (32, 64, 3))
self.assertEqual(get_image_size(image), (32, 64))
image = np.random.randint(0, 256, (3, 32, 64))
self.assertEqual(get_image_size(image), (32, 64))
        # Test the channel dimension can be overridden
image = np.random.randint(0, 256, (3, 32, 64))
self.assertEqual(get_image_size(image, channel_dim=ChannelDimension.LAST), (3, 32))
def test_infer_channel_dimension(self):
# Test we fail with invalid input
with pytest.raises(ValueError):
infer_channel_dimension_format(np.random.randint(0, 256, (10, 10)))
with pytest.raises(ValueError):
infer_channel_dimension_format(np.random.randint(0, 256, (10, 10, 10, 10, 10)))
        # Test we fail if neither the first nor the last dimension is of size 3 or 1
with pytest.raises(ValueError):
infer_channel_dimension_format(np.random.randint(0, 256, (10, 1, 50)))
        # But if we explicitly set the number of channels to 50, it works
inferred_dim = infer_channel_dimension_format(np.random.randint(0, 256, (10, 1, 50)), num_channels=50)
self.assertEqual(inferred_dim, ChannelDimension.LAST)
# Test we correctly identify the channel dimension
image = np.random.randint(0, 256, (3, 4, 5))
inferred_dim = infer_channel_dimension_format(image)
self.assertEqual(inferred_dim, ChannelDimension.FIRST)
image = np.random.randint(0, 256, (1, 4, 5))
inferred_dim = infer_channel_dimension_format(image)
self.assertEqual(inferred_dim, ChannelDimension.FIRST)
image = np.random.randint(0, 256, (4, 5, 3))
inferred_dim = infer_channel_dimension_format(image)
self.assertEqual(inferred_dim, ChannelDimension.LAST)
image = np.random.randint(0, 256, (4, 5, 1))
inferred_dim = infer_channel_dimension_format(image)
self.assertEqual(inferred_dim, ChannelDimension.LAST)
# We can take a batched array of images and find the dimension
image = np.random.randint(0, 256, (1, 3, 4, 5))
inferred_dim = infer_channel_dimension_format(image)
self.assertEqual(inferred_dim, ChannelDimension.FIRST)
def test_get_channel_dimension_axis(self):
# Test we correctly identify the channel dimension
image = np.random.randint(0, 256, (3, 4, 5))
inferred_axis = get_channel_dimension_axis(image)
self.assertEqual(inferred_axis, 0)
image = np.random.randint(0, 256, (1, 4, 5))
inferred_axis = get_channel_dimension_axis(image)
self.assertEqual(inferred_axis, 0)
image = np.random.randint(0, 256, (4, 5, 3))
inferred_axis = get_channel_dimension_axis(image)
self.assertEqual(inferred_axis, 2)
image = np.random.randint(0, 256, (4, 5, 1))
inferred_axis = get_channel_dimension_axis(image)
self.assertEqual(inferred_axis, 2)
# We can take a batched array of images and find the dimension
image = np.random.randint(0, 256, (1, 3, 4, 5))
inferred_axis = get_channel_dimension_axis(image)
self.assertEqual(inferred_axis, 1)
| transformers/tests/utils/test_image_utils.py/0 | {
"file_path": "transformers/tests/utils/test_image_utils.py",
"repo_id": "transformers",
"token_count": 13075
} | 420 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is responsible for cleaning the list of doctests by making sure the entries all exist and are in
alphabetical order.
Usage (from the root of the repo):
Check that the doctest list is properly sorted and all files exist (used in `make repo-consistency`):
```bash
python utils/check_doctest_list.py
```
Auto-sort the doctest list if it is not properly sorted (used in `make fix-copies`):
```bash
python utils/check_doctest_list.py --fix_and_overwrite
```
"""
import argparse
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."
DOCTEST_FILE_PATHS = ["not_doctested.txt", "slow_documentation_tests.txt"]
def clean_doctest_list(doctest_file: str, overwrite: bool = False):
"""
Cleans the doctest in a given file.
Args:
doctest_file (`str`):
The path to the doctest file to check or clean.
overwrite (`bool`, *optional*, defaults to `False`):
Whether or not to fix problems. If `False`, will error when the file is not clean.
"""
non_existent_paths = []
all_paths = []
with open(doctest_file, "r", encoding="utf-8") as f:
for line in f:
line = line.strip().split(" ")[0]
path = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(line)
if len(non_existent_paths) > 0:
non_existent_paths = "\n".join([f"- {f}" for f in non_existent_paths])
raise ValueError(f"`{doctest_file}` contains non-existent paths:\n{non_existent_paths}")
sorted_paths = sorted(all_paths)
if all_paths != sorted_paths:
if not overwrite:
raise ValueError(
f"Files in `{doctest_file}` are not in alphabetical order, run `make fix-copies` to fix "
"this automatically."
)
with open(doctest_file, "w", encoding="utf-8") as f:
f.write("\n".join(sorted_paths) + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
args = parser.parse_args()
for doctest_file in DOCTEST_FILE_PATHS:
doctest_file = os.path.join(REPO_PATH, "utils", doctest_file)
clean_doctest_list(doctest_file, args.fix_and_overwrite)
| transformers/utils/check_doctest_list.py/0 | {
"file_path": "transformers/utils/check_doctest_list.py",
"repo_id": "transformers",
"token_count": 1179
} | 421 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| transformers/utils/get_modified_files.py/0 | {
"file_path": "transformers/utils/get_modified_files.py",
"repo_id": "transformers",
"token_count": 448
} | 422 |
import torch
from transformers import PreTrainedModel
from .custom_configuration import CustomConfig, NoSuperInitConfig
class CustomModel(PreTrainedModel):
config_class = CustomConfig
def __init__(self, config):
super().__init__(config)
self.linear = torch.nn.Linear(config.hidden_size, config.hidden_size)
def forward(self, x):
return self.linear(x)
def _init_weights(self, module):
pass
class NoSuperInitModel(PreTrainedModel):
config_class = NoSuperInitConfig
def __init__(self, config):
super().__init__(config)
self.linear = torch.nn.Linear(config.attribute, config.attribute)
def forward(self, x):
return self.linear(x)
def _init_weights(self, module):
pass
| transformers/utils/test_module/custom_modeling.py/0 | {
"file_path": "transformers/utils/test_module/custom_modeling.py",
"repo_id": "transformers",
"token_count": 289
} | 423 |
include settings.ini
include LICENSE
include CONTRIBUTING.md
include README.md
recursive-exclude * __pycache__
| trl/MANIFEST.in/0 | {
"file_path": "trl/MANIFEST.in",
"repo_id": "trl",
"token_count": 34
} | 424 |
#!/bin/bash
# This script runs an SFT example end-to-end on a tiny model using different possible configurations
# but defaults to QLoRA + PEFT
OUTPUT_DIR="test_dpo/"
MODEL_NAME="HuggingFaceM4/tiny-random-LlamaForCausalLM"
MAX_STEPS=5
BATCH_SIZE=2
SEQ_LEN=128
# Handle extra arguments in case one passes accelerate configs.
EXTRA_ACCELERATE_ARGS=""
EXTRA_TRAINING_ARGS="""--use_peft \
--load_in_4bit
"""
# Number of GPUs to use (hardcoded here rather than detected at runtime)
NUM_GPUS=2
if [[ "${TRL_ACCELERATE_CONFIG}" == "" ]]; then
EXTRA_ACCELERATE_ARGS=""
else
EXTRA_ACCELERATE_ARGS="--config_file $TRL_ACCELERATE_CONFIG"
  # For DeepSpeed configs we need to set the `--fp16` flag to comply with the configs exposed
  # in `examples/accelerate_configs`, since our runners do not support bf16 mixed precision training.
if [[ $TRL_ACCELERATE_CONFIG == *"deepspeed"* ]]; then
EXTRA_TRAINING_ARGS="--fp16"
else
echo "Keeping QLoRA + PEFT"
fi
fi
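# Example invocation from the repo root (illustrative; any config under examples/accelerate_configs
# can be passed, or leave TRL_ACCELERATE_CONFIG unset to keep the default QLoRA + PEFT run):
#   TRL_ACCELERATE_CONFIG=examples/accelerate_configs/deepspeed_zero3.yaml bash commands/run_dpo.sh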
CMD="""
accelerate launch $EXTRA_ACCELERATE_ARGS \
--num_processes $NUM_GPUS \
--mixed_precision 'fp16' \
`pwd`/examples/scripts/dpo.py \
--model_name_or_path $MODEL_NAME \
--output_dir $OUTPUT_DIR \
--max_steps $MAX_STEPS \
--per_device_train_batch_size $BATCH_SIZE \
--max_length $SEQ_LEN \
$EXTRA_TRAINING_ARGS
"""
echo "Starting program..."
{ # try
echo $CMD
eval "$CMD"
} || { # catch
# save log for exception
echo "Operation Failed!"
exit 1
}
exit 0 | trl/commands/run_dpo.sh/0 | {
"file_path": "trl/commands/run_dpo.sh",
"repo_id": "trl",
"token_count": 597
} | 425 |
# Logging
As reinforcement learning algorithms are historically challenging to debug, it's important to pay careful attention to logging.
By default, the TRL [`PPOTrainer`] saves a lot of relevant information to `wandb` or `tensorboard`.
Upon initialization, pass one of these two options to the [`PPOConfig`]:
```
config = PPOConfig(
model_name=args.model_name,
    log_with="wandb", # or "tensorboard"
)
```
If you want to log with tensorboard, add the kwarg `project_kwargs={"logging_dir": PATH_TO_LOGS}` to the PPOConfig.
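For example, a TensorBoard setup could look like the following (the model name and logging directory below are placeholders):
```
from trl import PPOConfig
config = PPOConfig(
    model_name="gpt2",  # placeholder model name
    log_with="tensorboard",
    project_kwargs={"logging_dir": "./ppo_tensorboard_logs"},  # placeholder path for the event files
)
```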
## PPO Logging
Here's a brief explanation for the logged metrics provided in the data:
Key metrics to monitor. We want to maximize the reward, maintain a low KL divergence, and maximize entropy:
1. `env/reward_mean`: The average reward obtained from the environment. Alias `ppo/mean_scores`, which is used specifically to monitor the reward model.
1. `env/reward_std`: The standard deviation of the reward obtained from the environment. Alias `ppo/std_scores`, which is used specifically to monitor the reward model.
1. `env/reward_dist`: The histogram distribution of the reward obtained from the environment.
1. `objective/kl`: The mean Kullback-Leibler (KL) divergence between the old and new policies. It measures how much the new policy deviates from the old policy. The KL divergence is used to compute the KL penalty in the objective function.
1. `objective/kl_dist`: The histogram distribution of the `objective/kl`.
1. `objective/kl_coef`: The coefficient for Kullback-Leibler (KL) divergence in the objective function.
1. `ppo/mean_non_score_reward`: The **KL penalty** calculated by `objective/kl * objective/kl_coef` as the total reward for optimization to prevent the new policy from deviating too far from the old policy.
1. `objective/entropy`: The entropy of the model's policy, calculated by `-logprobs.sum(-1).mean()`. High entropy means the model's actions are more random, which can be beneficial for exploration.
Training stats:
1. `ppo/learning_rate`: The learning rate for the PPO algorithm.
1. `ppo/policy/entropy`: The entropy of the model's policy, calculated by `pd = torch.nn.functional.softmax(logits, dim=-1); entropy = torch.logsumexp(logits, dim=-1) - torch.sum(pd * logits, dim=-1)`. It measures the randomness of the policy.
1. `ppo/policy/clipfrac`: The fraction of probability ratios (old policy / new policy) that fell outside the clipping range in the PPO objective. This can be used to monitor the optimization process.
1. `ppo/policy/approxkl`: The approximate KL divergence between the old and new policies, measured by `0.5 * masked_mean((logprobs - old_logprobs) ** 2, mask)`, corresponding to the `k2` estimator in http://joschu.net/blog/kl-approx.html
1. `ppo/policy/policykl`: Similar to `ppo/policy/approxkl`, but measured by `masked_mean(old_logprobs - logprobs, mask)`, corresponding to the `k1` estimator in http://joschu.net/blog/kl-approx.html (both estimators are sketched in code right after this list)
1. `ppo/policy/ratio`: The histogram distribution of the ratio between the new and old policies, used to compute the PPO objective.
1. `ppo/policy/advantages_mean`: The average of the GAE (Generalized Advantage Estimation) advantage estimates. The advantage function measures how much better an action is compared to the average action at a state.
1. `ppo/policy/advantages`: The histogram distribution of `ppo/policy/advantages_mean`.
1. `ppo/returns/mean`: The mean of the TD(λ) returns, calculated by `returns = advantage + values`, another indicator of model performance. See https://iclr-blog-track.github.io/2022/03/25/ppo-implementation-details/ for more details.
1. `ppo/returns/var`: The variance of the TD(λ) returns, calculated by `returns = advantage + values`, another indicator of model performance.
1. `ppo/val/mean`: The mean of the values, used to monitor the value function's performance.
1. `ppo/val/var` : The variance of the values, used to monitor the value function's performance.
1. `ppo/val/var_explained`: The explained variance for the value function, used to monitor the value function's performance.
1. `ppo/val/clipfrac`: The fraction of the value function's predicted values that are clipped.
1. `ppo/val/vpred`: The predicted values from the value function.
1. `ppo/val/error`: The mean squared error between the `ppo/val/vpred` and returns, used to monitor the value function's performance.
1. `ppo/loss/policy`: The policy loss for the Proximal Policy Optimization (PPO) algorithm.
1. `ppo/loss/value`: The loss for the value function in the PPO algorithm. This value quantifies how well the function estimates the expected future rewards.
1. `ppo/loss/total`: The total loss for the PPO algorithm. It is the sum of the policy loss and the value function loss.
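Both KL estimators above can be reproduced in a few lines. Below is a minimal sketch with made-up per-token log-probabilities; the `masked_mean` helper here simply mirrors the masked average used internally:
```
import torch
def masked_mean(values, mask):
    # average over the positions where mask == 1
    return (values * mask).sum() / mask.sum()
# made-up per-token log-probabilities under the old and new policies, shape (batch, seq_len)
old_logprobs = torch.tensor([[-1.2, -0.7, -2.1], [-0.9, -1.5, -0.3]])
logprobs = torch.tensor([[-1.0, -0.8, -2.0], [-1.1, -1.4, -0.5]])
mask = torch.ones_like(logprobs)
approxkl = 0.5 * masked_mean((logprobs - old_logprobs) ** 2, mask)  # k2 estimator
policykl = masked_mean(old_logprobs - logprobs, mask)               # k1 estimator
print(approxkl.item(), policykl.item())
```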
Stats on queries, responses, and logprobs:
1. `tokens/queries_len_mean`: The average length of the queries tokens.
1. `tokens/queries_len_std`: The standard deviation of the length of the queries tokens.
1. `tokens/queries_dist`: The histogram distribution of the length of the queries tokens.
1. `tokens/responses_len_mean`: The average length of the responses tokens.
1. `tokens/responses_len_std`: The standard deviation of the length of the responses tokens.
1. `tokens/responses_dist`: The histogram distribution of the length of the responses tokens. (Costa: inconsistent naming, should be `tokens/responses_len_dist`)
1. `objective/logprobs`: The histogram distribution of the log probabilities of the actions taken by the model.
1. `objective/ref_logprobs`: The histogram distribution of the log probabilities of the actions taken by the reference model.
### Crucial values
During training, many values are logged, here are the most important ones:
1. `env/reward_mean`,`env/reward_std`, `env/reward_dist`: the properties of the reward distribution from the "environment" / reward model
1. `ppo/mean_non_score_reward`: The mean negated KL penalty during training (shows the delta between the reference model and the new policy over the batch in the step)
Here are some parameters that are useful to monitor for stability (when these diverge or collapse to 0, try tuning variables):
1. `ppo/loss/value`: it will spike / NaN when not going well.
1. `ppo/policy/ratio`: `ratio` being 1 is a baseline value, meaning that the probability of sampling a token is the same under the new and old policy. If the ratio is too high like 200, it means the probability of sampling a token is 200 times higher under the new policy than the old policy. This is a sign that the new policy is too different from the old policy, which will likely cause overoptimization and collapse training later on.
1. `ppo/policy/clipfrac` and `ppo/policy/approxkl`: if `ratio` is too high, the `ratio` is going to get clipped, resulting in high `clipfrac` and high `approxkl` as well.
1. `objective/kl`: it should stay positive so that the policy is not too far away from the reference policy.
1. `objective/kl_coef`: The target coefficient with [`AdaptiveKLController`]. Often increases before numerical instabilities. | trl/docs/source/logging.mdx/0 | {
"file_path": "trl/docs/source/logging.mdx",
"repo_id": "trl",
"token_count": 1961
} | 426 |
compute_environment: LOCAL_MACHINE
debug: false
deepspeed_config:
deepspeed_multinode_launcher: standard
gradient_accumulation_steps: 1
offload_optimizer_device: none
offload_param_device: none
zero3_init_flag: true
zero3_save_16bit_model: true
zero_stage: 3
distributed_type: DEEPSPEED
downcast_bf16: 'no'
machine_rank: 0
main_training_function: main
mixed_precision: 'bf16'
num_machines: 1
num_processes: 8
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
| trl/examples/accelerate_configs/deepspeed_zero3.yaml/0 | {
"file_path": "trl/examples/accelerate_configs/deepspeed_zero3.yaml",
"repo_id": "trl",
"token_count": 205
} | 427 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from datetime import date
from tabulate import tabulate
MAX_LEN_MESSAGE = 2900 # slack endpoint has a limit of 3001 characters
parser = argparse.ArgumentParser()
parser.add_argument("--slack_channel_name", default="trl-push-examples-ci")
parser.add_argument("--text_file_name", required=True)
def main(text_file_name, slack_channel_name=None):
message = ""
if os.path.isfile(text_file_name):
final_results = {}
        with open(text_file_name, "r") as file:
            lines = file.readlines()
for line in lines:
result, config_name = line.split(",")
config_name = config_name.split("/")[-1].split(".yaml")[0]
final_results[config_name] = int(result)
no_error_payload = {
"type": "section",
"text": {
"type": "plain_text",
"text": "🌞 There were no failures on the example tests!"
if not len(final_results) == 0
else "Something went wrong there is at least one empty file - please check GH action results.",
"emoji": True,
},
}
total_num_failed = sum(final_results.values())
else:
no_error_payload = {
"type": "section",
"text": {
"type": "plain_text",
"text": "🔴 Something is wrong with the workflow please check ASAP!"
"Something went wrong there is no text file being produced. Please check ASAP.",
"emoji": True,
},
}
total_num_failed = 0
test_type_name = text_file_name.replace(".txt", "").replace("temp_results_", "").replace("_", " ").title()
payload = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": "🤗 Results of the {} TRL {} example tests.".format(
os.environ.get("TEST_TYPE", ""), test_type_name
),
},
},
]
if total_num_failed > 0:
message += f"{total_num_failed} failed tests for example tests!"
for test_name, failed in final_results.items():
failed_table = tabulate(
[[test_name, "🟢" if not failed else "🔴"]],
headers=["Test Name", "Status"],
showindex="always",
tablefmt="grid",
maxcolwidths=[12],
)
message += "\n```\n" + failed_table + "\n```"
print(f"### {message}")
else:
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
if len(message) > MAX_LEN_MESSAGE:
print(f"Truncating long message from {len(message)} to {MAX_LEN_MESSAGE}")
message = message[:MAX_LEN_MESSAGE] + "..."
if len(message) != 0:
md_report = {
"type": "section",
"text": {"type": "mrkdwn", "text": message},
}
payload.append(md_report)
action_button = {
"type": "section",
"text": {"type": "mrkdwn", "text": "*For more details:*"},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/trl/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
payload.append(action_button)
date_report = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f"On Push - main {os.environ.get('TEST_TYPE')} test results for {date.today()}",
},
],
}
payload.append(date_report)
print(payload)
client = WebClient(token=os.environ.get("SLACK_API_TOKEN"))
client.chat_postMessage(channel=f"#{slack_channel_name}", text=message, blocks=payload)
if __name__ == "__main__":
args = parser.parse_args()
main(args.text_file_name, args.slack_channel_name)
| trl/scripts/log_example_reports.py/0 | {
"file_path": "trl/scripts/log_example_reports.py",
"repo_id": "trl",
"token_count": 2272
} | 428 |
import subprocess
def test_hello_world():
subprocess.run(
"python examples/hello_world.py",
shell=True,
check=True,
)
| trl/tests/test_e2e.py/0 | {
"file_path": "trl/tests/test_e2e.py",
"repo_id": "trl",
"token_count": 69
} | 429 |
from typing import Any, Callable, List, Optional, Union
import torch
from transformers import GenerationConfig, PreTrainedTokenizer, PreTrainedTokenizerFast
from ..core import set_seed
from ..models import SUPPORTED_ARCHITECTURES, PreTrainedModelWrapper
class BestOfNSampler(object):
def __init__(
self,
model: PreTrainedModelWrapper,
tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
queries_to_scores: Callable[[List[str]], List[float]],
length_sampler: Any,
sample_size: int = 4,
seed: Optional[int] = None,
n_candidates: int = 1,
generation_config: Optional[GenerationConfig] = None,
) -> None:
r"""
Initialize the sampler for best-of-n generation
Args:
model (`PreTrainedModelWrapper`):
The pretrained model to use for generation
tokenizer (`PreTrainedTokenizer` or `PreTrainedTokenizerFast`):
Tokenizer associated with the pretrained model
queries_to_scores (`Callable[[List[str]], List[float]]`):
Callable that takes a list of generated texts and returns the associated reward scores
length_sampler (`Any`):
Sampler used to sample the length of the generated text
sample_size (`int`):
Number of samples to generate for each query
seed (`int`, *optional*):
Random seed used to control generation
n_candidates (`int`):
Number of candidates to return for each query
generation_config (`GenerationConfig`, *optional*):
Generation config passed to the underlying model's `generate` method.
See `GenerationConfig` (https://huggingface.co/docs/transformers/v4.29.1/en/main_classes/text_generation#transformers.GenerationConfig) for more details
"""
if seed is not None:
set_seed(seed)
if not isinstance(tokenizer, (PreTrainedTokenizer, PreTrainedTokenizerFast)):
raise ValueError(
f"tokenizer must be a PreTrainedTokenizer or PreTrainedTokenizerFast, got {type(tokenizer)}"
)
if not isinstance(model, (SUPPORTED_ARCHITECTURES)):
raise ValueError(
f"model must be a PreTrainedModelWrapper, got {type(model)} - supported architectures are: {SUPPORTED_ARCHITECTURES}"
)
self.model = model
self.tokenizer = tokenizer
self.queries_to_scores = queries_to_scores
self.length_sampler = length_sampler
self.gen_config = generation_config
self.sample_size = sample_size
self.n_candidates = n_candidates
def generate(
self,
tokenized_query: Union[List[int], torch.Tensor, List[torch.Tensor], List[List[int]]],
skip_special_tokens: bool = True,
device: Optional[Union[str, torch.device]] = None,
**generation_kwargs,
) -> List[List[str]]:
r"""
Generate the best of n samples for input queries
Args:
            tokenized_query (`List[int]` or `torch.Tensor` or `List[torch.Tensor]` or `List[List[int]]`):
represents either a single tokenized query (a single tensor or a list of integers) or a batch of tokenized queries (a list of tensors or a list of lists of integers)
skip_special_tokens (`bool`):
Whether to remove the special tokens from the output
device (`str` or `torch.device`, *optional*):
The device on which the model will be loaded
**generation_kwargs (`dict`, *optional*):
Additional keyword arguments passed along to the underlying model's `generate` method.
This is used to override generation config
Returns:
List[List[str]]: A list of lists of generated texts
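        Example:
            A minimal sketch (assuming `model`, `tokenizer`, `queries_to_scores` and `length_sampler`
            were prepared as described in `__init__`; the prompt text is illustrative):
            ```python
            >>> sampler = BestOfNSampler(model, tokenizer, queries_to_scores, length_sampler, sample_size=4)
            >>> tokenized_query = tokenizer("The movie was", return_tensors="pt").input_ids.squeeze(0)
            >>> best_candidates = sampler.generate(tokenized_query)  # a list with one inner list of strings
            ```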
"""
queries = None
if isinstance(tokenized_query, torch.Tensor) and tokenized_query.ndim == 1:
queries = tokenized_query.unsqueeze(0)
elif isinstance(tokenized_query, List):
element_type = type(tokenized_query[0])
if element_type == int:
queries = torch.tensor(tokenized_query).unsqueeze(0)
elif element_type == torch.Tensor:
queries = [tensor.reshape((1, -1)) for tensor in tokenized_query]
else:
queries = [torch.tensor(query).reshape((1, -1)) for query in tokenized_query]
result = []
for query in queries:
queries = query.repeat((self.sample_size, 1))
output = self.model.generate(
queries.to(device),
max_new_tokens=self.length_sampler(),
generation_config=self.gen_config,
**generation_kwargs,
).squeeze()
output = self.tokenizer.batch_decode(output, skip_special_tokens=skip_special_tokens)
scores = torch.tensor(self.queries_to_scores(output))
output = [output[i] for i in scores.topk(self.n_candidates).indices]
result.append(output)
return result
| trl/trl/extras/best_of_n_sampler.py/0 | {
"file_path": "trl/trl/extras/best_of_n_sampler.py",
"repo_id": "trl",
"token_count": 2255
} | 430 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import math
import os
import time
import typing
import warnings
from contextlib import nullcontext
from typing import Callable, List, Optional, Union
import datasets
import numpy as np
import torch
import torch.nn.functional as F
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration, gather_object, is_deepspeed_available
from datasets import Dataset
from huggingface_hub import whoami
from packaging import version
from torch.optim import Adam
from transformers import (
DataCollatorForLanguageModeling,
PreTrainedTokenizer,
PreTrainedTokenizerBase,
PreTrainedTokenizerFast,
)
from ..core import (
WANDB_PADDING,
PPODecorators,
clip_by_value,
convert_to_scalar,
entropy_from_logits,
flatten_dict,
logprobs_from_logits,
masked_mean,
masked_var,
masked_whiten,
set_seed,
stack_dicts,
stats_to_np,
)
from ..import_utils import is_npu_available, is_torch_greater_2_0, is_xpu_available
from ..models import SUPPORTED_ARCHITECTURES, PreTrainedModelWrapper, create_reference_model
from . import AdaptiveKLController, BaseTrainer, FixedKLController, PPOConfig, RunningMoments
if is_deepspeed_available():
import deepspeed
MODEL_CARD_TEMPLATE = """---
license: apache-2.0
tags:
- trl
- ppo
- transformers
- reinforcement-learning
---
# {model_name}
This is a [TRL language model](https://github.com/huggingface/trl) that has been fine-tuned with reinforcement learning to
guide the model outputs according to a value, function, or human feedback. The model can be used for text generation.
## Usage
To use this model for inference, first install the TRL library:
```bash
python -m pip install trl
```
You can then generate text as follows:
```python
from transformers import pipeline
generator = pipeline("text-generation", model="{model_id}")
outputs = generator("Hello, my llama is cute")
```
If you want to use the model for training or to obtain the outputs from the value head, load the model as follows:
```python
from transformers import AutoTokenizer
from trl import AutoModelForCausalLMWithValueHead
tokenizer = AutoTokenizer.from_pretrained("{model_id}")
model = AutoModelForCausalLMWithValueHead.from_pretrained("{model_id}")
inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
outputs = model(**inputs, labels=inputs["input_ids"])
```
"""
class PPOTrainer(BaseTrainer):
"""
The PPOTrainer uses Proximal Policy Optimization to optimise language models.
Note, this trainer is heavily inspired by the original OpenAI learning to summarize work here:
https://github.com/openai/summarize-from-feedback
Attributes:
**config** (`PPOConfig`) -- Configuration object for PPOTrainer. Check the documentation of `PPOConfig` for more
details.
**model** (`PreTrainedModelWrapper`) -- Model to be optimized, Hugging Face transformer model with a value head.
Check the documentation of `PreTrainedModelWrapper` for more details.
**ref_model** (`PreTrainedModelWrapper`, *optional*) -- Reference model to be used for KL penalty, Hugging Face
        transformer model with a causal language modelling head. Check the documentation of `PreTrainedModelWrapper`
for more details. If no reference model is provided, the trainer will create a reference model with the same
architecture as the model to be optimized with shared layers.
**tokenizer** (`PreTrainedTokenizerBase`) -- Tokenizer to be used for encoding the
data. Check the documentation of `transformers.PreTrainedTokenizer` and
`transformers.PreTrainedTokenizerFast` for more details.
**dataset** (Union[`torch.utils.data.Dataset`, `datasets.Dataset`], *optional*) -- PyTorch dataset or Hugging
Face dataset. This is used to create a PyTorch dataloader. If no dataset is provided, the dataloader must be
        created outside the trainer: users need to design their own dataloader and make sure the batch
size that is used is the same as the one specified in the configuration object.
**optimizer** (`torch.optim.Optimizer`, *optional*) -- Optimizer to be used for training. If no optimizer is
provided, the trainer will create an Adam optimizer with the learning rate specified in the configuration
object.
**data_collator** (DataCollatorForLanguageModeling, *optional*) -- Data collator to be used for training and
passed along the dataloader
**num_shared_layers** (int, *optional*) -- Number of layers to be shared between the model and the reference
model, if no reference model is passed. If no number is provided, all the layers will be shared.
**lr_scheduler** (`torch.optim.lr_scheduler`, *optional*) -- Learning rate scheduler to be used for training.
"""
_tag_names = ["trl", "ppo"]
def __init__(
self,
config: Optional[PPOConfig] = None,
model: Optional[PreTrainedModelWrapper] = None,
ref_model: Optional[PreTrainedModelWrapper] = None,
tokenizer: Optional[PreTrainedTokenizerBase] = None,
dataset: Optional[Union[torch.utils.data.Dataset, Dataset]] = None,
optimizer: Optional[torch.optim.Optimizer] = None,
data_collator: Optional[typing.Callable] = None,
num_shared_layers: Optional[int] = None,
lr_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
):
"""
Initialize PPOTrainer.
Args:
config (`PPOConfig`):
Configuration object for PPOTrainer. Check the documentation of `PPOConfig` for more details.
model (`PreTrainedModelWrapper`):
Hugging Face transformer model with a value head.
ref_model (`PreTrainedModelWrapper`):
                Hugging Face transformer model with a causal language modelling head. Used for KL penalty
tokenizer (`transformers.PreTrainedTokenizerBase`):
Hugging Face tokenizer
dataset (Optional[Union[`torch.utils.data.Dataset`, `datasets.Dataset`]]):
PyTorch dataset or Hugging Face dataset. If a Hugging Face dataset is passed, the dataset
will be preprocessed by removing the columns that are not used by the model. If none is passed,
a warning will be raised in a multi-GPU setting.
optimizer (Optional[`torch.optim.Optimizer`]):
                Optimizer used for training. If `None`, `Adam` is used as the default.
data_collator (Optional[function]):
Data collator function.
num_shared_layers (Optional[int]):
Number of shared layers between the model and the reference model. If `None`, all layers are shared.
                Used only if `ref_model` is `None`.
lr_scheduler (Optional[`torch.optim.lr_scheduler`]):
Learning rate scheduler used for training.
"""
super().__init__(config)
# initial seed for reproducible experiments
set_seed(config.seed)
# Step 0: check positional arguments validity
if not isinstance(config, PPOConfig):
raise ValueError(f"config must be a PPOConfig, got {type(config)}")
if not isinstance(tokenizer, (PreTrainedTokenizerBase)):
raise ValueError(
f"tokenizer must be a PreTrainedTokenizerBase like a PreTrainedTokenizer or a PreTrainedTokenizerFast, got {type(tokenizer)}"
)
if not isinstance(model, (SUPPORTED_ARCHITECTURES)):
raise ValueError(
f"model must be a PreTrainedModelWrapper, got {type(model)} - supported architectures are: {SUPPORTED_ARCHITECTURES}"
)
# Step 1: Initialize Accelerator
self.accelerator = Accelerator(
log_with=config.log_with,
gradient_accumulation_steps=config.gradient_accumulation_steps,
project_config=ProjectConfiguration(**config.project_kwargs),
**config.accelerator_kwargs,
)
# Step 1.1 Runtime variables filled by the accelerator
config.world_size = self.accelerator.num_processes
config.global_backward_batch_size = config.backward_batch_size * config.world_size
config.global_batch_size = config.batch_size * config.world_size
self.model = model
self.model_params = filter(lambda p: p.requires_grad, self.model.parameters())
self.is_encoder_decoder = hasattr(self.model, "is_encoder_decoder")
self.is_peft_model = getattr(self.model, "is_peft_model", False)
config.is_encoder_decoder = self.is_encoder_decoder
config.is_peft_model = self.is_peft_model
is_using_tensorboard = config.log_with is not None and config.log_with == "tensorboard"
self.accelerator.init_trackers(
config.tracker_project_name,
config=dict(trl_ppo_trainer_config=config.to_dict()) if not is_using_tensorboard else config.to_dict(),
init_kwargs=config.tracker_kwargs,
)
self.is_using_text_environment = getattr(config, "use_text_environment", False)
if isinstance(ref_model, SUPPORTED_ARCHITECTURES):
self.ref_model = ref_model
if num_shared_layers is not None:
warnings.warn(
"num_shared_layers is ignored when ref_model is provided. Two different models are used for the "
"model and the reference model and no layers are shared.",
UserWarning,
)
elif ref_model is None and not self.is_peft_model:
self.ref_model = create_reference_model(self.model, num_shared_layers=num_shared_layers)
elif self.is_peft_model:
self.ref_model = None
else:
raise ValueError(
f"ref_model must be a PreTrainedModelWrapper or `None`, got {type(ref_model)} - supported "
f"architectures are: {SUPPORTED_ARCHITECTURES} "
)
self.optional_peft_ctx = (
self.accelerator.unwrap_model(self.model).pretrained_model.disable_adapter
if self.is_peft_model
else nullcontext
)
if not (isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast)):
raise ValueError(
"tokenizer must be a transformers.PreTrainedTokenizer or transformers.PreTrainedTokenizerFast"
)
self.tokenizer = tokenizer
if dataset is not None and not (isinstance(dataset, torch.utils.data.Dataset) or isinstance(dataset, Dataset)):
raise ValueError("dataset must be a torch.utils.data.Dataset or datasets.Dataset")
elif dataset is None:
warnings.warn(
"No dataset is provided. Make sure to set config.batch_size to the correct value before training.",
UserWarning,
)
self.dataset = dataset
self._signature_columns = None
if self.dataset is not None:
self.dataloader = self.prepare_dataloader(self.dataset, data_collator)
elif self.dataset is None and self.accelerator.num_processes > 1:
warnings.warn(
"No dataset is provided. In a multi-GPU setting, this will lead to an error. You should"
" prepare your dataloader yourself with `dataloader = ppo_trainer.accelerator.prepare(dataloader)`"
" and using `torch.utils.data.DataLoader`, or pass a dataset to the `PPOTrainer`. Please "
" refer to the documentation for more details.",
UserWarning,
)
self.dataloader = None
else:
self.dataloader = None
# Step 3: Initialize optimizer and data collator
self.data_collator = DataCollatorForLanguageModeling(self.tokenizer, mlm=False)
if optimizer is None:
self.optimizer = Adam(
filter(lambda p: p.requires_grad, self.model.parameters()),
lr=self.config.learning_rate,
)
else:
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
if self.lr_scheduler is not None:
lr_scheduler_class = (
torch.optim.lr_scheduler._LRScheduler
if not is_torch_greater_2_0()
else torch.optim.lr_scheduler.LRScheduler
)
if not isinstance(self.lr_scheduler, lr_scheduler_class):
raise ValueError(
"lr_scheduler must be a torch.optim.lr_scheduler._LRScheduler or torch.optim.lr_scheduler.LRScheduler (for torch >= 2.0)"
)
if self.config.adap_kl_ctrl:
self.kl_ctl = AdaptiveKLController(self.config.init_kl_coef, self.config.target, self.config.horizon)
else:
self.kl_ctl = FixedKLController(self.config.init_kl_coef)
# Safety checkers for DS integration
is_deepspeed_used = self.accelerator.distributed_type == "DEEPSPEED" and hasattr(
self.accelerator.state, "deepspeed_plugin"
)
(
self.model,
self.optimizer,
self.data_collator,
self.dataloader,
self.lr_scheduler,
) = self.accelerator.prepare(
self.model,
self.optimizer,
self.data_collator,
self.dataloader,
self.lr_scheduler,
)
if is_deepspeed_used:
# Quantized models are already set on the correct device
if not self.is_peft_model and not (
getattr(self.ref_model.pretrained_model, "is_loaded_in_8bit", False)
or getattr(self.ref_model.pretrained_model, "is_loaded_in_4bit", False)
):
self.ref_model = self._prepare_deepspeed(self.ref_model)
else:
self.ref_model = self.accelerator.prepare(self.ref_model)
# In a distributed setup, only logging needs to be performed on the main process
# check: https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html
# or: https://discuss.pytorch.org/t/use-distributed-data-parallel-correctly/82500/11
self.is_distributed = self.accelerator.num_processes > 1
# init the current step
self.current_step = 0
# init variables for pushing model to hub
if config.push_to_hub_if_best_kwargs:
if "repo_id" not in config.push_to_hub_if_best_kwargs:
raise ValueError("You have to specify repo_id in order to push the model to the hub!")
self.push_to_hub_kwargs = config.push_to_hub_if_best_kwargs
self.compare_step = 0
self.highest_reward = torch.tensor(-float("inf"))
# post process for PP
if not getattr(self.model, "is_sequential_parallel", False):
self.current_device = self.accelerator.device
else:
if is_xpu_available():
self.current_device = torch.device("xpu:0")
elif is_npu_available():
self.current_device = torch.device("npu:0")
else:
self.current_device = torch.device("cuda:0")
PPODecorators.optimize_device_cache = self.config.optimize_device_cache
self.running = RunningMoments(self.accelerator)
def _filter_kwargs(self, kwargs, target_func):
"""
filter the keyword arguments that are supported by the target function.
Args:
kwargs (dict):
Keyword arguments
target_func (function):
Target function
"""
return {k: v for k, v in kwargs.items() if k in inspect.signature(target_func).parameters.keys()}
def prepare_dataloader(self, dataset: Union[torch.utils.data.Dataset, Dataset], data_collator=None):
"""
Prepare the dataloader for training.
Args:
dataset (Union[`torch.utils.data.Dataset`, `datasets.Dataset`]):
PyTorch dataset or Hugging Face dataset. If a Hugging Face dataset is passed, the dataset
will be preprocessed by removing the columns that are not used by the model.
data_collator (Optional[function]):
Data collator function.
Returns:
`torch.utils.data.DataLoader`: PyTorch dataloader
"""
if isinstance(dataset, Dataset):
dataset = self._remove_unused_columns(dataset)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=self.config.batch_size,
collate_fn=data_collator,
shuffle=True,
drop_last=True,
)
return dataloader
# Adapted from transformers.Trainer._set_signature_columns_if_needed
def _set_signature_columns_if_needed(self):
if self._signature_columns is None:
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
self._signature_columns = list(signature.parameters.keys())
# label => sentiment | we need query and response for logging purpose
self._signature_columns += ["label", "query", "response"]
# Adapted from transformers.Trainer._remove_unused_columns
def _remove_unused_columns(self, dataset: "Dataset"):
if not self.config.remove_unused_columns:
return dataset
self._set_signature_columns_if_needed()
signature_columns = self._signature_columns
ignored_columns = list(set(dataset.column_names) - set(signature_columns))
columns = [k for k in signature_columns if k in dataset.column_names]
if version.parse(datasets.__version__) < version.parse("1.4.0"):
dataset.set_format(
type=dataset.format["type"],
columns=columns,
format_kwargs=dataset.format["format_kwargs"],
)
return dataset
else:
return dataset.remove_columns(ignored_columns)
def generate(
self,
query_tensor: Union[torch.Tensor, List[torch.Tensor]],
length_sampler: Optional[Callable] = None,
batch_size: int = 4,
return_prompt: bool = True,
generate_ref_response: bool = False,
**generation_kwargs,
):
"""
        Generate a response with the model given the query tensor.
        This calls the `generate` method of the model.
Args:
query_tensor (`torch.LongTensor`):
A tensor of shape (`seq_len`) containing query tokens or a list of tensors of shape (`seq_len`).
length_sampler (`Callable`, *optional*):
Callable that returns the number of newly generated tokens.
            batch_size (`int`, *optional*):
Batch size used for generation, defaults to `4`.
return_prompt (`bool`, *optional*):
If set to `False` the prompt is not returned but only the newly generated tokens, defaults to `True`.
generate_ref_response (`bool`, *optional*):
If set to `True` the reference response is also generated, defaults to `False`.
generation_kwargs (dict[str, Any]):
Keyword arguments for generation.
Returns:
`torch.LongTensor`: A tensor of shape (`batch_size`, `gen_len`) containing response tokens.
"""
if generate_ref_response:
ref_model = self.model if self.is_peft_model else self.ref_model
if isinstance(query_tensor, List):
response = self._generate_batched(
self.model,
query_tensor,
length_sampler=length_sampler,
batch_size=batch_size,
return_prompt=return_prompt,
**generation_kwargs,
)
if generate_ref_response:
with self.optional_peft_ctx():
ref_response = self._generate_batched(
ref_model,
query_tensor,
length_sampler=length_sampler,
batch_size=batch_size,
return_prompt=return_prompt,
**generation_kwargs,
)
else:
if len(query_tensor.shape) == 2:
raise ValueError(
"query_tensor must be a tensor of shape (`seq_len`) or a list of tensors of shape (`seq_len`)"
)
if length_sampler is not None:
generation_kwargs["max_new_tokens"] = length_sampler()
response = self.accelerator.unwrap_model(self.model).generate(
input_ids=query_tensor.unsqueeze(dim=0), **generation_kwargs
)
if generate_ref_response:
with self.optional_peft_ctx():
ref_response = ref_model.generate(input_ids=query_tensor.unsqueeze(dim=0), **generation_kwargs)
if not return_prompt and not self.is_encoder_decoder:
response = response[:, query_tensor.shape[0] :]
if generate_ref_response:
ref_response = ref_response[:, query_tensor.shape[0] :]
if generate_ref_response:
return response, ref_response
return response
def _generate_batched(
self,
model: PreTrainedModelWrapper,
query_tensors: List[torch.Tensor],
length_sampler: Optional[Callable] = None,
batch_size: int = 4,
return_prompt: bool = True,
pad_to_multiple_of: Optional[int] = None,
remove_padding: bool = True,
**generation_kwargs,
):
outputs = []
padding_side_default = self.tokenizer.padding_side
if not self.is_encoder_decoder:
self.tokenizer.padding_side = "left"
# in case we have fewer examples than bs
batch_size = min(len(query_tensors), batch_size)
for i in range(0, len(query_tensors), batch_size):
if length_sampler is not None:
generation_kwargs["max_new_tokens"] = length_sampler()
# prevent overflow if query tensors are not even multiple of bs
end_index = min(len(query_tensors), i + batch_size)
batch = query_tensors[i:end_index]
batch_mask = [torch.ones_like(element) for element in batch]
inputs = {"input_ids": batch, "attention_mask": batch_mask}
padded_inputs = self.tokenizer.pad(
inputs,
padding=True,
max_length=None,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors="pt",
).to(self.current_device)
generations = self.accelerator.unwrap_model(model).generate(**padded_inputs, **generation_kwargs)
for generation, mask in zip(generations, padded_inputs["attention_mask"]):
if not self.is_encoder_decoder:
output = generation[(1 - mask).sum() :] # remove padding
else:
output = generation
if not return_prompt and not self.is_encoder_decoder:
output = output[(mask).sum() :] # remove prompt
if remove_padding and self.tokenizer.eos_token_id in output:
pad_mask = output == self.tokenizer.eos_token_id
pad_start = torch.nonzero(pad_mask, as_tuple=False)[0, 0].item()
output = output[: pad_start + 1] # keep the eos token at the end
outputs.append(output)
self.tokenizer.padding_side = padding_side_default
return outputs
def _step_safety_checker(
self,
batch_size: int,
queries: List[torch.LongTensor],
responses: List[torch.LongTensor],
scores: List[torch.FloatTensor],
masks: Optional[List[torch.LongTensor]] = None,
):
"""
Check if the input data is valid for training.
Args:
batch_size (int):
Batch size from the config file.
queries (List[`torch.LongTensor`]):
List of tensors containing the encoded queries of shape (`query_length`)
responses (List[`torch.LongTensor`]):
List of tensors containing the encoded responses of shape (`response_length`)
scores (List[`torch.FloatTensor`]):
List of tensors containing the scores.
masks (List[`torch.LongTensor`], *optional*):
list of optional tensors containing the masks of shape (`query_length` + `response_length`)
Returns:
`tuple`: The input processed data.
"""
for name, tensor_list in zip(["queries", "responses", "scores"], [queries, responses, scores]):
if not isinstance(tensor_list, list):
raise ValueError(f"{name} must be a list of tensors - got {type(tensor_list)}")
if not isinstance(tensor_list[0], torch.Tensor):
raise ValueError(f"Elements in {name} must be tensors - got {type(tensor_list[0])}")
if batch_size is not None and len(tensor_list) != batch_size:
raise ValueError(
f"Batch size ({batch_size}) does not match number of examples - but got {len(tensor_list)} for: {name}"
)
# add queries, scores and responses on the correct device
queries = [tensor.to(self.current_device) for tensor in queries]
responses = [tensor.to(self.current_device) for tensor in responses]
scores = [tensor.to(self.current_device) for tensor in scores]
masks = [tensor.to(self.current_device) for tensor in masks] if masks is not None else None
# squeeze scores if needed
for i, score in enumerate(scores):
if score.dim() > 1:
raise ValueError(f"Scores must be 1-dimensional - got {score.dim()} for {score}")
elif score.dim() == 1:
scores[i] = score.squeeze()
return queries, responses, scores, masks
@PPODecorators.empty_device_cache()
def step(
self,
queries: List[torch.LongTensor],
responses: List[torch.LongTensor],
scores: List[torch.FloatTensor],
response_masks: Optional[List[torch.LongTensor]] = None,
):
"""
Run a PPO optimisation step given a list of queries, model responses, and rewards.
Args:
queries (List[`torch.LongTensor`]):
List of tensors containing the encoded queries of shape (`query_length`)
responses (List[`torch.LongTensor`]):
List of tensors containing the encoded responses of shape (`response_length`)
scores (List[`torch.FloatTensor`]):
List of tensors containing the scores.
response_masks (List[`torch.FloatTensor`], *optional*)):
List of tensors containing masks of the response tokens.
Returns:
`dict[str, Any]`: A summary of the training statistics
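        Example:
            A minimal sketch (assuming the trainer was created with `batch_size=1` and that a
            tokenizer and a scalar reward are available; the names below are illustrative):
            ```python
            >>> query_tensor = tokenizer("My favourite movie is", return_tensors="pt").input_ids.squeeze(0)
            >>> response_tensor = ppo_trainer.generate(query_tensor, return_prompt=False, max_new_tokens=16)
            >>> rewards = [torch.tensor(1.0)]
            >>> stats = ppo_trainer.step([query_tensor], [response_tensor.squeeze(0)], rewards)
            ```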
"""
bs = self.config.batch_size
queries, responses, scores, response_masks = self._step_safety_checker(
bs, queries, responses, scores, response_masks
)
scores = torch.tensor(scores, device=self.current_device)
if self.config.use_score_scaling:
# Score scaling
scores_mean, scores_std = self.running.update(scores)
tensor_to_kwargs = dict(dtype=scores.dtype, device=scores.device)
score_scaling_factor = self.running.std.to(**tensor_to_kwargs) + torch.finfo(scores.dtype).eps
if self.config.use_score_norm:
scores = (scores - self.running.mean.to(**tensor_to_kwargs)) / score_scaling_factor
else:
scores /= score_scaling_factor
if self.config.score_clip is not None:
# Score clipping
scores_dtype = scores.dtype
scores = torch.clip(scores.float(), -self.config.score_clip, self.config.score_clip).to(dtype=scores_dtype)
# if we want to push best model to the hub
if hasattr(self, "highest_reward"):
if self.compare_step % self.config.compare_steps == 0:
curr_mean_reward = scores.mean()
# if the best reward ever seen
if curr_mean_reward > self.highest_reward:
self.highest_reward = curr_mean_reward
# push model to hub
self.push_to_hub(**self.push_to_hub_kwargs)
self.compare_step += 1
timing = dict()
t0 = time.time()
t = time.time()
model_inputs = self.prepare_model_inputs(queries, responses)
if self.is_distributed:
pad_first = self.tokenizer.padding_side == "left"
model_inputs["input_ids"] = self.accelerator.pad_across_processes(
model_inputs["input_ids"],
dim=1,
pad_index=self.tokenizer.pad_token_id,
pad_first=pad_first,
)
model_inputs["attention_mask"] = self.accelerator.pad_across_processes(
model_inputs["attention_mask"], dim=1, pad_index=0, pad_first=pad_first
)
if self.is_encoder_decoder:
model_inputs["decoder_input_ids"] = self.accelerator.pad_across_processes(
model_inputs["decoder_input_ids"],
dim=1,
pad_index=self.tokenizer.pad_token_id,
pad_first=pad_first,
)
model_inputs["decoder_attention_mask"] = self.accelerator.pad_across_processes(
model_inputs["decoder_attention_mask"],
dim=1,
pad_index=0,
pad_first=pad_first,
)
model_inputs_names = list(model_inputs.keys())
full_kl_penalty = self.config.kl_penalty == "full"
with torch.no_grad():
all_logprobs, logits_or_none, values, masks = self.batched_forward_pass(
self.model,
queries,
responses,
model_inputs,
response_masks=response_masks,
return_logits=full_kl_penalty,
)
with self.optional_peft_ctx():
ref_logprobs, ref_logits_or_none, _, _ = self.batched_forward_pass(
self.model if self.is_peft_model else self.ref_model,
queries,
responses,
model_inputs,
return_logits=full_kl_penalty,
)
timing["time/ppo/forward_pass"] = time.time() - t
with torch.no_grad():
t = time.time()
if full_kl_penalty:
active_full_logprobs = logprobs_from_logits(logits_or_none, None, gather=False)
ref_full_logprobs = logprobs_from_logits(ref_logits_or_none, None, gather=False)
rewards, non_score_reward, kls = self.compute_rewards(
scores, active_full_logprobs, ref_full_logprobs, masks
)
else:
rewards, non_score_reward, kls = self.compute_rewards(scores, all_logprobs, ref_logprobs, masks)
timing["time/ppo/compute_rewards"] = time.time() - t
t = time.time()
values, advantages, returns = self.compute_advantages(values, rewards, masks)
timing["time/ppo/compute_advantages"] = time.time() - t
# upcast to float32 to avoid dataset issues
batch_dict = {
"queries": queries,
"responses": responses,
"logprobs": all_logprobs.to(torch.float32),
"values": values.to(torch.float32),
"masks": masks,
"advantages": advantages,
"returns": returns,
}
batch_dict.update(model_inputs)
t = time.time()
all_stats = []
early_stop = False
for _ in range(self.config.ppo_epochs):
if early_stop:
break
b_inds = np.random.permutation(bs)
for backward_batch_start in range(0, bs, self.config.backward_batch_size):
backward_batch_end = backward_batch_start + self.config.backward_batch_size
backward_batch_inds = b_inds[backward_batch_start:backward_batch_end]
for mini_batch_start in range(0, self.config.backward_batch_size, self.config.mini_batch_size):
mini_batch_end = mini_batch_start + self.config.mini_batch_size
mini_batch_inds = backward_batch_inds[mini_batch_start:mini_batch_end]
mini_batch_dict = {
"logprobs": batch_dict["logprobs"][mini_batch_inds],
"values": batch_dict["values"][mini_batch_inds],
"masks": batch_dict["masks"][mini_batch_inds],
# hacks: the queries and responses are ragged.
"queries": [batch_dict["queries"][i] for i in mini_batch_inds],
"responses": [batch_dict["responses"][i] for i in mini_batch_inds],
"advantages": batch_dict["advantages"][mini_batch_inds],
"returns": batch_dict["returns"][mini_batch_inds],
}
for k in model_inputs_names:
mini_batch_dict[k] = batch_dict[k][mini_batch_inds]
with self.accelerator.accumulate(self.model):
model_inputs = {k: mini_batch_dict[k] for k in model_inputs_names}
logprobs, logits, vpreds, _ = self.batched_forward_pass(
self.model,
mini_batch_dict["queries"],
mini_batch_dict["responses"],
model_inputs,
return_logits=True,
)
train_stats = self.train_minibatch(
mini_batch_dict["logprobs"],
mini_batch_dict["values"],
logprobs,
logits,
vpreds,
mini_batch_dict["masks"],
mini_batch_dict["advantages"],
mini_batch_dict["returns"],
)
all_stats.append(train_stats)
# typically, early stopping is done at the epoch level
if self.config.early_stopping:
policykl = train_stats["policy/policykl"]
early_stop = self._early_stop(policykl)
if early_stop:
break
timing["time/ppo/optimize_step"] = time.time() - t
t = time.time()
train_stats = stack_dicts(all_stats)
# reshape advantages/ratios such that they are not averaged.
train_stats["policy/advantages"] = torch.flatten(train_stats["policy/advantages"]).unsqueeze(0)
train_stats["policy/advantages"] = torch.nan_to_num(train_stats["policy/advantages"], WANDB_PADDING)
train_stats["policy/ratio"] = torch.flatten(train_stats["policy/ratio"]).unsqueeze(0)
stats = self.record_step_stats(
scores=scores,
logprobs=all_logprobs,
ref_logprobs=ref_logprobs,
non_score_reward=non_score_reward,
train_stats=train_stats,
kl_coef=self.kl_ctl.value,
masks=masks,
queries=queries,
responses=responses,
kls=kls,
)
# Gather/Reduce stats from all processes
if self.is_distributed:
stats = self.gather_stats(stats)
stats = stats_to_np(stats)
timing["time/ppo/calc_stats"] = time.time() - t
stats["ppo/learning_rate"] = self.optimizer.param_groups[0]["lr"]
# Update the KL control - multiply the batch_size by the number of processes
self.kl_ctl.update(
stats["objective/kl"],
self.config.batch_size * self.accelerator.num_processes,
)
# Log the total ppo time
timing["time/ppo/total"] = time.time() - t0
stats.update(timing)
# post-process stats for tensorboard and other loggers
if self.config.log_with != "wandb":
stats = convert_to_scalar(stats)
if self.lr_scheduler is not None:
self.lr_scheduler.step()
return stats
def _early_stop(self, policykl):
r"""
Handles the early stopping logic. If the policy KL is greater than the target KL, then the gradient is zeroed and
the optimization step is skipped.
This also handles the multi-gpu case where the policy KL is averaged across all processes.
Args:
            policykl (torch.Tensor):
                The policy KL for the current batch
Returns:
`bool`: whether to early stop or not
"""
early_stop = False
if not self.config.early_stopping:
return early_stop
if not self.is_distributed and policykl > 1.5 * self.config.target_kl:
self.optimizer.zero_grad()
early_stop = True
elif self.is_distributed:
import torch.distributed as dist
# Wait for all processes to finish
dist.barrier()
# all gather the policykl
dist.all_reduce(policykl, dist.ReduceOp.SUM)
policykl /= self.accelerator.num_processes
if policykl > 1.5 * self.config.target_kl:
self.optimizer.zero_grad()
early_stop = True
return early_stop
def gather_stats(self, stats):
"""
Gather stats from all processes. Useful in the context of distributed training.
Args:
stats (dict[str, Any]):
a dictionary of stats to be gathered. The stats should contain torch tensors.
Returns:
`dict[str, Any]`: A dictionary of stats with the tensors gathered.
"""
import torch.distributed as dist
# Wait for all processes to finish
dist.barrier()
for k, v in stats.items():
if isinstance(v, torch.Tensor):
dist.all_reduce(v.to(self.accelerator.device), dist.ReduceOp.SUM)
v /= self.accelerator.num_processes
stats[k] = v
return stats
def prepare_model_inputs(self, queries: torch.Tensor, responses: torch.Tensor):
if self.is_encoder_decoder:
input_data = self.data_collator(
[{"input_ids": q, "attention_mask": torch.ones_like(q)} for q in queries]
).to(self.current_device)
decoder_inputs = self.data_collator(
[{"input_ids": r, "attention_mask": torch.ones_like(r)} for r in responses]
).to(self.current_device)
input_data["decoder_input_ids"] = decoder_inputs["input_ids"]
input_data["decoder_attention_mask"] = decoder_inputs["attention_mask"]
else:
input_ids = [torch.cat([q, r]) for q, r in zip(queries, responses)]
input_data = self.data_collator(
[{"input_ids": ids, "attention_mask": torch.ones_like(ids)} for ids in input_ids]
).to(self.current_device)
input_data.pop("labels", None) # we don't want to compute LM losses
return input_data
@PPODecorators.empty_device_cache()
def batched_forward_pass(
self,
model: PreTrainedModelWrapper,
queries: torch.Tensor,
responses: torch.Tensor,
model_inputs: dict,
return_logits: bool = False,
response_masks: Optional[torch.Tensor] = None,
):
"""
Calculate model outputs in multiple batches.
Args:
queries (`torch.LongTensor`):
List of tensors containing the encoded queries, shape (`batch_size`, `query_length`)
responses (`torch.LongTensor`):
List of tensors containing the encoded responses, shape (`batch_size`, `response_length`)
return_logits (`bool`, *optional*, defaults to `False`):
Whether to return all_logits. Set to `False` if logits are not needed to reduce memory consumption.
Returns:
(tuple):
                - all_logprobs (`torch.FloatTensor`): Log probabilities of the responses,
                    shape (`batch_size`, `response_length`)
                - all_logits (`torch.FloatTensor` or `None`): Logits of the model if `return_logits=True`, else `None`,
                    shape (`batch_size`, `response_length`, `vocab_size`)
                - all_values (`torch.FloatTensor`): Values of the responses, shape (`batch_size`, `response_length`)
                - all_masks (`torch.LongTensor`): Masks of the response tokens, shape (`batch_size`, `response_length`)
"""
bs = len(queries)
fbs = self.config.mini_batch_size
all_logprobs = []
all_logits = []
all_masks = []
all_values = []
model.eval()
for i in range(math.ceil(bs / fbs)):
input_kwargs = {key: value[i * fbs : (i + 1) * fbs] for key, value in model_inputs.items()}
query_batch = queries[i * fbs : (i + 1) * fbs]
response_batch = responses[i * fbs : (i + 1) * fbs]
if response_masks is not None:
response_masks_batch = response_masks[i * fbs : (i + 1) * fbs]
logits, _, values = model(**input_kwargs)
if self.is_encoder_decoder:
input_ids = input_kwargs["decoder_input_ids"]
attention_mask = input_kwargs["decoder_attention_mask"]
else:
input_ids = input_kwargs["input_ids"]
attention_mask = input_kwargs["attention_mask"]
logprobs = logprobs_from_logits(logits[:, :-1, :], input_ids[:, 1:])
masks = torch.zeros_like(attention_mask)
masks[:, :-1] = attention_mask[:, 1:]
for j in range(len(query_batch)):
if self.is_encoder_decoder:
                    # In encoder-decoder models, the decoder sequence always starts at index 1, right after the padding/start token
start = 1
end = attention_mask[j, :].sum() - 1
else:
start = len(query_batch[j]) - 1 # logprobs starts from the second query token
if attention_mask[j, 0] == 0: # offset left padding
start += attention_mask[j, :].nonzero()[0]
end = start + len(response_batch[j])
if response_masks is not None:
response_masks_batch[j] = torch.cat(
(torch.zeros_like(query_batch[j]), response_masks_batch[j])
)[1:]
masks[j, :start] = 0
masks[j, end:] = 0
if response_masks is not None:
masks[j, start:end] = masks[j, start:end] * response_masks_batch[j][start:end]
if return_logits:
all_logits.append(logits)
else:
del logits
all_values.append(values)
all_logprobs.append(logprobs)
all_masks.append(masks)
return (
torch.cat(all_logprobs),
torch.cat(all_logits)[:, :-1] if return_logits else None,
torch.cat(all_values)[:, :-1],
torch.cat(all_masks)[:, :-1],
)
@PPODecorators.empty_device_cache()
def train_minibatch(
self,
old_logprobs: torch.FloatTensor,
values: torch.FloatTensor,
logprobs: torch.FloatTensor,
logits: torch.FloatTensor,
vpreds: torch.FloatTensor,
mask: torch.LongTensor,
advantages: torch.FloatTensor,
returns: torch.FloatTensor,
):
"""
Train one PPO minibatch
Args:
logprobs (`torch.FloatTensor`):
Log probabilities of the model, shape [mini_batch_size, response_length]
values (`torch.FloatTensor`):
Values of the value head, shape [mini_batch_size, response_length]
query (`torch.LongTensor`):
Encoded queries, shape [mini_batch_size, query_length]
response (`torch.LongTensor`):
Encoded responses, shape [mini_batch_size, response_length]
model_input (`torch.LongTensor`):
Concatenated queries and responses, shape [mini_batch_size, query_length+response_length]
Returns:
train_stats (dict[str, `torch.Tensor`]):
Dictionary of training statistics
"""
self.model.train()
loss_p, loss_v, train_stats = self.loss(
old_logprobs, values, logits, vpreds, logprobs, mask, advantages, returns
)
loss = loss_p + loss_v
self.accelerator.backward(loss)
if self.config.max_grad_norm is not None:
if self.accelerator.sync_gradients:
self.accelerator.clip_grad_norm_(self.model_params, self.config.max_grad_norm)
self.optimizer.step()
# we call optimizer.zero_grad() every time and let `accelerator` handle accumulation
# see https://huggingface.co/docs/accelerate/usage_guides/gradient_accumulation#the-finished-code
self.optimizer.zero_grad()
return train_stats
def compute_rewards(
self,
scores: torch.FloatTensor,
logprobs: torch.FloatTensor,
ref_logprobs: torch.FloatTensor,
masks: torch.LongTensor,
):
"""
Compute per token rewards from scores and KL-penalty.
Args:
scores (`torch.FloatTensor`):
Scores from the reward model, shape (`batch_size`)
logprobs (`torch.FloatTensor`):
Log probabilities of the model, shape (`batch_size`, `response_length`)
ref_logprobs (`torch.FloatTensor`):
Log probabilities of the reference model, shape (`batch_size`, `response_length`)
Returns:
`torch.FloatTensor`: Per token rewards, shape (`batch_size`, `response_length`)
`torch.FloatTensor`: Non score rewards, shape (`batch_size`, `response_length`)
`torch.FloatTensor`: KL penalty, shape (`batch_size`, `response_length`)
"""
rewards, non_score_rewards, kls = [], [], []
for score, logprob, ref_logprob, mask in zip(scores, logprobs, ref_logprobs, masks):
# compute KL penalty (from difference in logprobs)
kl = self._kl_penalty(logprob, ref_logprob)
kls.append(kl)
non_score_reward = -self.kl_ctl.value * kl
non_score_rewards.append(non_score_reward)
reward = non_score_reward.clone()
last_non_masked_index = mask.nonzero()[-1]
# reward is preference model score + KL penalty
reward[last_non_masked_index] += score
rewards.append(reward)
return torch.stack(rewards), torch.stack(non_score_rewards), torch.stack(kls)
def _kl_penalty(self, logprob: torch.FloatTensor, ref_logprob: torch.FloatTensor) -> torch.FloatTensor:
if self.config.kl_penalty == "kl":
return logprob - ref_logprob
if self.config.kl_penalty == "abs":
return (logprob - ref_logprob).abs()
if self.config.kl_penalty == "mse":
return 0.5 * (logprob - ref_logprob).square()
if self.config.kl_penalty == "full":
            # The argument order is flipped to work around https://github.com/pytorch/pytorch/issues/57459
return F.kl_div(ref_logprob, logprob, log_target=True, reduction="none").sum(-1)
raise NotImplementedError
def compute_advantages(
self,
values: torch.FloatTensor,
rewards: torch.FloatTensor,
mask: torch.FloatTensor,
):
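        # Generalized Advantage Estimation (GAE): accumulate discounted TD residuals
        # backwards over the response, controlled by `gamma` (discount) and `lam` (GAE smoothing)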
lastgaelam = 0
advantages_reversed = []
gen_len = rewards.shape[-1]
values = values * mask
rewards = rewards * mask
if self.config.whiten_rewards:
rewards = masked_whiten(rewards, mask, shift_mean=False)
for t in reversed(range(gen_len)):
nextvalues = values[:, t + 1] if t < gen_len - 1 else 0.0
delta = rewards[:, t] + self.config.gamma * nextvalues - values[:, t]
lastgaelam = delta + self.config.gamma * self.config.lam * lastgaelam
advantages_reversed.append(lastgaelam)
advantages = torch.stack(advantages_reversed[::-1]).transpose(0, 1)
returns = advantages + values
advantages = masked_whiten(advantages, mask)
advantages = advantages.detach()
return values, advantages, returns
def loss(
self,
old_logprobs: torch.FloatTensor,
values: torch.FloatTensor,
logits: torch.FloatTensor,
vpreds: torch.FloatTensor,
logprobs: torch.FloatTensor,
mask: torch.LongTensor,
advantages: torch.FloatTensor,
returns: torch.FloatTensor,
):
"""
Calculate policy and value losses.
Args:
old_logprobs (`torch.FloatTensor`):
Log probabilities of the model, shape (`batch_size`, `response_length`)
values (`torch.FloatTensor`):
Values of the value head, shape (`batch_size`, `response_length`)
rewards (`torch.FloatTensor`):
Rewards from the reward model, shape (`batch_size`, `response_length`)
logits (`torch.FloatTensor`):
Logits of the model, shape (`batch_size`, `response_length`, `vocab_size`)
v_pred (`torch.FloatTensor`):
Values of the value head, shape (`batch_size`, `response_length`)
logprobs (`torch.FloatTensor`):
Log probabilities of the model, shape (`batch_size`, `response_length`)
"""
vpredclipped = clip_by_value(
vpreds,
values - self.config.cliprange_value,
values + self.config.cliprange_value,
)
vf_losses1 = (vpreds - returns) ** 2
vf_losses2 = (vpredclipped - returns) ** 2
vf_loss = 0.5 * masked_mean(torch.max(vf_losses1, vf_losses2), mask)
vf_clipfrac = masked_mean(torch.gt(vf_losses2, vf_losses1).float(), mask)
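        # Clipped PPO surrogate objective: compare the probability ratio between the current
        # and rollout policies, then take the pessimistic (maximum) of the unclipped and
        # clipped losses, averaged over the response mask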
ratio = torch.exp(logprobs - old_logprobs)
pg_losses = -advantages * ratio
pg_losses2 = -advantages * torch.clamp(ratio, 1.0 - self.config.cliprange, 1.0 + self.config.cliprange)
pg_loss = masked_mean(torch.max(pg_losses, pg_losses2), mask)
pg_clipfrac = masked_mean(torch.gt(pg_losses2, pg_losses).float(), mask)
loss = pg_loss + self.config.vf_coef * vf_loss
avg_ratio = masked_mean(ratio, mask).item()
if avg_ratio > self.config.ratio_threshold:
warnings.warn(
f"The average ratio of batch ({avg_ratio:.2f}) exceeds threshold {self.config.ratio_threshold:.2f}. Skipping batch."
)
pg_loss = pg_loss * 0.0
vf_loss = vf_loss * 0.0
loss = loss * 0.0
entropy = masked_mean(entropy_from_logits(logits), mask)
approxkl = 0.5 * masked_mean((logprobs - old_logprobs) ** 2, mask)
policykl = masked_mean(old_logprobs - logprobs, mask)
return_mean, return_var = masked_mean(returns, mask), masked_var(returns, mask)
value_mean, value_var = masked_mean(values, mask), masked_var(values, mask)
stats = dict(
loss=dict(policy=pg_loss.detach(), value=vf_loss.detach(), total=loss.detach()),
policy=dict(
entropy=entropy.detach(),
approxkl=approxkl.detach(),
policykl=policykl.detach(),
clipfrac=pg_clipfrac.detach(),
advantages=advantages.detach(),
advantages_mean=masked_mean(advantages, mask).detach(),
ratio=ratio.detach(),
),
returns=dict(mean=return_mean.detach(), var=return_var.detach()),
val=dict(
vpred=masked_mean(vpreds, mask).detach(),
error=masked_mean((vpreds - returns) ** 2, mask).detach(),
clipfrac=vf_clipfrac.detach(),
mean=value_mean.detach(),
var=value_var.detach(),
),
)
return pg_loss, self.config.vf_coef * vf_loss, flatten_dict(stats)
def record_step_stats(self, kl_coef: float, **data):
"""
Record training step statistics.
Args:
kl_coef (`float`):
KL coefficient
data (`dict`):
Dictionary of training step data
Returns:
stats (`dict`):
Dictionary of training step statistics
"""
mask = data.pop("masks")
kls = data.pop("kls")
        kl_list = (kls * mask).sum(axis=-1)
mean_kl = kl_list.mean()
mean_entropy = (-data["logprobs"] * mask).sum(axis=-1).mean()
mean_non_score_reward = masked_mean(
data["non_score_reward"], mask
) # non_score_reward is size `batch_size`, `response_length`
mean_scores = data["scores"].mean() # scores is size `batch_size`
std_scores = data["scores"].std()
if mean_kl.item() < -1.0:
# warn users
warnings.warn(
f"KL divergence is starting to become negative: {mean_kl.item():.2f} - this might be a precursor for failed training."
" sometimes this happens because the generation kwargs are not correctly set. Please make sure"
" that the generation kwargs are set correctly, or review your training hyperparameters."
)
stats = {
"objective/kl": mean_kl,
"objective/kl_dist": kl_list,
"objective/logprobs": data["logprobs"],
"objective/ref_logprobs": data["ref_logprobs"],
"objective/kl_coef": kl_coef,
"objective/entropy": mean_entropy,
"ppo/mean_non_score_reward": mean_non_score_reward,
"ppo/mean_scores": mean_scores,
"ppo/std_scores": std_scores,
}
# Log text properties
query_lens = torch.tensor([len(query) for query in data["queries"]], dtype=torch.float)
response_lens = torch.tensor([len(response) for response in data["responses"]], dtype=torch.float)
stats["tokens/queries_len_mean"] = torch.mean(query_lens).cpu().numpy().item()
stats["tokens/queries_len_std"] = torch.std(query_lens).cpu().numpy().item()
stats["tokens/queries_dist"] = query_lens.cpu().numpy()
stats["tokens/responses_len_mean"] = torch.mean(response_lens).cpu().numpy().item()
stats["tokens/responses_len_std"] = torch.std(response_lens).cpu().numpy().item()
stats["tokens/responses_dist"] = response_lens.cpu().numpy()
for k, v in data["train_stats"].items():
stats[f"ppo/{k}"] = torch.mean(v, axis=0)
stats["ppo/val/var_explained"] = 1 - stats["ppo/val/error"] / stats["ppo/returns/var"]
return stats
def log_stats(
self,
stats: dict,
batch: dict,
rewards: List[torch.FloatTensor],
columns_to_log: List[str] = ["query", "response"],
):
"""
A function that logs all the training stats. Call it at the end of each epoch.
Args:
stats (dict[str, Any]):
A dictionary of training stats.
batch (dict[str, Any]):
A dictionary of batch data, this contains the queries and responses.
rewards (`List[torch.FloatTensor]`):
A tensor of rewards.
"""
# all gather stats
if not isinstance(rewards, torch.Tensor):
rewards = torch.tensor(rewards).to(self.current_device)
rewards = self.accelerator.gather(rewards).flatten()
if self.config.log_with == "wandb":
import wandb
if any([column_to_log not in batch.keys() for column_to_log in columns_to_log]):
raise ValueError(f"Columns to log {columns_to_log} are not present in the batch {batch.keys()}.")
batch_list = [batch[column_to_log] for column_to_log in columns_to_log]
if self.is_distributed:
gathered_batch_list = []
for b in batch_list:
flattened = gather_object(b)
gathered_batch_list.append(flattened)
batch_list = gathered_batch_list
# Log only if we are in the main process
if self.accelerator.is_main_process:
logs = {}
# Log stats
if "query" not in batch.keys() and "response" not in batch.keys():
# warn the user that the game logs will not be logged
warnings.warn(
"The game logs will not be logged because the batch does not contain the keys 'query' and "
"'response'. "
)
elif self.config.log_with == "wandb":
table_rows = [list(r) for r in zip(*batch_list, rewards.cpu().tolist())]
logs.update({"game_log": wandb.Table(columns=[*columns_to_log, "reward"], rows=table_rows)})
logs.update(stats)
# manually cast in fp32 for bf16 torch tensors
for k, v in logs.items():
if isinstance(v, torch.Tensor) and v.dtype == torch.bfloat16:
logs[k] = v.float()
logs["env/reward_mean"] = torch.mean(rewards).cpu().numpy().item()
logs["env/reward_std"] = torch.std(rewards).cpu().numpy().item()
logs["env/reward_dist"] = rewards.cpu().numpy()
if self.config.log_with == "tensorboard":
# update the current step
self.current_step += 1
self.accelerator.log(
logs,
step=self.current_step if self.config.log_with == "tensorboard" else None,
)
def create_model_card(self, path: str, model_name: Optional[str] = "TRL Model") -> None:
"""Creates and saves a model card for a TRL model.
Args:
path (`str`): The path to save the model card to.
model_name (`str`, *optional*): The name of the model, defaults to `TRL Model`.
"""
try:
user = whoami()["name"]
# handle the offline case
except: # noqa
warnings.warn("Cannot retrieve user information assuming you are running in offline mode.")
return
if not os.path.exists(path):
os.makedirs(path)
model_card_content = MODEL_CARD_TEMPLATE.format(model_name=model_name, model_id=f"{user}/{path}")
with open(os.path.join(path, "README.md"), "w", encoding="utf-8") as f:
f.write(model_card_content)
def _save_pretrained(self, save_directory: str) -> None:
self.accelerator.unwrap_model(self.model).save_pretrained(save_directory)
self.tokenizer.save_pretrained(save_directory)
self.create_model_card(save_directory)
def _show_tokens(self, tokens, masks):
from rich import print
from rich.text import Text
text = Text()
for i, (token, mask) in enumerate(zip(tokens, masks)):
if mask == 1:
text.append(self.tokenizer.decode(token.item()), style="black on deep_sky_blue1")
text.append(" ")
else:
text.append(self.tokenizer.decode(token.item()), style="black on cyan3")
text.append(" ")
print(text)
def _prepare_deepspeed(self, model: PreTrainedModelWrapper):
# Adapted from accelerate: https://github.com/huggingface/accelerate/blob/739b135f8367becb67ffaada12fe76e3aa60fefd/src/accelerate/accelerator.py#L1473
deepspeed_plugin = self.accelerator.state.deepspeed_plugin
config_kwargs = deepspeed_plugin.deepspeed_config
if model is not None:
if hasattr(model, "config"):
hidden_size = (
max(model.config.hidden_sizes)
if getattr(model.config, "hidden_sizes", None)
else getattr(model.config, "hidden_size", None)
)
if hidden_size is not None and config_kwargs["zero_optimization"]["stage"] == 3:
# Note that `stage3_prefetch_bucket_size` can produce DeepSpeed messages like: `Invalidate trace cache @ step 0: expected module 1, but got module 0`
# This is expected and is not an error, see: https://github.com/microsoft/DeepSpeed/discussions/4081
config_kwargs.update(
{
"zero_optimization.reduce_bucket_size": hidden_size * hidden_size,
"zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
"zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size,
}
)
# If ZeRO-3 is used, we shard both the active and reference model.
# Otherwise, we assume the reference model fits in memory and is initialized on each device with ZeRO disabled (stage 0)
if config_kwargs["zero_optimization"]["stage"] != 3:
config_kwargs["zero_optimization"]["stage"] = 0
model, *_ = deepspeed.initialize(model=model, config=config_kwargs)
model.eval()
return model
| trl/trl/trainer/ppo_trainer.py/0 | {
"file_path": "trl/trl/trainer/ppo_trainer.py",
"repo_id": "trl",
"token_count": 29303
} | 431 |
# Builds GPU docker image of PyTorch specifically
# Uses multi-staged approach to reduce size
# Stage 1
# Use base conda image to reduce time
FROM continuumio/miniconda3:latest AS compile-image
# Specify py version
ENV PYTHON_VERSION=3.8
# Install apt libs
RUN apt-get update && \
apt-get install -y curl git wget && \
apt-get clean && \
rm -rf /var/lib/apt/lists*
# Create our conda env
RUN conda create --name accelerate python=${PYTHON_VERSION} ipython jupyter pip
# We don't install pytorch here yet since CUDA isn't available
# instead we use the direct torch wheel
ENV PATH /opt/conda/envs/accelerate/bin:$PATH
# Activate our bash shell
RUN chsh -s /bin/bash
SHELL ["/bin/bash", "-c"]
# Activate the conda env, install mpi4py, and install torch + accelerate
RUN source activate accelerate && conda install -c conda-forge mpi4py
RUN source activate accelerate && \
python3 -m pip install --no-cache-dir \
git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \
--extra-index-url https://download.pytorch.org/whl/cu117
RUN python3 -m pip install --no-cache-dir bitsandbytes
# Stage 2
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04 AS build-image
COPY --from=compile-image /opt/conda /opt/conda
ENV PATH /opt/conda/bin:$PATH
# Install apt libs
RUN apt-get update && \
apt-get install -y curl git wget && \
apt-get clean && \
rm -rf /var/lib/apt/lists*
RUN echo "source activate accelerate" >> ~/.profile
# Activate the conda environment
CMD ["/bin/bash"] | accelerate/docker/accelerate-gpu/Dockerfile/0 | {
"file_path": "accelerate/docker/accelerate-gpu/Dockerfile",
"repo_id": "accelerate",
"token_count": 539
} | 0 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Training on TPUs with 🤗 Accelerate
Training on TPUs can be slightly different from training on multi-gpu, even with 🤗 Accelerate. This guide aims to show you
where you should be careful and why, as well as the best practices in general.
## Training in a Notebook
The main thing to be careful about when training on TPUs comes from the [`notebook_launcher`]. As mentioned in the [notebook tutorial](../usage_guides/notebook), you need to
restructure your training code into a function that can get passed to the [`notebook_launcher`] function and be careful about not declaring any tensors on the GPU.
While on a TPU that last part is not as important, a critical part to understand is that when you launch code from a notebook you do so through a process called **forking**.
When launching from the command-line, you perform **spawning**, where a python process is not currently running and you *spawn* a new process in. Since your Jupyter notebook is already
utilizing a python process, you need to *fork* a new process from it to launch your code.
Where this becomes important is in regard to declaring your model. On forked TPU processes, it is recommended that you instantiate your model *once* and pass this into your
training function. This is different from training on GPUs, where you create `n` models that have their gradients synced and back-propagated at certain moments. Instead, one
model instance is shared between all the nodes and it is passed back and forth. This is especially important when training on low-resource TPUs such as those provided in Kaggle kernels or
on Google Colaboratory.
Below is an example of a training function passed to the [`notebook_launcher`] if training on CPUs or GPUs:
<Tip>
This code snippet is based on the one from the `simple_nlp_example` notebook found [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb) with slight
modifications for the sake of simplicity
</Tip>
```python
def training_function():
# Initialize accelerator
accelerator = Accelerator()
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)
train_dataloader, eval_dataloader = create_dataloaders(
train_batch_size=hyperparameters["train_batch_size"], eval_batch_size=hyperparameters["eval_batch_size"]
)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=hyperparameters["learning_rate"])
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
num_epochs = hyperparameters["num_epochs"]
# Now we train the model
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
outputs = model(**batch)
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
```
```python
from accelerate import notebook_launcher
notebook_launcher(training_function)
```
<Tip>
The `notebook_launcher` will default to 8 processes if 🤗 Accelerate has been configured for a TPU
</Tip>
If you use this example and declare the model *inside* the training loop, then on a low-resource system you will potentially see an error
like:
```
ProcessExitedException: process 0 terminated with signal SIGSEGV
```
This error is *extremely* cryptic, but the basic explanation is that you ran out of system RAM. You can avoid this entirely by reconfiguring the training function to
accept a single `model` argument, and declare it in an outside cell:
```python
# In another Jupyter cell
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)
```
```diff
+ def training_function(model):
# Initialize accelerator
accelerator = Accelerator()
- model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)
train_dataloader, eval_dataloader = create_dataloaders(
train_batch_size=hyperparameters["train_batch_size"], eval_batch_size=hyperparameters["eval_batch_size"]
)
...
```
And finally calling the training function with:
```diff
from accelerate import notebook_launcher
- notebook_launcher(training_function)
+ notebook_launcher(training_function, (model,))
```
<Tip>
The above workaround is only needed when launching a TPU instance from a Jupyter Notebook on a low-resource server such as Google Colaboratory or Kaggle. If
using a script or launching on a much beefier server, declaring the model beforehand is not needed.
</Tip>
## Mixed Precision and Global Variables
As mentioned in the [mixed precision tutorial](../usage_guides/mixed_precision), 🤗 Accelerate supports fp16 and bf16, both of which can be used on TPUs.
That being said, ideally `bf16` should be utilized as it is extremely efficient to use.
There are two "layers" when using `bf16` and 🤗 Accelerate on TPUs, at the base level and at the operation level.
At the base level, this is enabled when passing `mixed_precision="bf16"` to `Accelerator`, such as:
```python
accelerator = Accelerator(mixed_precision="bf16")
```
By default, this will cast `torch.float` and `torch.double` to `bfloat16` on TPUs.
Under the hood, this sets the `XLA_USE_BF16` environment variable to `1`.
A further configuration you can set is the `XLA_DOWNCAST_BF16` environment variable. If set to `1`, then
`torch.float` is `bfloat16` and `torch.double` is `float32`.
This is performed in the `Accelerator` object when passing `downcast_bf16=True`:
```python
accelerator = Accelerator(mixed_precision="bf16", downcast_bf16=True)
```
Downcasting, rather than using bf16 everywhere, is useful when you are calculating metrics, logging values, or doing anything else where raw bf16 tensors would be unusable.
## Training Times on TPUs
As you launch your script, you may notice that training seems exceptionally slow at first. This is because TPUs
first run through a few batches of data to see how much memory to allocate before finally utilizing this configured
memory allocation extremely efficiently.
If you notice that your evaluation code takes longer because a larger batch size is being used, it is recommended to keep the
evaluation batch size the same as the training batch size. Otherwise the memory will be reallocated for this
new batch size after the first few iterations.
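As a rough sketch of what this looks like in practice (reusing the illustrative `create_dataloaders` helper and `hyperparameters` dictionary from the example above, which are not part of 🤗 Accelerate itself):
```python
# Keep the evaluation batch size identical to the training batch size so XLA
# does not have to re-trace and re-allocate memory for a new shape.
batch_size = hyperparameters["train_batch_size"]
train_dataloader, eval_dataloader = create_dataloaders(
    train_batch_size=batch_size, eval_batch_size=batch_size
)
```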
<Tip>
Just because the memory is allocated does not mean it will be used or that the batch size will increase when going back to your training dataloader.
</Tip>
| accelerate/docs/source/concept_guides/training_tpu.md/0 | {
"file_path": "accelerate/docs/source/concept_guides/training_tpu.md",
"repo_id": "accelerate",
"token_count": 2214
} | 1 |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Helpful Utilities
Below are a variety of utility functions that 🤗 Accelerate provides, broken down by use-case.
## Constants
Constants used throughout 🤗 Accelerate for reference
The following are constants used when utilizing [`Accelerator.save_state`]
`utils.MODEL_NAME`: `"pytorch_model"`
`utils.OPTIMIZER_NAME`: `"optimizer"`
`utils.RNG_STATE_NAME`: `"random_states"`
`utils.SCALER_NAME`: `"scaler.pt`
`utils.SCHEDULER_NAME`: `"scheduler`
The following are constants used when utilizing [`Accelerator.save_model`]
`utils.WEIGHTS_NAME`: `"pytorch_model.bin"`
`utils.SAFE_WEIGHTS_NAME`: `"model.safetensors"`
`utils.WEIGHTS_INDEX_NAME`: `"pytorch_model.bin.index.json"`
`utils.SAFE_WEIGHTS_INDEX_NAME`: `"model.safetensors.index.json"`
## Data Classes
These are basic dataclasses used throughout 🤗 Accelerate and they can be passed in as parameters.
### Standalone
These are standalone dataclasses used for checks, such as the type of distributed system being used
[[autodoc]] utils.ComputeEnvironment
[[autodoc]] utils.DistributedType
[[autodoc]] utils.DynamoBackend
[[autodoc]] utils.LoggerType
[[autodoc]] utils.PrecisionType
[[autodoc]] utils.RNGType
[[autodoc]] utils.SageMakerDistributedType
### Kwargs
These are configurable arguments for specific interactions throughout the PyTorch ecosystem that Accelerate handles under the hood.
[[autodoc]] utils.AutocastKwargs
[[autodoc]] utils.DistributedDataParallelKwargs
[[autodoc]] utils.FP8RecipeKwargs
[[autodoc]] utils.GradScalerKwargs
[[autodoc]] utils.InitProcessGroupKwargs
## Plugins
These are plugins that can be passed to the [`Accelerator`] object. While they are defined elsewhere in the documentation,
for convenience all of them are available to see here:
[[autodoc]] utils.DeepSpeedPlugin
[[autodoc]] utils.FullyShardedDataParallelPlugin
[[autodoc]] utils.GradientAccumulationPlugin
[[autodoc]] utils.MegatronLMPlugin
[[autodoc]] utils.TorchDynamoPlugin
## Configurations
These are classes which can be configured and passed through to the appropriate integration
[[autodoc]] utils.BnbQuantizationConfig
[[autodoc]] utils.ProjectConfiguration
## Environmental Variables
These are environmental variables that can be enabled for different use cases
* `ACCELERATE_DEBUG_MODE` (`str`): Whether to run accelerate in debug mode. More info available [here](../usage_guides/debug.md).
## Data Manipulation and Operations
These include data operations that mimic the same `torch` ops but can be used on distributed processes.
[[autodoc]] utils.broadcast
[[autodoc]] utils.broadcast_object_list
[[autodoc]] utils.concatenate
[[autodoc]] utils.convert_outputs_to_fp32
[[autodoc]] utils.convert_to_fp32
[[autodoc]] utils.gather
[[autodoc]] utils.gather_object
[[autodoc]] utils.listify
[[autodoc]] utils.pad_across_processes
[[autodoc]] utils.recursively_apply
[[autodoc]] utils.reduce
[[autodoc]] utils.send_to_device
[[autodoc]] utils.slice_tensors
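As a brief illustration (a minimal sketch, assuming a script that has already been launched on several processes with `accelerate launch`), these ops can be used much like their `torch` counterparts:
```python
import torch
from accelerate import Accelerator
from accelerate.utils import gather

accelerator = Accelerator()
# Each process holds its own tensor; `gather` concatenates them across
# processes so that every process ends up with the full result.
local_tensor = torch.tensor([accelerator.process_index], device=accelerator.device)
all_tensors = gather(local_tensor)
```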
## Environment Checks
These functionalities check the state of the current working environment including information about the operating system itself, what it can support, and if particular dependencies are installed.
[[autodoc]] utils.is_bf16_available
[[autodoc]] utils.is_ipex_available
[[autodoc]] utils.is_mps_available
[[autodoc]] utils.is_npu_available
[[autodoc]] utils.is_torch_version
[[autodoc]] utils.is_tpu_available
[[autodoc]] utils.is_xpu_available
## Environment Manipulation
[[autodoc]] utils.patch_environment
[[autodoc]] utils.clear_environment
[[autodoc]] utils.write_basic_config
When setting up 🤗 Accelerate for the first time, [`~utils.write_basic_config`] can be used as an alternative to running `accelerate config` for quick configuration.
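For example (a minimal sketch; adjust the arguments to your own setup):
```python
from accelerate.utils import write_basic_config

# Writes a default configuration file for the current machine without going
# through the interactive questionnaire, here enabling fp16 mixed precision.
write_basic_config(mixed_precision="fp16")
```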
## Memory
[[autodoc]] utils.find_executable_batch_size
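A minimal sketch of how this decorator is typically used (the training body is left out and purely illustrative):
```python
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # If an out-of-memory error is raised, the decorator lowers `batch_size`
    # and retries this function until training fits in memory.
    ...

train()
```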
## Modeling
These utilities relate to interacting with PyTorch models
[[autodoc]] utils.calculate_maximum_sizes
[[autodoc]] utils.compute_module_sizes
[[autodoc]] utils.extract_model_from_parallel
[[autodoc]] utils.get_balanced_memory
[[autodoc]] utils.get_max_layer_size
[[autodoc]] utils.infer_auto_device_map
[[autodoc]] utils.load_checkpoint_in_model
[[autodoc]] utils.load_offloaded_weights
[[autodoc]] utils.load_state_dict
[[autodoc]] utils.offload_state_dict
[[autodoc]] utils.retie_parameters
[[autodoc]] utils.set_module_tensor_to_device
[[autodoc]] utils.shard_checkpoint
## Parallel
These include general utilities that should be used when working in parallel.
[[autodoc]] utils.extract_model_from_parallel
[[autodoc]] utils.save
[[autodoc]] utils.wait_for_everyone
## Random
These utilities relate to setting and synchronizing of all the random states.
[[autodoc]] utils.set_seed
[[autodoc]] utils.synchronize_rng_state
[[autodoc]] utils.synchronize_rng_states
## PyTorch XLA
These include utilities that are useful while using PyTorch with XLA.
[[autodoc]] utils.install_xla
## Loading model weights
These include utilities that are useful to load checkpoints.
[[autodoc]] utils.load_checkpoint_in_model
## Quantization
These include utilities that are useful to quantize model.
[[autodoc]] utils.load_and_quantize_model
| accelerate/docs/source/package_reference/utilities.md/0 | {
"file_path": "accelerate/docs/source/package_reference/utilities.md",
"repo_id": "accelerate",
"token_count": 1932
} | 2 |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Amazon SageMaker
Hugging Face and Amazon introduced new [Hugging Face Deep Learning Containers (DLCs)](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers) to
make it easier than ever to train Hugging Face Transformer models in [Amazon SageMaker](https://aws.amazon.com/sagemaker/).
## Getting Started
### Setup & Installation
Before you can run your 🤗 Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. If you do not
have an AWS account yet learn more [here](https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html).
After you have your AWS Account you need to install the `sagemaker` sdk for 🤗 Accelerate with:
```bash
pip install "accelerate[sagemaker]" --upgrade
```
🤗 Accelerate currently uses the 🤗 DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. 🤗
Accelerate is not in the DLC yet (will soon be added!) so to use it within Amazon SageMaker you need to create a
`requirements.txt` in the same directory where your training script is located and add it as dependency:
```
accelerate
```
You should also add any other dependencies you have to this `requirements.txt`.
### Configure 🤗 Accelerate
You can configure the launch configuration for Amazon SageMaker the same as you do for non SageMaker training jobs with
the 🤗 Accelerate CLI:
```bash
accelerate config
# In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 1
```
🤗 Accelerate will go through a questionnaire about your Amazon SageMaker setup and create a config file you can edit.
<Tip>
🤗 Accelerate is not saving any of your credentials.
</Tip>
### Prepare a 🤗 Accelerate fine-tuning script
The training script is very similar to a training script you might run outside of SageMaker, but to save your model
after training you need to specify either `/opt/ml/model` or use `os.environ["SM_MODEL_DIR"]` as your save
directory. After training, artifacts in this directory are uploaded to S3:
```diff
- torch.save('/opt/ml/model`)
+ accelerator.save('/opt/ml/model')
```
<Tip warning={true}>
SageMaker doesn’t support argparse actions. If you want to use, for example, boolean hyperparameters, you need to
specify type as bool in your script and provide an explicit True or False value for this hyperparameter. [[REF]](https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html#prepare-a-pytorch-training-script).
</Tip>
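A minimal sketch of what this can look like (the flag name and the parsing helper are illustrative, not something prescribed by SageMaker):
```python
import argparse

def str_to_bool(value: str) -> bool:
    # Hyperparameters arrive as strings on the command line, so parse the
    # explicit "True"/"False" value instead of relying on argparse actions.
    return str(value).lower() in ("true", "1")

parser = argparse.ArgumentParser()
parser.add_argument("--do_eval", type=str_to_bool, default=False)
args = parser.parse_args()
```
On the estimator side you would then pass an explicit value, e.g. `hyperparameters={"do_eval": True}`.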
### Launch Training
You can launch your training with 🤗 Accelerate CLI with:
```
accelerate launch path_to_script.py --args_to_the_script
```
This will launch your training script using your configuration. The only thing you have to do is provide all the
arguments needed by your training script as named arguments.
**Examples**
<Tip>
If you run one of the example scripts, don't forget to add `accelerator.save('/opt/ml/model')` to it.
</Tip>
```bash
accelerate launch ./examples/sagemaker_example.py
```
Outputs:
```
Configuring Amazon SageMaker environment
Converting Arguments to Hyperparameters
Creating Estimator
2021-04-08 11:56:50 Starting - Starting the training job...
2021-04-08 11:57:13 Starting - Launching requested ML instancesProfilerReport-1617883008: InProgress
.........
2021-04-08 11:58:54 Starting - Preparing the instances for training.........
2021-04-08 12:00:24 Downloading - Downloading input data
2021-04-08 12:00:24 Training - Downloading the training image..................
2021-04-08 12:03:39 Training - Training image download completed. Training in progress..
........
epoch 0: {'accuracy': 0.7598039215686274, 'f1': 0.8178438661710037}
epoch 1: {'accuracy': 0.8357843137254902, 'f1': 0.882249560632689}
epoch 2: {'accuracy': 0.8406862745098039, 'f1': 0.8869565217391304}
........
2021-04-08 12:05:40 Uploading - Uploading generated training model
2021-04-08 12:05:40 Completed - Training job completed
Training seconds: 331
Billable seconds: 331
You can find your model data at: s3://your-bucket/accelerate-sagemaker-1-2021-04-08-11-56-47-108/output/model.tar.gz
```
## Advanced Features
### Distributed Training: Data Parallelism
Set up the 🤗 Accelerate config by running `accelerate config` and answering the SageMaker questions.
To use SageMaker DDP, select it when asked
`What is the distributed mode? ([0] No distributed training, [1] data parallelism):`.
Example config below:
```yaml
base_job_name: accelerate-sagemaker-1
compute_environment: AMAZON_SAGEMAKER
distributed_type: DATA_PARALLEL
ec2_instance_type: ml.p3.16xlarge
iam_role_name: xxxxx
image_uri: null
mixed_precision: fp16
num_machines: 1
profile: xxxxx
py_version: py38
pytorch_version: 1.10.2
region: us-east-1
transformers_version: 4.17.0
use_cpu: false
```
### Distributed Training: Model Parallelism
*currently in development, will be supported soon.*
### Python packages and dependencies
🤗 Accelerate currently uses the 🤗 DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. If you
want to use different/other Python packages you can do this by adding them to the `requirements.txt`. These packages
will be installed before your training script is started.
### Local Training: SageMaker Local mode
The local mode in the SageMaker SDK allows you to run your training script locally inside the HuggingFace DLC (Deep Learning container)
or using your custom container image. This is useful for debugging and testing your training script inside the final container environment.
Local mode uses Docker compose (*Note: Docker Compose V2 is not supported yet*). The SDK will handle the authentication against ECR
to pull the DLC to your local environment. You can emulate CPU (single and multi-instance) and GPU (single instance) SageMaker training jobs.
To use local mode, you need to set your `ec2_instance_type` to `local`.
```yaml
ec2_instance_type: local
```
### Advanced configuration
The configuration allows you to override parameters for the [Estimator](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html).
These settings have to be applied in the config file and are not part of `accelerate config`. You can control many additional aspects of the training job, e.g. using Spot Instances, enabling network isolation, and more.
```yaml
additional_args:
# enable network isolation to restrict internet access for containers
enable_network_isolation: True
```
You can find all available configuration [here](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html).
### Use Spot Instances
You can use Spot Instances e.g. using (see [Advanced configuration](#advanced-configuration)):
```yaml
additional_args:
use_spot_instances: True
max_wait: 86400
```
*Note: Spot Instances can be terminated, in which case training is continued from a checkpoint. This is not handled by 🤗 Accelerate out of the box. Contact us if you would like this feature.*
### Remote scripts: Use scripts located on Github
*undecided if feature is needed. Contact us if you would like this feature.* | accelerate/docs/source/usage_guides/sagemaker.md/0 | {
"file_path": "accelerate/docs/source/usage_guides/sagemaker.md",
"repo_id": "accelerate",
"token_count": 2261
} | 3 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from manim import *
class Stage3(Scene):
def construct(self):
mem = Rectangle(height=0.5,width=0.5)
meta_mem = Rectangle(height=0.25,width=0.25)
fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)
cpu_left_col_base = [mem.copy() for i in range(6)]
cpu_right_col_base = [mem.copy() for i in range(6)]
cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0)
cpu_text = Text("CPU", font_size=24)
cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
cpu.move_to([-2.5,-.5,0])
self.add(cpu)
gpu_base = [mem.copy() for i in range(4)]
gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0)
gpu_text = Text("GPU", font_size=24)
gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
gpu.move_to([-1,-1,0])
self.add(gpu)
model_base = [mem.copy() for i in range(6)]
model_rect = VGroup(*model_base).arrange(RIGHT,buff=0)
model_text = Text("Model", font_size=24)
model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
model.move_to([3, -1., 0])
self.add(model)
model_arr = []
model_cpu_arr = []
model_meta_arr = []
for i,rect in enumerate(model_base):
rect.set_stroke(YELLOW)
cpu_target = Rectangle(height=0.46/4,width=0.46/3).set_stroke(width=0.).set_fill(YELLOW, opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN+LEFT), buff=0.02, direction=UP)
cpu_target.set_x(cpu_target.get_x()+0.1)
elif i == 3:
cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.)
else:
cpu_target.next_to(model_cpu_arr[i-1], direction=RIGHT, buff=0.)
self.add(cpu_target)
model_cpu_arr.append(cpu_target)
self.add(*model_arr, *model_cpu_arr, *model_meta_arr)
checkpoint_base = [mem.copy() for i in range(6)]
checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT,buff=0)
checkpoint_text = Text("Loaded Checkpoint", font_size=24)
checkpoint = Group(checkpoint_rect,checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
checkpoint.move_to([3, .5, 0])
self.add(checkpoint)
ckpt_arr = []
ckpt_cpu_arr = []
for i,rect in enumerate(checkpoint_base):
target = fill.copy().set_fill(BLUE, opacity=0.7)
target.move_to(rect)
ckpt_arr.append(target)
cpu_target = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i+1])
else:
cpu_target.move_to(cpu_right_col_base[i-5])
ckpt_cpu_arr.append(cpu_target)
self.add(*ckpt_arr, *ckpt_cpu_arr)
key = Square(side_length=2.2)
key.move_to([-5, 2, 0])
key_text = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
font_size=18,
)
key_text.move_to([-5, 2.4, 0])
self.add(key_text, key)
blue_text = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
font_size=18,
)
blue_text.next_to(key_text, DOWN*2.4, aligned_edge=key_text.get_left())
self.add(blue_text)
step_3 = MarkupText(
f'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.',
font_size=24
)
step_3.move_to([2, 2, 0])
disk_left_col_base = [meta_mem.copy() for i in range(6)]
disk_right_col_base = [meta_mem.copy() for i in range(6)]
disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
disk_rects = VGroup(disk_left_col,disk_right_col).arrange(RIGHT, buff=0)
disk_text = Text("Disk", font_size=24)
disk = Group(disk_rects,disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
disk.move_to([-4.,-1.25,0])
self.play(
Write(step_3, run_time=3),
Write(disk_text, run_time=1),
Create(disk_rects, run_time=1)
)
animations = []
for i,rect in enumerate(ckpt_cpu_arr):
target = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i]).scale(0.5)
animations.append(MoveToTarget(target, run_time=1.5))
self.play(*animations)
self.play(FadeOut(step_3))
step_4 = MarkupText(
f'Then, the checkpoint is removed from memory\nthrough garbage collection.',
font_size=24
)
step_4.move_to([2, 2, 0])
self.play(
Write(step_4, run_time=3)
)
self.play(
FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr),
)
self.wait() | accelerate/manim_animations/big_model_inference/stage_3.py/0 | {
"file_path": "accelerate/manim_animations/big_model_inference/stage_3.py",
"repo_id": "accelerate",
"token_count": 2891
} | 4 |
#!/usr/bin/env python
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"AOT_TS_NVFUSER",
"NVPRIMS_NVFUSER",
"CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"TENSORRT",
"IPEX",
"TVM",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
ask_again = True
while ask_again:
result = input(input_text)
try:
if default is not None and len(result) == 0:
return default
return convert_value(result) if convert_value is not None else result
except Exception:
if error_message is not None:
print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default=0):
menu = BulletMenu(input_text, options)
result = menu.run(default_choice=default)
return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
value = int(value)
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
def _convert_distributed_mode(value):
value = int(value)
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
def _convert_dynamo_backend(value):
value = int(value)
return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _convert_mixed_precision(value):
value = int(value)
return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
def _convert_sagemaker_distributed_mode(value):
value = int(value)
return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
def _convert_yes_no_to_bool(value):
return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
"""
A custom formatter that will remove the usage line from the help message for subcommands.
"""
def _format_usage(self, usage, actions, groups, prefix):
usage = super()._format_usage(usage, actions, groups, prefix)
usage = usage.replace("<command> [<args>] ", "")
return usage
| accelerate/src/accelerate/commands/config/config_utils.py/0 | {
"file_path": "accelerate/src/accelerate/commands/config/config_utils.py",
"repo_id": "accelerate",
"token_count": 1098
} | 5 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from typing import Dict, List, Mapping, Optional, Union
import torch
import torch.nn as nn
from .state import PartialState
from .utils import (
PrefixedDataset,
find_device,
named_module_tensors,
send_to_device,
set_module_tensor_to_device,
)
from .utils.modeling import get_non_persistent_buffers
from .utils.other import recursive_getattr
class ModelHook:
"""
    A hook that contains callbacks to be executed just before and after the forward method of a model. The difference
    with PyTorch's existing hooks is that these also get passed the keyword arguments.
Class attribute:
- **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under
the `torch.no_grad()` context manager.
"""
no_grad = False
def init_hook(self, module):
"""
To be executed when the hook is attached to the module.
Args:
module (`torch.nn.Module`): The module attached to this hook.
"""
return module
def pre_forward(self, module, *args, **kwargs):
"""
To be executed just before the forward method of the model.
Args:
module (`torch.nn.Module`): The module whose forward pass will be executed just after this event.
args (`Tuple[Any]`): The positional arguments passed to the module.
kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module.
Returns:
`Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`.
"""
return args, kwargs
def post_forward(self, module, output):
"""
To be executed just after the forward method of the model.
Args:
            module (`torch.nn.Module`): The module whose forward pass has been executed just before this event.
output (`Any`): The output of the module.
Returns:
`Any`: The processed `output`.
"""
return output
def detach_hook(self, module):
"""
To be executed when the hook is detached from a module.
Args:
module (`torch.nn.Module`): The module detached from this hook.
"""
return module
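# Added illustration (hedged, not part of the original file): a minimal ModelHook subclass that
# logs what happens around a module's forward pass. The class name and messages are assumptions
# made purely for demonstration.
class _ExampleLoggingHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        # Runs just before the wrapped forward; args/kwargs can be inspected or modified here.
        print(f"[{module.__class__.__name__}] forward called with {len(args)} positional args")
        return args, kwargs
    def post_forward(self, module, output):
        # Runs just after the wrapped forward; the output can be inspected or post-processed here.
        print(f"[{module.__class__.__name__}] forward returned a {type(output).__name__}")
        return output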
class SequentialHook(ModelHook):
"""
A hook that can contain several hooks and iterates through them at each event.
"""
def __init__(self, *hooks):
self.hooks = hooks
def init_hook(self, module):
for hook in self.hooks:
module = hook.init_hook(module)
return module
def pre_forward(self, module, *args, **kwargs):
for hook in self.hooks:
args, kwargs = hook.pre_forward(module, *args, **kwargs)
return args, kwargs
def post_forward(self, module, output):
for hook in self.hooks:
output = hook.post_forward(module, output)
return output
def detach_hook(self, module):
for hook in self.hooks:
module = hook.detach_hook(module)
return module
def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False):
"""
Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove
this behavior and restore the original `forward` method, use `remove_hook_from_module`.
<Tip warning={true}>
If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks
together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class.
</Tip>
Args:
module (`torch.nn.Module`):
The module to attach a hook to.
hook (`ModelHook`):
The hook to attach.
append (`bool`, *optional*, defaults to `False`):
Whether the hook should be chained with an existing one (if module already contains a hook) or not.
Returns:
`torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can
be discarded).
"""
if append and (getattr(module, "_hf_hook", None) is not None):
old_hook = module._hf_hook
remove_hook_from_module(module)
hook = SequentialHook(old_hook, hook)
if hasattr(module, "_hf_hook") and hasattr(module, "_old_forward"):
# If we already put some hook on this module, we replace it with the new one.
old_forward = module._old_forward
else:
old_forward = module.forward
module._old_forward = old_forward
module = hook.init_hook(module)
module._hf_hook = hook
def new_forward(module, *args, **kwargs):
args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)
if module._hf_hook.no_grad:
with torch.no_grad():
output = module._old_forward(*args, **kwargs)
else:
output = module._old_forward(*args, **kwargs)
return module._hf_hook.post_forward(module, output)
# Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail.
# Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409
if "GraphModuleImpl" in str(type(module)):
module.__class__.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward)
else:
module.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward)
return module
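# Added illustration (hedged sketch, not part of the original file): attaching a hook to a plain
# linear layer, then chaining a second hook with `append=True` so both run as a SequentialHook.
# The layer shape is an arbitrary assumption for demonstration.
def _example_add_hooks_to_linear():
    layer = nn.Linear(4, 4)
    add_hook_to_module(layer, ModelHook())
    # With append=True the existing hook is kept and wrapped together with the new one.
    add_hook_to_module(layer, ModelHook(), append=True)
    return layer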
def remove_hook_from_module(module: nn.Module, recurse=False):
"""
Removes any hook attached to a module via `add_hook_to_module`.
Args:
        module (`torch.nn.Module`): The module to detach the hook from.
        recurse (`bool`, *optional*): Whether to remove the hooks recursively.
Returns:
`torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can
be discarded).
"""
if hasattr(module, "_hf_hook"):
module._hf_hook.detach_hook(module)
delattr(module, "_hf_hook")
if hasattr(module, "_old_forward"):
# Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail.
# Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409
if "GraphModuleImpl" in str(type(module)):
module.__class__.forward = module._old_forward
else:
module.forward = module._old_forward
delattr(module, "_old_forward")
if recurse:
for child in module.children():
remove_hook_from_module(child, recurse)
return module
class AlignDevicesHook(ModelHook):
"""
A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the
associated module, potentially offloading the weights after the forward pass.
Args:
execution_device (`torch.device`, *optional*):
The device on which inputs and model weights should be placed before the forward pass.
offload (`bool`, *optional*, defaults to `False`):
Whether or not the weights should be offloaded after the forward pass.
io_same_device (`bool`, *optional*, defaults to `False`):
Whether or not the output should be placed on the same device as the input was.
weights_map (`Mapping[str, torch.Tensor]`, *optional*):
When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
offload_buffers (`bool`, *optional*, defaults to `False`):
Whether or not to include the associated module's buffers when offloading.
place_submodules (`bool`, *optional*, defaults to `False`):
Whether to place the submodules on `execution_device` during the `init_hook` event.
"""
def __init__(
self,
execution_device: Optional[Union[int, str, torch.device]] = None,
offload: bool = False,
io_same_device: bool = False,
weights_map: Optional[Mapping] = None,
offload_buffers: bool = False,
place_submodules: bool = False,
skip_keys: Optional[Union[str, List[str]]] = None,
tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
):
self.execution_device = execution_device
self.offload = offload
self.io_same_device = io_same_device
self.weights_map = weights_map
self.offload_buffers = offload_buffers
self.place_submodules = place_submodules
self.skip_keys = skip_keys
# Will contain the input device when `io_same_device=True`.
self.input_device = None
self.param_original_devices = {}
self.buffer_original_devices = {}
self.tied_params_names = set()
# The hook pre_forward/post_forward need to have knowledge of this dictionary, as with offloading we want to avoid duplicating memory
# for tied weights already loaded on the target execution device.
self.tied_params_map = tied_params_map
def __repr__(self):
return (
f"AlignDevicesHook(execution_device={self.execution_device}, offload={self.offload}, "
f"io_same_device={self.io_same_device}, offload_buffers={self.offload_buffers}, "
f"place_submodules={self.place_submodules}, skip_keys={repr(self.skip_keys)})"
)
def init_hook(self, module):
# In case the AlignDevicesHook is on meta device, ignore tied weights as data_ptr() is then always zero.
if self.execution_device == "meta" or self.execution_device == torch.device("meta"):
self.tied_params_map = None
if not self.offload and self.execution_device is not None:
for name, _ in named_module_tensors(module, recurse=self.place_submodules):
set_module_tensor_to_device(module, name, self.execution_device, tied_params_map=self.tied_params_map)
elif self.offload:
self.original_devices = {
name: param.device for name, param in named_module_tensors(module, recurse=self.place_submodules)
}
if self.weights_map is None:
self.weights_map = {
name: param.to("cpu")
for name, param in named_module_tensors(
module, include_buffers=self.offload_buffers, recurse=self.place_submodules
)
}
for name, _ in named_module_tensors(
module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True
):
# When using disk offloading, we can not rely on `weights_map[name].data_ptr()` as the reference pointer,
# as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer.
# As we have no reliable way to track the shared data pointer of tied weights in this case, we use tied_params_names: List[str]
# to add on the fly pointers to `tied_params_map` in the pre_forward call.
if (
self.tied_params_map is not None
and recursive_getattr(module, name).data_ptr() in self.tied_params_map
):
self.tied_params_names.add(name)
set_module_tensor_to_device(module, name, "meta")
if not self.offload_buffers and self.execution_device is not None:
for name, _ in module.named_buffers(recurse=self.place_submodules):
set_module_tensor_to_device(
module, name, self.execution_device, tied_params_map=self.tied_params_map
)
elif self.offload_buffers and self.execution_device is not None:
for name in get_non_persistent_buffers(module, recurse=self.place_submodules):
set_module_tensor_to_device(
module, name, self.execution_device, tied_params_map=self.tied_params_map
)
return module
def pre_forward(self, module, *args, **kwargs):
if self.io_same_device:
self.input_device = find_device([args, kwargs])
if self.offload:
self.tied_pointers_to_remove = set()
for name, _ in named_module_tensors(
module,
include_buffers=self.offload_buffers,
recurse=self.place_submodules,
remove_non_persistent=True,
):
fp16_statistics = None
value = self.weights_map[name]
if "weight" in name and name.replace("weight", "SCB") in self.weights_map.keys():
if value.dtype == torch.int8:
fp16_statistics = self.weights_map[name.replace("weight", "SCB")]
# In case we are using offloading with tied weights, we need to keep track of the offloaded weights
# that are loaded on device at this point, as we will need to remove them as well from the dictionary
# self.tied_params_map in order to allow to free memory.
if name in self.tied_params_names and value.data_ptr() not in self.tied_params_map:
self.tied_params_map[value.data_ptr()] = {}
if (
value is not None
and self.tied_params_map is not None
and value.data_ptr() in self.tied_params_map
and self.execution_device not in self.tied_params_map[value.data_ptr()]
):
self.tied_pointers_to_remove.add((value.data_ptr(), self.execution_device))
set_module_tensor_to_device(
module,
name,
self.execution_device,
value=value,
fp16_statistics=fp16_statistics,
tied_params_map=self.tied_params_map,
)
return send_to_device(args, self.execution_device), send_to_device(
kwargs, self.execution_device, skip_keys=self.skip_keys
)
def post_forward(self, module, output):
if self.offload:
for name, _ in named_module_tensors(
module,
include_buffers=self.offload_buffers,
recurse=self.place_submodules,
remove_non_persistent=True,
):
set_module_tensor_to_device(module, name, "meta")
if type(module).__name__ == "Linear8bitLt":
module.state.SCB = None
module.state.CxB = None
            # We may have loaded tied weights into self.tied_params_map (avoiding loading them several times in e.g. submodules): remove them from
# this dictionary to allow the garbage collector to do its job.
for value_pointer, device in self.tied_pointers_to_remove:
del self.tied_params_map[value_pointer][device]
self.tied_pointers_to_remove = None
if self.io_same_device and self.input_device is not None:
output = send_to_device(output, self.input_device, skip_keys=self.skip_keys)
return output
def detach_hook(self, module):
if self.offload:
for name, device in self.original_devices.items():
if device != torch.device("meta"):
set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None))
return module
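# Added illustration (hedged sketch, not part of the original file): keeping a module's weights
# offloaded and aligning them with GPU 0 only for the duration of the forward pass. The device
# index is an assumption; adapt it to the hardware actually available.
def _example_align_devices(module: nn.Module) -> nn.Module:
    hook = AlignDevicesHook(execution_device=0, offload=True, io_same_device=True)
    return add_hook_to_module(module, hook)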
def attach_execution_device_hook(
module: torch.nn.Module,
execution_device: Union[int, str, torch.device],
skip_keys: Optional[Union[str, List[str]]] = None,
preload_module_classes: Optional[List[str]] = None,
tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
):
"""
Recursively attaches `AlignDevicesHook` to all submodules of a given model to make sure they have the right
    execution device.
Args:
module (`torch.nn.Module`):
The module where we want to attach the hooks.
execution_device (`int`, `str` or `torch.device`):
The device on which inputs and model weights should be placed before the forward pass.
skip_keys (`str` or `List[str]`, *optional*):
A list of keys to ignore when moving inputs or outputs between devices.
preload_module_classes (`List[str]`, *optional*):
A list of classes whose instances should load all their weights (even in the submodules) at the beginning
of the forward. This should only be used for classes that have submodules which are registered but not
called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
`dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`):
A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution
device, this parameter is useful to reuse the first available pointer of a shared weight for all others,
instead of duplicating memory.
"""
if not hasattr(module, "_hf_hook") and len(module.state_dict()) > 0:
add_hook_to_module(
module,
AlignDevicesHook(execution_device, skip_keys=skip_keys, tied_params_map=tied_params_map),
)
# Break the recursion if we get to a preload module.
if preload_module_classes is not None and module.__class__.__name__ in preload_module_classes:
return
for child in module.children():
attach_execution_device_hook(child, execution_device, tied_params_map=tied_params_map)
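# Added illustration (hedged sketch, not part of the original file): recursively pinning every
# submodule of a model to a single execution device. The device index is an assumption.
def _example_attach_execution_device(model: nn.Module) -> nn.Module:
    attach_execution_device_hook(model, execution_device=0)
    return model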
def attach_align_device_hook(
module: torch.nn.Module,
execution_device: Optional[torch.device] = None,
offload: bool = False,
weights_map: Optional[Mapping] = None,
offload_buffers: bool = False,
module_name: str = "",
skip_keys: Optional[Union[str, List[str]]] = None,
preload_module_classes: Optional[List[str]] = None,
tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
):
"""
Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or
buffers.
Args:
module (`torch.nn.Module`):
The module where we want to attach the hooks.
execution_device (`torch.device`, *optional*):
The device on which inputs and model weights should be placed before the forward pass.
offload (`bool`, *optional*, defaults to `False`):
Whether or not the weights should be offloaded after the forward pass.
weights_map (`Mapping[str, torch.Tensor]`, *optional*):
When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
offload_buffers (`bool`, *optional*, defaults to `False`):
Whether or not to include the associated module's buffers when offloading.
module_name (`str`, *optional*, defaults to `""`):
The name of the module.
skip_keys (`str` or `List[str]`, *optional*):
A list of keys to ignore when moving inputs or outputs between devices.
preload_module_classes (`List[str]`, *optional*):
A list of classes whose instances should load all their weights (even in the submodules) at the beginning
of the forward. This should only be used for classes that have submodules which are registered but not
called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
`dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`):
A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution
device, this parameter is useful to reuse the first available pointer of a shared weight for all others,
instead of duplicating memory.
"""
# Attach the hook on this module if it has any direct tensor.
directs = named_module_tensors(module)
full_offload = (
offload and preload_module_classes is not None and module.__class__.__name__ in preload_module_classes
)
if len(list(directs)) > 0 or full_offload:
if weights_map is not None:
prefix = f"{module_name}." if len(module_name) > 0 else ""
prefixed_weights_map = PrefixedDataset(weights_map, prefix)
else:
prefixed_weights_map = None
hook = AlignDevicesHook(
execution_device=execution_device,
offload=offload,
weights_map=prefixed_weights_map,
offload_buffers=offload_buffers,
place_submodules=full_offload,
skip_keys=skip_keys,
tied_params_map=tied_params_map,
)
add_hook_to_module(module, hook, append=True)
# We stop the recursion in case we hit the full offload.
if full_offload:
return
# Recurse on all children of the module.
for child_name, child in module.named_children():
child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name
attach_align_device_hook(
child,
execution_device=execution_device,
offload=offload,
weights_map=weights_map,
offload_buffers=offload_buffers,
module_name=child_name,
preload_module_classes=preload_module_classes,
skip_keys=skip_keys,
tied_params_map=tied_params_map,
)
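# Added illustration (hedged sketch, not part of the original file): offloading every leaf module
# of a model to CPU between forward passes. Building the weights map from `state_dict()` is an
# assumption for demonstration; in practice it is often a lazy or disk-backed mapping.
def _example_attach_align_device(model: nn.Module) -> nn.Module:
    weights_map = {name: param.to("cpu") for name, param in model.state_dict().items()}
    attach_align_device_hook(model, execution_device=0, offload=True, weights_map=weights_map)
    return model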
def remove_hook_from_submodules(module: nn.Module):
"""
Recursively removes all hooks attached on the submodules of a given model.
Args:
module (`torch.nn.Module`): The module on which to remove all hooks.
"""
remove_hook_from_module(module)
for child in module.children():
remove_hook_from_submodules(child)
def attach_align_device_hook_on_blocks(
module: nn.Module,
execution_device: Optional[Union[torch.device, Dict[str, torch.device]]] = None,
offload: Union[bool, Dict[str, bool]] = False,
weights_map: Mapping = None,
offload_buffers: bool = False,
module_name: str = "",
skip_keys: Optional[Union[str, List[str]]] = None,
preload_module_classes: Optional[List[str]] = None,
tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
):
"""
Attaches `AlignDevicesHook` to all blocks of a given model as needed.
Args:
module (`torch.nn.Module`):
The module where we want to attach the hooks.
execution_device (`torch.device` or `Dict[str, torch.device]`, *optional*):
The device on which inputs and model weights should be placed before the forward pass. It can be one device
for the whole module, or a dictionary mapping module name to device.
offload (`bool`, *optional*, defaults to `False`):
Whether or not the weights should be offloaded after the forward pass. It can be one boolean for the whole
module, or a dictionary mapping module name to boolean.
weights_map (`Mapping[str, torch.Tensor]`, *optional*):
When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
offload_buffers (`bool`, *optional*, defaults to `False`):
Whether or not to include the associated module's buffers when offloading.
module_name (`str`, *optional*, defaults to `""`):
The name of the module.
skip_keys (`str` or `List[str]`, *optional*):
A list of keys to ignore when moving inputs or outputs between devices.
preload_module_classes (`List[str]`, *optional*):
A list of classes whose instances should load all their weights (even in the submodules) at the beginning
of the forward. This should only be used for classes that have submodules which are registered but not
called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
`dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`):
A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution
device, this parameter is useful to reuse the first available pointer of a shared weight for all others,
instead of duplicating memory.
"""
# If one device and one offload, we've got one hook.
if not isinstance(execution_device, Mapping) and not isinstance(offload, dict):
if not offload:
hook = AlignDevicesHook(
execution_device=execution_device,
io_same_device=True,
skip_keys=skip_keys,
place_submodules=True,
tied_params_map=tied_params_map,
)
add_hook_to_module(module, hook)
else:
attach_align_device_hook(
module,
execution_device=execution_device,
offload=True,
weights_map=weights_map,
offload_buffers=offload_buffers,
module_name=module_name,
skip_keys=skip_keys,
tied_params_map=tied_params_map,
)
return
if not isinstance(execution_device, Mapping):
execution_device = {key: execution_device for key in offload.keys()}
if not isinstance(offload, Mapping):
offload = {key: offload for key in execution_device.keys()}
if module_name in execution_device and module_name in offload and not offload[module_name]:
hook = AlignDevicesHook(
execution_device=execution_device[module_name],
offload_buffers=offload_buffers,
io_same_device=(module_name == ""),
place_submodules=True,
skip_keys=skip_keys,
tied_params_map=tied_params_map,
)
add_hook_to_module(module, hook)
attach_execution_device_hook(module, execution_device[module_name], tied_params_map=tied_params_map)
elif module_name in execution_device and module_name in offload:
attach_align_device_hook(
module,
execution_device=execution_device[module_name],
offload=True,
weights_map=weights_map,
offload_buffers=offload_buffers,
module_name=module_name,
skip_keys=skip_keys,
preload_module_classes=preload_module_classes,
tied_params_map=tied_params_map,
)
if not hasattr(module, "_hf_hook"):
hook = AlignDevicesHook(
execution_device=execution_device[module_name],
io_same_device=(module_name == ""),
skip_keys=skip_keys,
tied_params_map=tied_params_map,
)
add_hook_to_module(module, hook)
attach_execution_device_hook(
module,
execution_device[module_name],
preload_module_classes=preload_module_classes,
skip_keys=skip_keys,
tied_params_map=tied_params_map,
)
elif module_name == "":
hook = AlignDevicesHook(
execution_device=execution_device.get(""),
io_same_device=True,
skip_keys=skip_keys,
tied_params_map=tied_params_map,
)
add_hook_to_module(module, hook)
for child_name, child in module.named_children():
child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name
attach_align_device_hook_on_blocks(
child,
execution_device=execution_device,
offload=offload,
weights_map=weights_map,
offload_buffers=offload_buffers,
module_name=child_name,
preload_module_classes=preload_module_classes,
skip_keys=skip_keys,
tied_params_map=tied_params_map,
)
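# Added illustration (hedged sketch, not part of the original file): dispatching different
# top-level blocks of a model to different devices, similar to a simple device_map. The block
# names used here are assumptions and must match the model's actual attribute names.
def _example_attach_on_blocks(model: nn.Module) -> nn.Module:
    execution_device = {"encoder": 0, "decoder": "cpu"}
    offload = {"encoder": False, "decoder": False}
    attach_align_device_hook_on_blocks(model, execution_device=execution_device, offload=offload)
    return model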
class CpuOffload(ModelHook):
"""
    Offloads a model to the CPU until its forward pass is called. The model is not offloaded back to the CPU after the
    forward pass; the user needs to call the `init_hook` method again for this.
Args:
execution_device(`str`, `int` or `torch.device`, *optional*):
The device on which the model should be executed. Will default to the MPS device if it's available, then
GPU 0 if there is a GPU, and finally to the CPU.
prev_module_hook (`UserCpuOffloadHook`, *optional*):
The hook sent back by [`cpu_offload_with_hook`] for a previous model in the pipeline you are running. If
passed, its offload method will be called just before the forward of the model to which this hook is
attached.
"""
def __init__(
self,
execution_device: Optional[Union[str, int, torch.device]] = None,
prev_module_hook: Optional["UserCpuOffloadHook"] = None,
):
self.prev_module_hook = prev_module_hook
self.execution_device = execution_device if execution_device is not None else PartialState().default_device
def init_hook(self, module):
return module.to("cpu")
def pre_forward(self, module, *args, **kwargs):
if self.prev_module_hook is not None:
self.prev_module_hook.offload()
module.to(self.execution_device)
return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device)
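# Added illustration (hedged sketch, not part of the original file): keeping a model on CPU and
# only moving it to the default execution device when its forward pass runs. This is roughly the
# mechanism used by `cpu_offload_with_hook`, simplified for demonstration.
def _example_cpu_offload(model: nn.Module) -> nn.Module:
    return add_hook_to_module(model, CpuOffload())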
class UserCpuOffloadHook:
"""
    A simple hook grouping a model and a `ModelHook`, which provides easy APIs to call the init method of the hook or
    remove it entirely.
"""
def __init__(self, model, hook):
self.model = model
self.hook = hook
def offload(self):
self.hook.init_hook(self.model)
def remove(self):
remove_hook_from_module(self.model)
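# Added illustration (hedged sketch, not part of the original file): chaining two models in a
# pipeline so the first is offloaded back to CPU right before the second one runs, via
# `prev_module_hook`. The model arguments are placeholders for demonstration.
def _example_chained_cpu_offload(model_a: nn.Module, model_b: nn.Module):
    hook_a = CpuOffload()
    add_hook_to_module(model_a, hook_a)
    user_hook_a = UserCpuOffloadHook(model_a, hook_a)
    add_hook_to_module(model_b, CpuOffload(prev_module_hook=user_hook_a))
    return model_a, model_b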
| accelerate/src/accelerate/hooks.py/0 | {
"file_path": "accelerate/src/accelerate/hooks.py",
"repo_id": "accelerate",
"token_count": 12919
} | 6 |
import torch
def main():
if torch.cuda.is_available():
num_gpus = torch.cuda.device_count()
else:
num_gpus = 0
print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| accelerate/src/accelerate/test_utils/scripts/test_cli.py/0 | {
"file_path": "accelerate/src/accelerate/test_utils/scripts/test_cli.py",
"repo_id": "accelerate",
"token_count": 102
} | 7 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import importlib.metadata
import os
import warnings
from functools import lru_cache
import torch
from packaging import version
from packaging.version import parse
from .environment import parse_flag_from_env, str_to_bool
from .versions import compare_versions, is_torch_version
try:
import torch_xla.core.xla_model as xm # noqa: F401
_tpu_available = True
except ImportError:
_tpu_available = False
# Cache this result as it's a C FFI call which can be pretty time-consuming
_torch_distributed_available = torch.distributed.is_available()
def _is_package_available(pkg_name):
    # Check that we're not importing a "pkg_name" directory somewhere but the actual library, by trying to grab its metadata
    package_exists = importlib.util.find_spec(pkg_name) is not None
    if package_exists:
        try:
            _ = importlib.metadata.metadata(pkg_name)
            return True
        except importlib.metadata.PackageNotFoundError:
            return False
    return False
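# Added illustration (hedged, not part of the original file): the helper above treats a package
# as available only when its distribution metadata can be found, so a stray directory named like
# the package on sys.path does not count. Package names below are arbitrary examples.
def _example_package_checks():
    return {
        "torch": _is_package_available("torch"),
        "definitely_not_installed_pkg": _is_package_available("definitely_not_installed_pkg"),
    }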
def is_torch_distributed_available() -> bool:
return _torch_distributed_available
def is_ccl_available():
try:
pass
except ImportError:
print(
"Intel(R) oneCCL Bindings for PyTorch* is required to run DDP on Intel(R) GPUs, but it is not"
" detected. If you see \"ValueError: Invalid backend: 'ccl'\" error, please install Intel(R) oneCCL"
" Bindings for PyTorch*."
)
return (
importlib.util.find_spec("torch_ccl") is not None
or importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None
)
def get_ccl_version():
return importlib.metadata.version("oneccl_bind_pt")
def is_msamp_available():
package_exists = importlib.util.find_spec("msamp") is not None
if package_exists:
try:
# MS-AMP has a different metadata name
_ = importlib.metadata.metadata("ms-amp")
return True
except importlib.metadata.PackageNotFoundError:
return False
return False
def is_transformer_engine_available():
return _is_package_available("transformer_engine")
def is_fp8_available():
return is_msamp_available() or is_transformer_engine_available()
def is_cuda_available():
"""
    Checks if `cuda` is available via an `nvml-based` check which won't trigger the drivers and leaves cuda
    uninitialized.
"""
try:
os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = str(1)
available = torch.cuda.is_available()
finally:
os.environ.pop("PYTORCH_NVML_BASED_CUDA_CHECK", None)
return available
@lru_cache
def is_tpu_available(check_device=True):
"Checks if `torch_xla` is installed and potentially if a TPU is in the environment"
# Due to bugs on the amp series GPUs, we disable torch-xla on them
if is_cuda_available():
return False
if check_device:
if _tpu_available:
try:
# Will raise a RuntimeError if no XLA configuration is found
_ = xm.xla_device()
return True
except RuntimeError:
return False
return _tpu_available
def is_deepspeed_available():
return _is_package_available("deepspeed")
def is_bf16_available(ignore_tpu=False):
"Checks if bf16 is supported, optionally ignoring the TPU"
if is_tpu_available():
return not ignore_tpu
if is_cuda_available():
return torch.cuda.is_bf16_supported()
return True
def is_4bit_bnb_available():
package_exists = _is_package_available("bitsandbytes")
if package_exists:
bnb_version = version.parse(importlib.metadata.version("bitsandbytes"))
return compare_versions(bnb_version, ">=", "0.39.0")
return False
def is_8bit_bnb_available():
package_exists = _is_package_available("bitsandbytes")
if package_exists:
bnb_version = version.parse(importlib.metadata.version("bitsandbytes"))
return compare_versions(bnb_version, ">=", "0.37.2")
return False
def is_bnb_available():
return _is_package_available("bitsandbytes")
def is_megatron_lm_available():
if str_to_bool(os.environ.get("ACCELERATE_USE_MEGATRON_LM", "False")) == 1:
package_exists = importlib.util.find_spec("megatron") is not None
if package_exists:
try:
megatron_version = parse(importlib.metadata.version("megatron-lm"))
return compare_versions(megatron_version, ">=", "2.2.0")
except Exception as e:
warnings.warn(f"Parse Megatron version failed. Exception:{e}")
return False
def is_transformers_available():
return _is_package_available("transformers")
def is_datasets_available():
return _is_package_available("datasets")
def is_peft_available():
return _is_package_available("peft")
def is_timm_available():
return _is_package_available("timm")
def is_aim_available():
package_exists = _is_package_available("aim")
if package_exists:
aim_version = version.parse(importlib.metadata.version("aim"))
return compare_versions(aim_version, "<", "4.0.0")
return False
def is_tensorboard_available():
return _is_package_available("tensorboard") or _is_package_available("tensorboardX")
def is_wandb_available():
return _is_package_available("wandb")
def is_comet_ml_available():
return _is_package_available("comet_ml")
def is_boto3_available():
return _is_package_available("boto3")
def is_rich_available():
if _is_package_available("rich"):
if "ACCELERATE_DISABLE_RICH" in os.environ:
warnings.warn(
"`ACCELERATE_DISABLE_RICH` is deprecated and will be removed in v0.22.0 and deactivated by default. Please use `ACCELERATE_ENABLE_RICH` if you wish to use `rich`."
)
return not parse_flag_from_env("ACCELERATE_DISABLE_RICH", False)
return parse_flag_from_env("ACCELERATE_ENABLE_RICH", False)
return False
def is_sagemaker_available():
return _is_package_available("sagemaker")
def is_tqdm_available():
return _is_package_available("tqdm")
def is_clearml_available():
return _is_package_available("clearml")
def is_pandas_available():
return _is_package_available("pandas")
def is_mlflow_available():
if _is_package_available("mlflow"):
return True
if importlib.util.find_spec("mlflow") is not None:
try:
_ = importlib.metadata.metadata("mlflow-skinny")
return True
except importlib.metadata.PackageNotFoundError:
return False
return False
def is_mps_available():
return is_torch_version(">=", "1.12") and torch.backends.mps.is_available() and torch.backends.mps.is_built()
def is_ipex_available():
def get_major_and_minor_from_version(full_version):
return str(version.parse(full_version).major) + "." + str(version.parse(full_version).minor)
_torch_version = importlib.metadata.version("torch")
if importlib.util.find_spec("intel_extension_for_pytorch") is None:
return False
_ipex_version = "N/A"
try:
_ipex_version = importlib.metadata.version("intel_extension_for_pytorch")
except importlib.metadata.PackageNotFoundError:
return False
torch_major_and_minor = get_major_and_minor_from_version(_torch_version)
ipex_major_and_minor = get_major_and_minor_from_version(_ipex_version)
if torch_major_and_minor != ipex_major_and_minor:
warnings.warn(
f"Intel Extension for PyTorch {ipex_major_and_minor} needs to work with PyTorch {ipex_major_and_minor}.*,"
f" but PyTorch {_torch_version} is found. Please switch to the matching version and run again."
)
return False
return True
@lru_cache
def is_npu_available(check_device=False):
"Checks if `torch_npu` is installed and potentially if a NPU is in the environment"
if importlib.util.find_spec("torch") is None or importlib.util.find_spec("torch_npu") is None:
return False
import torch
import torch_npu # noqa: F401
if check_device:
try:
# Will raise a RuntimeError if no NPU is found
_ = torch.npu.device_count()
return torch.npu.is_available()
except RuntimeError:
return False
return hasattr(torch, "npu") and torch.npu.is_available()
@lru_cache
def is_xpu_available(check_device=False):
"check if user disables it explicitly"
if not parse_flag_from_env("ACCELERATE_USE_XPU", default=True):
return False
"Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment"
if is_ipex_available():
import torch
if is_torch_version("<=", "1.12"):
return False
else:
return False
import intel_extension_for_pytorch # noqa: F401
if check_device:
try:
# Will raise a RuntimeError if no XPU is found
_ = torch.xpu.device_count()
return torch.xpu.is_available()
except RuntimeError:
return False
return hasattr(torch, "xpu") and torch.xpu.is_available()
def is_dvclive_available():
return _is_package_available("dvclive")
| accelerate/src/accelerate/utils/imports.py/0 | {
"file_path": "accelerate/src/accelerate/utils/imports.py",
"repo_id": "accelerate",
"token_count": 3921
} | 8 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import io
import itertools
import json
import os
import tempfile
from copy import deepcopy
from pathlib import Path
import torch
from parameterized import parameterized
from torch.utils.data import BatchSampler, DataLoader, RandomSampler, SequentialSampler
from transformers import AutoConfig, AutoModel, AutoModelForCausalLM, get_scheduler
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
from transformers.utils import is_torch_bf16_available
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.scheduler import AcceleratedScheduler
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_deepspeed,
require_multi_device,
require_non_cpu,
slow,
)
from accelerate.test_utils.training import RegressionDataset, RegressionModel
from accelerate.utils.dataclasses import DeepSpeedPlugin
from accelerate.utils.deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
)
from accelerate.utils.other import patch_environment
set_seed(42)
GPT2_TINY = "sshleifer/tiny-gpt2"
MOBILEVIT = "apple/mobilevit-xx-small"
ZERO2 = "zero2"
ZERO3 = "zero3"
FP16 = "fp16"
BF16 = "bf16"
CUSTOM_OPTIMIZER = "custom_optimizer"
CUSTOM_SCHEDULER = "custom_scheduler"
DS_OPTIMIZER = "deepspeed_optimizer"
DS_SCHEDULER = "deepspeed_scheduler"
NO_CONFIG = "no_config"
CONFIG_WITH_NO_HIDDEN_SIZE = "config_with_no_hidden_size"
CONFIG_WITH_HIDDEN_SIZE = "config_with_hidden_size"
CONFIG_WITH_HIDDEN_SIZES = "config_with_hidden_sizes"
stages = [ZERO2, ZERO3]
optims = [CUSTOM_OPTIMIZER, DS_OPTIMIZER]
schedulers = [CUSTOM_SCHEDULER, DS_SCHEDULER]
model_types = [NO_CONFIG, CONFIG_WITH_NO_HIDDEN_SIZE, CONFIG_WITH_HIDDEN_SIZE, CONFIG_WITH_HIDDEN_SIZES]
if is_torch_bf16_available():
dtypes = [FP16, BF16]
else:
dtypes = [FP16]
def parameterized_custom_name_func(func, param_num, param):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
return f"{func.__name__}_{param_based_name}"
# Cartesian product of ZeRO stages and dtypes to test
params = list(itertools.product(stages, dtypes))
optim_scheduler_params = list(itertools.product(optims, schedulers))
class DummyConfig:
def __init__(self):
self._name_or_path = "dummy"
@require_deepspeed
@require_non_cpu
class DeepSpeedConfigIntegration(AccelerateTestCase):
def setUp(self):
super().setUp()
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self.test_file_dir_str = str(path.parents[0])
self.ds_config_file = dict(
zero2=f"{self.test_file_dir_str}/ds_config_zero2.json",
zero3=f"{self.test_file_dir_str}/ds_config_zero3.json",
)
# use self.get_config_dict(stage) to use these to ensure the original is not modified
with io.open(self.ds_config_file[ZERO2], "r", encoding="utf-8") as f:
config_zero2 = json.load(f)
with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f:
config_zero3 = json.load(f)
# The following setting slows things down, so don't enable it by default unless needed by a test.
# It's in the file as a demo for users since we want everything to work out of the box even if slower.
config_zero3["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = False
self.ds_config_dict = dict(zero2=config_zero2, zero3=config_zero3)
self.dist_env = dict(
ACCELERATE_USE_DEEPSPEED="true",
MASTER_ADDR="localhost",
MASTER_PORT="10999",
RANK="0",
LOCAL_RANK="0",
WORLD_SIZE="1",
)
def get_config_dict(self, stage):
# As some tests modify the dict, always make a copy
return deepcopy(self.ds_config_dict[stage])
@parameterized.expand(stages, name_func=parameterized_custom_name_func)
def test_deepspeed_plugin(self, stage):
# Test zero3_init_flag will be set to False when ZeRO stage != 3
deepspeed_plugin = DeepSpeedPlugin(
gradient_accumulation_steps=1,
gradient_clipping=1.0,
zero_stage=2,
offload_optimizer_device="cpu",
offload_param_device="cpu",
zero3_save_16bit_model=True,
zero3_init_flag=True,
)
self.assertFalse(deepspeed_plugin.zero3_init_flag)
deepspeed_plugin.deepspeed_config = None
# Test zero3_init_flag will be set to True only when ZeRO stage == 3
deepspeed_plugin = DeepSpeedPlugin(
gradient_accumulation_steps=1,
gradient_clipping=1.0,
zero_stage=3,
offload_optimizer_device="cpu",
offload_param_device="cpu",
zero3_save_16bit_model=True,
zero3_init_flag=True,
)
self.assertTrue(deepspeed_plugin.zero3_init_flag)
deepspeed_plugin.deepspeed_config = None
# Test config files are loaded correctly
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[stage], zero3_init_flag=True)
if stage == ZERO2:
self.assertFalse(deepspeed_plugin.zero3_init_flag)
elif stage == ZERO3:
self.assertTrue(deepspeed_plugin.zero3_init_flag)
# Test `gradient_accumulation_steps` is set to 1 if unavailable in config file
with tempfile.TemporaryDirectory() as dirpath:
ds_config = self.get_config_dict(stage)
del ds_config["gradient_accumulation_steps"]
with open(os.path.join(dirpath, "ds_config.json"), "w") as out_file:
json.dump(ds_config, out_file)
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=os.path.join(dirpath, "ds_config.json"))
self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"], 1)
deepspeed_plugin.deepspeed_config = None
# Test `ValueError` is raised if `zero_optimization` is unavailable in config file
with tempfile.TemporaryDirectory() as dirpath:
ds_config = self.get_config_dict(stage)
del ds_config["zero_optimization"]
with open(os.path.join(dirpath, "ds_config.json"), "w") as out_file:
json.dump(ds_config, out_file)
with self.assertRaises(ValueError) as cm:
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=os.path.join(dirpath, "ds_config.json"))
self.assertTrue(
"Please specify the ZeRO optimization config in the DeepSpeed config." in str(cm.exception)
)
deepspeed_plugin.deepspeed_config = None
# Test `deepspeed_config_process`
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[stage])
kwargs = {
"fp16.enabled": True,
"bf16.enabled": False,
"optimizer.params.lr": 5e-5,
"optimizer.params.weight_decay": 0.0,
"scheduler.params.warmup_min_lr": 0.0,
"scheduler.params.warmup_max_lr": 5e-5,
"scheduler.params.warmup_num_steps": 0,
"train_micro_batch_size_per_gpu": 16,
"gradient_clipping": 1.0,
"train_batch_size": 16,
"zero_optimization.reduce_bucket_size": 5e5,
"zero_optimization.stage3_prefetch_bucket_size": 5e5,
"zero_optimization.stage3_param_persistence_threshold": 5e5,
"zero_optimization.stage3_gather_16bit_weights_on_model_save": False,
}
deepspeed_plugin.deepspeed_config_process(**kwargs)
for ds_key_long, value in kwargs.items():
config, ds_key = deepspeed_plugin.hf_ds_config.find_config_node(ds_key_long)
if config.get(ds_key) is not None:
self.assertEqual(config.get(ds_key), value)
# Test mismatches
mismatches = {
"optimizer.params.lr": 1e-5,
"optimizer.params.weight_decay": 1e-5,
"gradient_accumulation_steps": 2,
}
with self.assertRaises(ValueError) as cm:
new_kwargs = deepcopy(kwargs)
new_kwargs.update(mismatches)
deepspeed_plugin.deepspeed_config_process(**new_kwargs)
for key in mismatches.keys():
self.assertTrue(
key in str(cm.exception),
f"{key} is not in the exception message:\n{cm.exception}",
)
# Test `ValueError` is raised if some config file fields with `auto` value is missing in `kwargs`
deepspeed_plugin.deepspeed_config["optimizer"]["params"]["lr"] = "auto"
with self.assertRaises(ValueError) as cm:
del kwargs["optimizer.params.lr"]
deepspeed_plugin.deepspeed_config_process(**kwargs)
self.assertTrue("`optimizer.params.lr` not found in kwargs." in str(cm.exception))
@parameterized.expand([FP16, BF16], name_func=parameterized_custom_name_func)
def test_accelerate_state_deepspeed(self, dtype):
AcceleratorState._reset_state(True)
deepspeed_plugin = DeepSpeedPlugin(
gradient_accumulation_steps=1,
gradient_clipping=1.0,
zero_stage=ZERO2,
offload_optimizer_device="cpu",
offload_param_device="cpu",
zero3_save_16bit_model=True,
zero3_init_flag=True,
)
with mockenv_context(**self.dist_env):
state = Accelerator(mixed_precision=dtype, deepspeed_plugin=deepspeed_plugin).state
self.assertTrue(state.deepspeed_plugin.deepspeed_config[dtype]["enabled"])
def test_init_zero3(self):
deepspeed_plugin = DeepSpeedPlugin(
gradient_accumulation_steps=1,
gradient_clipping=1.0,
zero_stage=3,
offload_optimizer_device="cpu",
offload_param_device="cpu",
zero3_save_16bit_model=True,
zero3_init_flag=True,
)
with mockenv_context(**self.dist_env):
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin) # noqa: F841
from transformers.deepspeed import is_deepspeed_zero3_enabled
self.assertTrue(is_deepspeed_zero3_enabled())
@parameterized.expand(optim_scheduler_params, name_func=parameterized_custom_name_func)
def test_prepare_deepspeed(self, optim_type, scheduler_type):
# 1. Testing with one of the ZeRO Stages is enough to test the `_prepare_deepspeed` function.
# Here we test using ZeRO Stage 2 with FP16 enabled.
from deepspeed.runtime.engine import DeepSpeedEngine
kwargs = {
"optimizer.params.lr": 5e-5,
"optimizer.params.weight_decay": 0.0,
"scheduler.params.warmup_min_lr": 0.0,
"scheduler.params.warmup_max_lr": 5e-5,
"scheduler.params.warmup_num_steps": 0,
"train_micro_batch_size_per_gpu": 16,
"gradient_clipping": 1.0,
"train_batch_size": 16,
"zero_optimization.reduce_bucket_size": 5e5,
"zero_optimization.stage3_prefetch_bucket_size": 5e5,
"zero_optimization.stage3_param_persistence_threshold": 5e5,
"zero_optimization.stage3_gather_16bit_weights_on_model_save": False,
}
if optim_type == CUSTOM_OPTIMIZER and scheduler_type == CUSTOM_SCHEDULER:
# Test custom optimizer + custom scheduler
deepspeed_plugin = DeepSpeedPlugin(
gradient_accumulation_steps=1,
gradient_clipping=1.0,
zero_stage=2,
offload_optimizer_device="cpu",
offload_param_device="cpu",
zero3_save_16bit_model=False,
zero3_init_flag=False,
)
with mockenv_context(**self.dist_env):
accelerator = Accelerator(mixed_precision="fp16", deepspeed_plugin=deepspeed_plugin)
train_set = RegressionDataset(length=80)
eval_set = RegressionDataset(length=20)
train_dataloader = DataLoader(train_set, batch_size=16, shuffle=True)
eval_dataloader = DataLoader(eval_set, batch_size=32, shuffle=False)
model = AutoModel.from_pretrained(GPT2_TINY)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
lr_scheduler = get_scheduler(
name="linear",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=1000,
)
dummy_optimizer = DummyOptim(params=model.parameters())
dummy_lr_scheduler = DummyScheduler(dummy_optimizer)
with self.assertRaises(ValueError) as cm:
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, dummy_optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
self.assertTrue(
"You cannot create a `DummyOptim` without specifying an optimizer in the config file."
in str(cm.exception)
)
with self.assertRaises(ValueError) as cm:
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
self.assertTrue(
"Either specify a scheduler in the config file or "
"pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`."
in str(cm.exception)
)
with self.assertRaises(ValueError) as cm:
model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
self.assertTrue(
"When using DeepSpeed, `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders "
"with `batch_size` attribute returning an integer value "
"or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file "
"or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`."
in str(cm.exception)
)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
self.assertTrue(accelerator.deepspeed_config["zero_allow_untested_optimizer"])
                self.assertEqual(accelerator.deepspeed_config["train_batch_size"], 16)
self.assertEqual(type(model), DeepSpeedEngine)
self.assertEqual(type(optimizer), DeepSpeedOptimizerWrapper)
self.assertEqual(type(lr_scheduler), AcceleratedScheduler)
self.assertEqual(type(accelerator.deepspeed_engine_wrapped), DeepSpeedEngineWrapper)
elif optim_type == DS_OPTIMIZER and scheduler_type == DS_SCHEDULER:
# Test DeepSpeed optimizer + DeepSpeed scheduler
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])
with mockenv_context(**self.dist_env):
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision="fp16")
train_set = RegressionDataset(length=80)
eval_set = RegressionDataset(length=20)
train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)
eval_dataloader = DataLoader(eval_set, batch_size=5, shuffle=False)
model = AutoModel.from_pretrained(GPT2_TINY)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
lr_scheduler = get_scheduler(
name="linear",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=1000,
)
dummy_optimizer = DummyOptim(params=model.parameters())
dummy_lr_scheduler = DummyScheduler(dummy_optimizer)
kwargs["train_batch_size"] = (
kwargs["train_micro_batch_size_per_gpu"]
* deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"]
* accelerator.num_processes
)
accelerator.state.deepspeed_plugin.deepspeed_config_process(**kwargs)
with self.assertRaises(ValueError) as cm:
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
self.assertTrue(
"You cannot specify an optimizer in the config file and in the code at the same time"
in str(cm.exception)
)
with self.assertRaises(ValueError) as cm:
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, dummy_optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
self.assertTrue(
"You cannot specify a scheduler in the config file and in the code at the same time"
in str(cm.exception)
)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
self.assertTrue(type(model) == DeepSpeedEngine)
self.assertTrue(type(optimizer) == DeepSpeedOptimizerWrapper)
self.assertTrue(type(lr_scheduler) == DeepSpeedSchedulerWrapper)
self.assertTrue(type(accelerator.deepspeed_engine_wrapped) == DeepSpeedEngineWrapper)
elif optim_type == CUSTOM_OPTIMIZER and scheduler_type == DS_SCHEDULER:
# Test custom optimizer + DeepSpeed scheduler
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])
with mockenv_context(**self.dist_env):
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision="fp16")
train_set = RegressionDataset(length=80)
eval_set = RegressionDataset(length=20)
train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)
eval_dataloader = DataLoader(eval_set, batch_size=5, shuffle=False)
model = AutoModel.from_pretrained(GPT2_TINY)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
lr_scheduler = get_scheduler(
name="linear",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=1000,
)
dummy_optimizer = DummyOptim(params=model.parameters())
dummy_lr_scheduler = DummyScheduler(dummy_optimizer)
kwargs["train_batch_size"] = (
kwargs["train_micro_batch_size_per_gpu"]
* deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"]
* accelerator.num_processes
)
accelerator.state.deepspeed_plugin.deepspeed_config_process(**kwargs)
del accelerator.state.deepspeed_plugin.deepspeed_config["optimizer"]
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
self.assertTrue(type(model) == DeepSpeedEngine)
self.assertTrue(type(optimizer) == DeepSpeedOptimizerWrapper)
self.assertTrue(type(lr_scheduler) == DeepSpeedSchedulerWrapper)
self.assertTrue(type(accelerator.deepspeed_engine_wrapped) == DeepSpeedEngineWrapper)
elif optim_type == DS_OPTIMIZER and scheduler_type == CUSTOM_SCHEDULER:
# Test deepspeed optimizer + custom scheduler
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])
with mockenv_context(**self.dist_env):
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision="fp16")
train_set = RegressionDataset(length=80)
eval_set = RegressionDataset(length=20)
train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)
eval_dataloader = DataLoader(eval_set, batch_size=5, shuffle=False)
model = AutoModel.from_pretrained(GPT2_TINY)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
lr_scheduler = get_scheduler(
name="linear",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=1000,
)
dummy_optimizer = DummyOptim(params=model.parameters())
dummy_lr_scheduler = DummyScheduler(dummy_optimizer)
kwargs["train_batch_size"] = (
kwargs["train_micro_batch_size_per_gpu"]
* deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"]
* accelerator.num_processes
)
accelerator.state.deepspeed_plugin.deepspeed_config_process(**kwargs)
del accelerator.state.deepspeed_plugin.deepspeed_config["scheduler"]
with self.assertRaises(ValueError) as cm:
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, dummy_optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
self.assertTrue(
"You can only specify `accelerate.utils.DummyScheduler` in the code when using `accelerate.utils.DummyOptim`."
in str(cm.exception)
)
# passing `DummyScheduler` without `lr_scheduler_callable` should fail
with self.assertRaises(ValueError) as cm:
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
self.assertTrue(
"Either specify a scheduler in the config file or "
"pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`."
in str(cm.exception)
)
# passing `lr_scheduler_callable` to DummyScheduler should enable DS Optim + Custom Scheduler
def _lr_scheduler_callable(optimizer):
return get_scheduler(
name="linear",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=1000,
)
dummy_lr_scheduler = DummyScheduler(dummy_optimizer, lr_scheduler_callable=_lr_scheduler_callable)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
def test_dataloader_with_batch_sampler(self):
deepspeed_plugin = DeepSpeedPlugin(
gradient_accumulation_steps=1,
gradient_clipping=1.0,
zero_stage=2,
offload_optimizer_device="cpu",
offload_param_device="cpu",
zero3_save_16bit_model=False,
zero3_init_flag=False,
)
with mockenv_context(**self.dist_env):
accelerator = Accelerator(mixed_precision="fp16", deepspeed_plugin=deepspeed_plugin)
train_set = RegressionDataset(length=80)
eval_set = RegressionDataset(length=20)
train_dataloader = DataLoader(
train_set, batch_sampler=BatchSampler(RandomSampler(train_set), batch_size=10, drop_last=False)
)
eval_dataloader = DataLoader(
eval_set, batch_sampler=BatchSampler(SequentialSampler(eval_set), batch_size=10, drop_last=False)
)
model = AutoModel.from_pretrained(GPT2_TINY)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
lr_scheduler = get_scheduler(
name="linear",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=1000,
)
with self.assertRaises(ValueError) as cm:
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
self.assertTrue(
"At least one of the dataloaders passed to `accelerate.prepare()` has `None` as batch size. "
"Please set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file "
"or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`."
in str(cm.exception)
)
def test_save_checkpoints(self):
deepspeed_plugin = DeepSpeedPlugin(
hf_ds_config=self.ds_config_file[ZERO3],
zero3_init_flag=True,
)
del deepspeed_plugin.deepspeed_config["bf16"]
kwargs = {
"optimizer.params.lr": 5e-5,
"optimizer.params.weight_decay": 0.0,
"scheduler.params.warmup_min_lr": 0.0,
"scheduler.params.warmup_max_lr": 5e-5,
"scheduler.params.warmup_num_steps": 0,
"train_micro_batch_size_per_gpu": 16,
"gradient_clipping": 1.0,
"train_batch_size": 16,
"zero_optimization.reduce_bucket_size": 5e5,
"zero_optimization.stage3_prefetch_bucket_size": 5e5,
"zero_optimization.stage3_param_persistence_threshold": 5e5,
"zero_optimization.stage3_gather_16bit_weights_on_model_save": False,
}
with mockenv_context(**self.dist_env):
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision="fp16")
kwargs["train_batch_size"] = (
kwargs["train_micro_batch_size_per_gpu"]
* deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"]
* accelerator.num_processes
)
accelerator.state.deepspeed_plugin.deepspeed_config_process(**kwargs)
train_set = RegressionDataset(length=80)
eval_set = RegressionDataset(length=20)
train_dataloader = DataLoader(train_set, batch_size=16, shuffle=True)
eval_dataloader = DataLoader(eval_set, batch_size=32, shuffle=False)
model = AutoModelForCausalLM.from_pretrained("gpt2")
dummy_optimizer = DummyOptim(params=model.parameters())
dummy_lr_scheduler = DummyScheduler(dummy_optimizer)
model, _, train_dataloader, eval_dataloader, _ = accelerator.prepare(
model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
with self.assertRaises(ValueError) as cm:
accelerator.get_state_dict(model)
msg = (
"Cannot get 16bit model weights because `stage3_gather_16bit_weights_on_model_save` in DeepSpeed config is False. "
"To save the model weights in 16bit, set `stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed config file or "
"set `zero3_save_16bit_model` to True when using `accelerate config`. "
"To save the full checkpoint, run `model.save_checkpoint(save_dir)` and use `zero_to_fp32.py` to recover weights."
)
self.assertTrue(msg in str(cm.exception))
def test_autofill_dsconfig(self):
deepspeed_plugin = DeepSpeedPlugin(
hf_ds_config=self.ds_config_file[ZERO3],
zero3_init_flag=True,
)
del deepspeed_plugin.deepspeed_config["bf16"]
del deepspeed_plugin.deepspeed_config["fp16"]
with mockenv_context(**self.dist_env):
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)
train_set = RegressionDataset(length=80)
eval_set = RegressionDataset(length=20)
train_dataloader = DataLoader(train_set, batch_size=16, shuffle=True)
eval_dataloader = DataLoader(eval_set, batch_size=32, shuffle=False)
model = AutoModelForCausalLM.from_pretrained("gpt2")
dummy_optimizer = DummyOptim(params=model.parameters(), lr=5e-5, weight_decay=1e-4)
dummy_lr_scheduler = DummyScheduler(dummy_optimizer, warmup_num_steps=10, total_num_steps=1000)
hidden_size = model.config.hidden_size
model, _, train_dataloader, eval_dataloader, _ = accelerator.prepare(
model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
self.assertEqual(accelerator.deepspeed_config["train_micro_batch_size_per_gpu"], 16)
self.assertEqual(accelerator.deepspeed_config["train_batch_size"], 16)
self.assertEqual(accelerator.deepspeed_config["optimizer"]["params"]["lr"], 5e-5)
self.assertEqual(accelerator.deepspeed_config["optimizer"]["params"]["weight_decay"], 1e-4)
self.assertEqual(accelerator.deepspeed_config["scheduler"]["params"]["warmup_min_lr"], 0.0)
self.assertEqual(accelerator.deepspeed_config["scheduler"]["params"]["warmup_max_lr"], 5e-5)
self.assertEqual(accelerator.deepspeed_config["scheduler"]["params"]["warmup_num_steps"], 10)
self.assertEqual(accelerator.deepspeed_config["gradient_clipping"], 1.0)
self.assertEqual(
accelerator.deepspeed_config["zero_optimization"]["reduce_bucket_size"], hidden_size * hidden_size
)
self.assertEqual(
accelerator.deepspeed_config["zero_optimization"]["stage3_prefetch_bucket_size"],
0.9 * hidden_size * hidden_size,
)
self.assertEqual(
accelerator.deepspeed_config["zero_optimization"]["stage3_param_persistence_threshold"],
10 * hidden_size,
)
self.assertFalse(
accelerator.deepspeed_config["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"]
)
@parameterized.expand(model_types, name_func=parameterized_custom_name_func)
def test_autofill_comm_buffers_dsconfig(self, model_type):
deepspeed_plugin = DeepSpeedPlugin(
hf_ds_config=self.ds_config_file[ZERO3],
zero3_init_flag=True,
)
del deepspeed_plugin.deepspeed_config["bf16"]
del deepspeed_plugin.deepspeed_config["fp16"]
del deepspeed_plugin.deepspeed_config["optimizer"]
del deepspeed_plugin.deepspeed_config["scheduler"]
with mockenv_context(**self.dist_env):
accelerator = Accelerator(mixed_precision="fp16", deepspeed_plugin=deepspeed_plugin)
train_set = RegressionDataset(length=80)
eval_set = RegressionDataset(length=20)
train_dataloader = DataLoader(train_set, batch_size=16, shuffle=True)
eval_dataloader = DataLoader(eval_set, batch_size=32, shuffle=False)
model = RegressionModel()
if model_type == CONFIG_WITH_NO_HIDDEN_SIZE:
model.config = DummyConfig()
elif model_type == CONFIG_WITH_HIDDEN_SIZE:
model.config = AutoConfig.from_pretrained(GPT2_TINY)
hidden_size = model.config.hidden_size
elif model_type == CONFIG_WITH_HIDDEN_SIZES:
model.config = AutoConfig.from_pretrained(MOBILEVIT)
hidden_size = max(model.config.hidden_sizes)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
lr_scheduler = get_scheduler(
name="linear",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=1000,
)
if model_type == NO_CONFIG:
with self.assertRaises(ValueError) as cm:
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
msg = "Can't find `model.config` entry"
self.assertTrue(msg in str(cm.exception))
elif model_type == CONFIG_WITH_NO_HIDDEN_SIZE:
with self.assertRaises(ValueError) as cm:
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
msg = "Can find neither `model.config.hidden_size` nor `model.config.hidden_sizes`"
self.assertTrue(msg in str(cm.exception))
else:
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
self.assertEqual(
accelerator.deepspeed_config["zero_optimization"]["reduce_bucket_size"], hidden_size * hidden_size
)
self.assertEqual(
accelerator.deepspeed_config["zero_optimization"]["stage3_prefetch_bucket_size"],
0.9 * hidden_size * hidden_size,
)
self.assertEqual(
accelerator.deepspeed_config["zero_optimization"]["stage3_param_persistence_threshold"],
10 * hidden_size,
)
@parameterized.expand([FP16, BF16], name_func=parameterized_custom_name_func)
def test_autofill_dsconfig_from_ds_plugin(self, dtype):
ds_config = self.ds_config_dict["zero3"]
if dtype == BF16:
del ds_config["fp16"]
else:
del ds_config["bf16"]
ds_config[dtype]["enabled"] = "auto"
ds_config["zero_optimization"]["stage"] = "auto"
ds_config["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = "auto"
ds_config["zero_optimization"]["offload_optimizer"]["device"] = "auto"
ds_config["zero_optimization"]["offload_param"]["device"] = "auto"
ds_config["gradient_accumulation_steps"] = "auto"
ds_config["gradient_clipping"] = "auto"
deepspeed_plugin = DeepSpeedPlugin(
hf_ds_config=ds_config,
zero3_init_flag=True,
gradient_accumulation_steps=2,
gradient_clipping=1.0,
zero_stage=2,
offload_optimizer_device="cpu",
offload_param_device="cpu",
zero3_save_16bit_model=True,
)
with mockenv_context(**self.dist_env):
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=dtype)
deepspeed_plugin = accelerator.state.deepspeed_plugin
self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_clipping"], 1.0)
self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"], 2)
self.assertEqual(deepspeed_plugin.deepspeed_config["zero_optimization"]["stage"], 2)
self.assertEqual(
deepspeed_plugin.deepspeed_config["zero_optimization"]["offload_optimizer"]["device"], "cpu"
)
self.assertEqual(deepspeed_plugin.deepspeed_config["zero_optimization"]["offload_param"]["device"], "cpu")
self.assertTrue(
deepspeed_plugin.deepspeed_config["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"]
)
self.assertTrue(deepspeed_plugin.deepspeed_config[dtype]["enabled"])
AcceleratorState._reset_state(True)
diff_dtype = "bf16" if dtype == "fp16" else "fp16"
with mockenv_context(**self.dist_env):
with self.assertRaises(ValueError) as cm:
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=diff_dtype)
self.assertTrue(
f"`--mixed_precision` arg cannot be set to `{diff_dtype}` when `{dtype}` is set in the DeepSpeed config file."
in str(cm.exception)
)
# base case of passing in `gradient_accumulation_steps` to `DeepSpeedPlugin`
AcceleratorState._reset_state(True)
deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=4)
with mockenv_context(**self.dist_env):
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=dtype)
deepspeed_plugin = accelerator.state.deepspeed_plugin
self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"], 4)
# filling the `auto` gradient_accumulation_steps via Accelerator's value
AcceleratorState._reset_state(True)
deepspeed_plugin = DeepSpeedPlugin(
hf_ds_config=ds_config,
zero3_init_flag=True,
gradient_clipping=1.0,
zero_stage=2,
offload_optimizer_device="cpu",
offload_param_device="cpu",
zero3_save_16bit_model=True,
)
with mockenv_context(**self.dist_env):
accelerator = Accelerator(
deepspeed_plugin=deepspeed_plugin, mixed_precision=dtype, gradient_accumulation_steps=8
)
train_set = RegressionDataset(length=80)
eval_set = RegressionDataset(length=20)
train_dataloader = DataLoader(train_set, batch_size=16, shuffle=True)
eval_dataloader = DataLoader(eval_set, batch_size=32, shuffle=False)
model = AutoModelForCausalLM.from_pretrained("gpt2")
dummy_optimizer = DummyOptim(params=model.parameters(), lr=5e-5, weight_decay=1e-4)
dummy_lr_scheduler = DummyScheduler(dummy_optimizer, warmup_num_steps=10, total_num_steps=1000)
model, _, train_dataloader, eval_dataloader, _ = accelerator.prepare(
model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
)
deepspeed_plugin = accelerator.state.deepspeed_plugin
self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"], 8)
def test_ds_config_assertions(self):
ambiguous_env = self.dist_env.copy()
ambiguous_env[
"ACCELERATE_CONFIG_DS_FIELDS"
] = "gradient_accumulation_steps,gradient_clipping,zero_stage,offload_optimizer_device,offload_param_device,zero3_save_16bit_model,mixed_precision"
with mockenv_context(**ambiguous_env):
with self.assertRaises(ValueError) as cm:
deepspeed_plugin = DeepSpeedPlugin(
hf_ds_config=self.ds_config_file[ZERO3],
zero3_init_flag=True,
gradient_accumulation_steps=1,
gradient_clipping=1.0,
zero_stage=ZERO2,
offload_optimizer_device="cpu",
offload_param_device="cpu",
zero3_save_16bit_model=True,
)
_ = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=FP16)
self.assertTrue(
"If you are using an accelerate config file, remove others config variables mentioned in the above specified list."
in str(cm.exception)
)
@parameterized.expand(stages, name_func=parameterized_custom_name_func)
def test_ds_config(self, stage):
deepspeed_plugin = DeepSpeedPlugin(
hf_ds_config=self.ds_config_file[stage],
zero3_init_flag=True,
)
self.assertEqual(deepspeed_plugin.zero_stage, int(stage.replace("zero", "")))
def test_basic_run(self):
mod_file = inspect.getfile(accelerate.test_utils)
test_file_path = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_performance.py"]
)
with tempfile.TemporaryDirectory() as dirpath:
cmd = [
"accelerate",
"launch",
"--num_processes=1",
"--num_machines=1",
"--machine_rank=0",
"--mixed_precision=fp16",
"--use_deepspeed",
"--gradient_accumulation_steps=1",
"--zero_stage=2",
"--offload_optimizer_device=none",
"--offload_param_device=none",
test_file_path,
"--model_name_or_path=distilbert-base-uncased",
"--num_epochs=1",
f"--output_dir={dirpath}",
]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd, env=os.environ.copy())
@require_deepspeed
@require_multi_device
@slow
class DeepSpeedIntegrationTest(TempDirTestCase):
def setUp(self):
super().setUp()
self._test_file_path = inspect.getfile(self.__class__)
path = Path(self._test_file_path).resolve()
self.test_file_dir_str = str(path.parents[0])
self.ds_config_file = dict(
zero2=f"{self.test_file_dir_str}/ds_config_zero2.json",
zero3=f"{self.test_file_dir_str}/ds_config_zero3.json",
)
self.stages = [1, 2, 3]
self.zero3_offload_config = False
self.performance_lower_bound = 0.82
self.peak_memory_usage_upper_bound = {
"multi_gpu_fp16": 3200,
"deepspeed_stage_1_fp16": 1600,
"deepspeed_stage_2_fp16": 2500,
"deepspeed_stage_3_zero_init_fp16": 2800,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "deepspeed_stage_3_cpu_offload_fp16": 1900,
}
self.n_train = 160
self.n_val = 160
mod_file = inspect.getfile(accelerate.test_utils)
self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
def test_performance(self):
self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
cmd = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
"--mixed_precision=fp16",
"--use_deepspeed",
"--gradient_accumulation_steps=1",
"--gradient_clipping=1",
"--zero3_init_flag=True",
"--zero3_save_16bit_model=True",
]
for stage in self.stages:
if stage == 1:
continue
cmd_stage = cmd.copy()
cmd_stage.extend([f"--zero_stage={stage}"])
cmd_stage.extend(["--offload_optimizer_device=none", "--offload_param_device=none"])
if self.zero3_offload_config:
with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f:
ds_config = json.load(f)
del ds_config["bf16"]
del ds_config["optimizer"]["params"]["torch_adam"]
del ds_config["optimizer"]["params"]["adam_w_mode"]
ds_config["fp16"]["enabled"] = True
ds_config_path = os.path.join(self.tmpdir, "ds_config.json")
with open(ds_config_path, "w") as out_file:
json.dump(ds_config, out_file)
cmd_stage.extend([f"--deepspeed_config_file={ds_config_path}"])
cmd_stage.extend(
[
self.test_file_path,
f"--output_dir={self.tmpdir}",
f"--performance_lower_bound={self.performance_lower_bound}",
]
)
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd_stage, env=os.environ.copy())
def test_checkpointing(self):
self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
cmd = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
"--mixed_precision=fp16",
"--use_deepspeed",
"--gradient_accumulation_steps=1",
"--gradient_clipping=1",
"--zero3_init_flag=True",
"--zero3_save_16bit_model=True",
]
for stage in self.stages:
if stage == 1:
continue
cmd_stage = cmd.copy()
cmd_stage.extend([f"--zero_stage={stage}"])
cmd_stage.extend(["--offload_optimizer_device=none", "--offload_param_device=none"])
if self.zero3_offload_config:
with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f:
ds_config = json.load(f)
del ds_config["bf16"]
del ds_config["optimizer"]["params"]["torch_adam"]
del ds_config["optimizer"]["params"]["adam_w_mode"]
ds_config["fp16"]["enabled"] = True
ds_config_path = os.path.join(self.tmpdir, "ds_config.json")
with open(ds_config_path, "w") as out_file:
json.dump(ds_config, out_file)
cmd_stage.extend([f"--deepspeed_config_file={ds_config_path}"])
cmd_stage.extend(
[
self.test_file_path,
f"--output_dir={self.tmpdir}",
"--partial_train_epoch=1",
]
)
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd_stage, env=os.environ.copy())
cmd_stage = cmd_stage[:-1]
resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
cmd_stage.extend(
[
f"--resume_from_checkpoint={resume_from_checkpoint}",
]
)
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd_stage, env=os.environ.copy())
def test_peak_memory_usage(self):
self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
cmd = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
cmd_stage = cmd.copy()
if "fp16" in spec:
cmd_stage.extend(["--mixed_precision=fp16"])
if "multi_gpu" in spec:
continue
else:
cmd_stage.extend(
[
"--use_deepspeed",
"--gradient_accumulation_steps=1",
"--gradient_clipping=1",
"--zero3_init_flag=True",
"--zero3_save_16bit_model=True",
]
)
for i in range(3):
if f"stage_{i+1}" in spec:
cmd_stage.extend([f"--zero_stage={i+1}"])
break
cmd_stage.extend(
[
"--offload_optimizer_device=none",
"--offload_param_device=none",
"--offload_optimizer_nvme_path=none",
"--offload_param_nvme_path=none",
]
)
if "cpu_offload" in spec:
with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f:
ds_config = json.load(f)
del ds_config["bf16"]
del ds_config["fp16"]
del ds_config["optimizer"]["params"]["torch_adam"]
del ds_config["optimizer"]["params"]["adam_w_mode"]
ds_config_path = os.path.join(self.tmpdir, "ds_config.json")
with open(ds_config_path, "w") as out_file:
json.dump(ds_config, out_file)
cmd_stage.extend([f"--deepspeed_config_file={ds_config_path}"])
cmd_stage.extend(
[
self.test_file_path,
f"--output_dir={self.tmpdir}",
f"--peak_memory_upper_bound={peak_mem_upper_bound}",
f"--n_train={self.n_train}",
f"--n_val={self.n_val}",
]
)
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd_stage, env=os.environ.copy())
def test_lr_scheduler(self):
self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
cmd = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
"--mixed_precision=no",
"--use_deepspeed",
"--gradient_accumulation_steps=1",
"--gradient_clipping=1",
"--zero3_init_flag=True",
"--zero3_save_16bit_model=True",
"--zero_stage=3",
"--offload_optimizer_device=none",
"--offload_param_device=none",
self.test_file_path,
f"--output_dir={self.tmpdir}",
f"--performance_lower_bound={self.performance_lower_bound}",
]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd, env=os.environ.copy())
| accelerate/tests/deepspeed/test_deepspeed.py/0 | {
"file_path": "accelerate/tests/deepspeed/test_deepspeed.py",
"repo_id": "accelerate",
"token_count": 26042
} | 9 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from torch import nn
from accelerate.test_utils import memory_allocated_func, require_non_cpu, torch_device
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(3, 4)
self.batchnorm = nn.BatchNorm1d(4)
self.linear2 = nn.Linear(4, 5)
def forward(self, x):
return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
def test_memory_implicit(self):
batch_sizes = []
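        # find_executable_batch_size halves the batch size after every (simulated) OOM,
        # so the loop body should be called with 128, 64, 32, 16 and finally 8.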
@find_executable_batch_size(starting_batch_size=128)
def mock_training_loop_function(batch_size):
nonlocal batch_sizes
batch_sizes.append(batch_size)
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
def test_memory_explicit(self):
batch_sizes = []
@find_executable_batch_size(starting_batch_size=128)
def mock_training_loop_function(batch_size, arg1):
nonlocal batch_sizes
batch_sizes.append(batch_size)
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arg1
bs, arg1 = mock_training_loop_function("hello")
self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
self.assertListEqual([bs, arg1], [8, "hello"])
def test_start_zero(self):
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(batch_size):
pass
with self.assertRaises(RuntimeError) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
def test_approach_zero(self):
@find_executable_batch_size(starting_batch_size=16)
def mock_training_loop_function(batch_size):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(RuntimeError) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
def test_verbose_guard(self):
@find_executable_batch_size(starting_batch_size=128)
def mock_training_loop_function(batch_size, arg1, arg2):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(TypeError) as cm:
mock_training_loop_function(128, "hello", "world")
self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])
def test_any_other_error(self):
@find_executable_batch_size(starting_batch_size=16)
def mock_training_loop_function(batch_size):
raise ValueError("Oops, we had an error!")
with self.assertRaises(ValueError) as cm:
mock_training_loop_function()
self.assertIn("Oops, we had an error!", cm.exception.args[0])
@require_non_cpu
def test_release_memory(self):
starting_memory = memory_allocated_func()
model = ModelForTest()
model.to(torch_device)
self.assertGreater(memory_allocated_func(), starting_memory)
model = release_memory(model)
self.assertEqual(memory_allocated_func(), starting_memory)
| accelerate/tests/test_memory_utils.py/0 | {
"file_path": "accelerate/tests/test_memory_utils.py",
"repo_id": "accelerate",
"token_count": 1753
} | 10 |
# Model arguments
model_name_or_path: teknium/OpenHermes-2.5-Mistral-7B
torch_dtype: null
# Data training arguments
dataset_mixer:
HuggingFaceH4/orca_dpo_pairs: 1.0
dataset_splits:
- train_prefs
- test_prefs
preprocessing_num_workers: 12
# Training arguments with sensible defaults
bf16: true
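# beta controls the strength of the DPO KL penalty towards the reference model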
beta: 0.01
loss_type: sigmoid
do_eval: true
do_train: true
evaluation_strategy: steps
eval_steps: 100
gradient_accumulation_steps: 2
gradient_checkpointing: true
gradient_checkpointing_kwargs:
use_reentrant: False
hub_model_id: HuggingFaceH4/openhermes-2.5-mistral-7b-dpo
hub_model_revision: v1.0
learning_rate: 5.0e-7
logging_steps: 10
lr_scheduler_type: cosine
max_prompt_length: 512
num_train_epochs: 1
optim: adamw_torch
output_dir: data/openhermes-2.5-mistral-7b-dpo-v1.0
per_device_train_batch_size: 8
per_device_eval_batch_size: 8
save_strategy: "steps"
save_steps: 100
save_total_limit: 1
seed: 42
warmup_ratio: 0.1 | alignment-handbook/recipes/pref_align_scan/dpo/config_openhermes.yaml/0 | {
"file_path": "alignment-handbook/recipes/pref_align_scan/dpo/config_openhermes.yaml",
"repo_id": "alignment-handbook",
"token_count": 377
} | 11 |
# coding=utf-8
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
from typing import Dict
import torch
from transformers import AutoTokenizer, BitsAndBytesConfig, PreTrainedTokenizer
from transformers.trainer_utils import get_last_checkpoint
from accelerate import Accelerator
from huggingface_hub import list_repo_files
from huggingface_hub.utils._errors import RepositoryNotFoundError
from huggingface_hub.utils._validators import HFValidationError
from peft import LoraConfig, PeftConfig
from .configs import DataArguments, DPOConfig, ModelArguments, SFTConfig
from .data import DEFAULT_CHAT_TEMPLATE
def get_current_device() -> int | str:
"""Get the current device. For GPU we return the local process index to enable multiple GPU training."""
return Accelerator().local_process_index if torch.cuda.is_available() else "cpu"
def get_kbit_device_map() -> Dict[str, int] | None:
"""Useful for running inference with quantized models by setting `device_map=get_peft_device_map()`"""
return {"": get_current_device()} if torch.cuda.is_available() else None
def get_quantization_config(model_args: ModelArguments) -> BitsAndBytesConfig | None:
if model_args.load_in_4bit:
compute_dtype = torch.float16
if model_args.torch_dtype not in {"auto", None}:
compute_dtype = getattr(torch, model_args.torch_dtype)
quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=compute_dtype,
bnb_4bit_quant_type=model_args.bnb_4bit_quant_type,
bnb_4bit_use_double_quant=model_args.use_bnb_nested_quant,
)
elif model_args.load_in_8bit:
quantization_config = BitsAndBytesConfig(
load_in_8bit=True,
)
else:
quantization_config = None
return quantization_config
def get_tokenizer(model_args: ModelArguments, data_args: DataArguments) -> PreTrainedTokenizer:
"""Get the tokenizer for the model."""
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
revision=model_args.model_revision,
)
if tokenizer.pad_token_id is None:
tokenizer.pad_token_id = tokenizer.eos_token_id
if data_args.truncation_side is not None:
tokenizer.truncation_side = data_args.truncation_side
# Set reasonable default for models without max length
if tokenizer.model_max_length > 100_000:
tokenizer.model_max_length = 2048
if data_args.chat_template is not None:
tokenizer.chat_template = data_args.chat_template
elif tokenizer.chat_template is None and tokenizer.default_chat_template is None:
tokenizer.chat_template = DEFAULT_CHAT_TEMPLATE
return tokenizer
def get_peft_config(model_args: ModelArguments) -> PeftConfig | None:
if model_args.use_peft is False:
return None
peft_config = LoraConfig(
r=model_args.lora_r,
lora_alpha=model_args.lora_alpha,
lora_dropout=model_args.lora_dropout,
bias="none",
task_type="CAUSAL_LM",
target_modules=model_args.lora_target_modules,
modules_to_save=model_args.lora_modules_to_save,
)
return peft_config
def is_adapter_model(model_name_or_path: str, revision: str = "main") -> bool:
try:
# Try first if model on a Hub repo
repo_files = list_repo_files(model_name_or_path, revision=revision)
except (HFValidationError, RepositoryNotFoundError):
# If not, check local repo
repo_files = os.listdir(model_name_or_path)
return "adapter_model.safetensors" in repo_files or "adapter_model.bin" in repo_files
def get_checkpoint(training_args: SFTConfig | DPOConfig) -> Path | None:
last_checkpoint = None
if os.path.isdir(training_args.output_dir):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
return last_checkpoint
| alignment-handbook/src/alignment/model_utils.py/0 | {
"file_path": "alignment-handbook/src/alignment/model_utils.py",
"repo_id": "alignment-handbook",
"token_count": 1649
} | 12 |
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
| candle/LICENSE-MIT/0 | {
"file_path": "candle/LICENSE-MIT",
"repo_id": "candle",
"token_count": 263
} | 13 |
# Writing a custom kernel
| candle/candle-book/src/cuda/writing.md/0 | {
"file_path": "candle/candle-book/src/cuda/writing.md",
"repo_id": "candle",
"token_count": 6
} | 14 |
# Training
Training starts with data. We're going to use the Hugging Face Hub and
start with the "Hello World" dataset of machine learning, MNIST.
Let's start by downloading `MNIST` from [huggingface](https://huggingface.co/datasets/mnist).
This requires [`hf-hub`](https://github.com/huggingface/hf-hub).
```bash
cargo add hf-hub
```
This is going to be very hands-on for now.
```rust,ignore
{{#include ../../../candle-examples/src/lib.rs:book_training_1}}
```
This uses the standardized `parquet` files from the `refs/convert/parquet` branch on every dataset.
Our handles are now [`parquet::file::serialized_reader::SerializedFileReader`].
We can inspect the content of the files with:
```rust,ignore
{{#include ../../../candle-examples/src/lib.rs:book_training_2}}
```
You should see something like:
```bash
Column id 1, name label, value 6
Column id 0, name image, value {bytes: [137, ....]
Column id 1, name label, value 8
Column id 0, name image, value {bytes: [137, ....]
```
So each row contains 2 columns (image, label), with the image stored as bytes.
Let's put them into a useful struct.
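As a rough sketch of where this is heading, each row could be collected into a small
struct that keeps the image bytes next to the label. The leading byte `137` (0x89)
shown above suggests the images are stored as encoded PNGs. The struct below is an
illustrative assumption, not the exact type used in the book's example code:

```rust,ignore
// Illustrative sketch: one MNIST row, keeping the (likely PNG-encoded) image bytes
// and its label as read from the parquet columns.
struct MnistItem {
    image_bytes: Vec<u8>,
    label: i64,
}
```

From there, the training loop can decode each item's bytes into a tensor and batch them up.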
| candle/candle-book/src/training/training.md/0 | {
"file_path": "candle/candle-book/src/training/training.md",
"repo_id": "candle",
"token_count": 361
} | 15 |
use crate::op::{BinaryOp, Op, ReduceOp, UnaryOp};
use crate::{Error, Result, Tensor, TensorId};
use std::collections::HashMap;
// arg has been reduced to node via reduce_dims, expand it back to arg.
// This has to handle keepdims.
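// For example, summing a (2, 3, 4) tensor over dim 1 without keepdim yields a (2, 4) node
// while reduced_dims is [2, 1, 4]; we reshape the gradient to (2, 1, 4) and then broadcast
// it back to the (2, 3, 4) shape of arg.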
fn broadcast_back(arg: &Tensor, node: &Tensor, reduced_dims: &[usize]) -> Result<Tensor> {
if arg.rank() == node.rank() {
// keepdim = true
node.broadcast_as(arg.shape())
} else {
// keepdim = false
// first expand the reduced dims.
node.reshape(reduced_dims)?.broadcast_as(arg.shape())
}
}
thread_local! {
static CANDLE_GRAD_DO_NOT_DETACH: bool = {
match std::env::var("CANDLE_GRAD_DO_NOT_DETACH") {
Ok(s) => {
!s.is_empty() && s != "0"
},
Err(_) => false,
}
}
}
impl Tensor {
/// Return all the nodes that lead to this value in a topologically sorted vec, the first
/// elements having dependencies on the latter ones, e.g. the first element if any is the
/// argument.
/// This assumes that the op graph is a DAG.
fn sorted_nodes(&self) -> Vec<&Tensor> {
// The vec of sorted nodes is passed as an owned value rather than a mutable reference
// to get around some lifetime limitations.
fn walk<'a>(
node: &'a Tensor,
nodes: Vec<&'a Tensor>,
already_seen: &mut HashMap<TensorId, bool>,
) -> (bool, Vec<&'a Tensor>) {
if let Some(&tg) = already_seen.get(&node.id()) {
return (tg, nodes);
}
let mut track_grad = false;
let mut nodes = if node.is_variable() {
// Do not call recursively on the "leaf" nodes.
track_grad = true;
nodes
} else if node.dtype().is_int() {
nodes
} else if let Some(op) = node.op() {
match op {
Op::IndexAdd(t1, t2, t3, _)
| Op::ScatterAdd(t1, t2, t3, _)
| Op::CustomOp3(t1, t2, t3, _)
| Op::WhereCond(t1, t2, t3) => {
let (tg, nodes) = walk(t1, nodes, already_seen);
track_grad |= tg;
let (tg, nodes) = walk(t2, nodes, already_seen);
track_grad |= tg;
let (tg, nodes) = walk(t3, nodes, already_seen);
track_grad |= tg;
nodes
}
Op::Conv1D {
arg: lhs,
kernel: rhs,
..
}
| Op::ConvTranspose1D {
arg: lhs,
kernel: rhs,
..
}
| Op::Conv2D {
arg: lhs,
kernel: rhs,
..
}
| Op::ConvTranspose2D {
arg: lhs,
kernel: rhs,
..
}
| Op::CustomOp2(lhs, rhs, _)
| Op::Binary(lhs, rhs, _)
| Op::Gather(lhs, rhs, _)
| Op::IndexSelect(lhs, rhs, _)
| Op::Matmul(lhs, rhs)
| Op::SliceScatter0(lhs, rhs, _) => {
let (tg, nodes) = walk(lhs, nodes, already_seen);
track_grad |= tg;
let (tg, nodes) = walk(rhs, nodes, already_seen);
track_grad |= tg;
nodes
}
Op::Cat(args, _) => args.iter().fold(nodes, |nodes, arg| {
let (tg, nodes) = walk(arg, nodes, already_seen);
track_grad |= tg;
nodes
}),
Op::Affine { arg, mul, .. } => {
if *mul == 0. {
nodes
} else {
let (tg, nodes) = walk(arg, nodes, already_seen);
track_grad |= tg;
nodes
}
}
Op::Unary(_node, UnaryOp::Ceil)
| Op::Unary(_node, UnaryOp::Floor)
| Op::Unary(_node, UnaryOp::Round) => nodes,
Op::Reshape(node)
| Op::UpsampleNearest1D(node)
| Op::UpsampleNearest2D { arg: node, .. }
| Op::AvgPool2D { arg: node, .. }
| Op::MaxPool2D { arg: node, .. }
| Op::Copy(node)
| Op::Broadcast(node)
| Op::Cmp(node, _)
| Op::Reduce(node, ReduceOp::Min | ReduceOp::Sum | ReduceOp::Max, _)
| Op::ToDevice(node)
| Op::Transpose(node, _, _)
| Op::Permute(node, _)
| Op::Narrow(node, _, _, _)
| Op::Unary(node, _)
| Op::Elu(node, _)
| Op::Powf(node, _)
| Op::CustomOp1(node, _) => {
let (tg, nodes) = walk(node, nodes, already_seen);
track_grad |= tg;
nodes
}
Op::ToDType(node) => {
if node.dtype().is_float() {
let (tg, nodes) = walk(node, nodes, already_seen);
track_grad |= tg;
nodes
} else {
nodes
}
}
Op::Reduce(_, ReduceOp::ArgMin | ReduceOp::ArgMax, _) => nodes,
}
} else {
nodes
};
already_seen.insert(node.id(), track_grad);
if track_grad {
nodes.push(node);
}
(track_grad, nodes)
}
let (_tg, mut nodes) = walk(self, vec![], &mut HashMap::new());
nodes.reverse();
nodes
}
pub fn backward(&self) -> Result<GradStore> {
let sorted_nodes = self.sorted_nodes();
let mut grads = GradStore::new();
grads.insert(self, self.ones_like()?.contiguous()?);
for node in sorted_nodes.iter() {
if node.is_variable() {
continue;
}
let grad = grads
.remove(node)
.expect("candle internal error - grad not populated");
// https://github.com/huggingface/candle/issues/1241
// Ideally, we would make these operations in place where possible to ensure that we
// do not have to allocate too often. Here we just call `.detach` to avoid computing
// the backprop graph of the backprop itself. This would be an issue for second order
// derivatives but these are out of scope at the moment.
let do_not_detach = CANDLE_GRAD_DO_NOT_DETACH.with(|b| *b);
let grad = if do_not_detach { grad } else { grad.detach()? };
if let Some(op) = node.op() {
match op {
Op::Binary(lhs, rhs, BinaryOp::Add) => {
let lhs_sum_grad = grads.or_insert(lhs)?;
*lhs_sum_grad = lhs_sum_grad.add(&grad)?;
let rhs_sum_grad = grads.or_insert(rhs)?;
*rhs_sum_grad = rhs_sum_grad.add(&grad)?;
}
Op::Binary(lhs, rhs, BinaryOp::Sub) => {
let lhs_sum_grad = grads.or_insert(lhs)?;
*lhs_sum_grad = lhs_sum_grad.add(&grad)?;
let rhs_sum_grad = grads.or_insert(rhs)?;
*rhs_sum_grad = rhs_sum_grad.sub(&grad)?;
}
Op::Binary(lhs, rhs, BinaryOp::Mul) => {
let lhs_grad = grad.mul(rhs)?;
let lhs_sum_grad = grads.or_insert(lhs)?;
*lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)?;
let rhs_grad = grad.mul(lhs)?;
let rhs_sum_grad = grads.or_insert(rhs)?;
*rhs_sum_grad = rhs_sum_grad.add(&rhs_grad)?;
}
Op::Binary(lhs, rhs, BinaryOp::Div) => {
let lhs_grad = grad.div(rhs)?;
let lhs_sum_grad = grads.or_insert(lhs)?;
*lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)?;
let rhs_grad = grad.mul(lhs)?.div(&rhs.sqr()?)?;
let rhs_sum_grad = grads.or_insert(rhs)?;
*rhs_sum_grad = rhs_sum_grad.sub(&rhs_grad)?;
}
Op::Binary(lhs, rhs, BinaryOp::Minimum)
| Op::Binary(lhs, rhs, BinaryOp::Maximum) => {
let mask_lhs = node.eq(lhs)?.to_dtype(grad.dtype())?;
let mask_rhs = node.eq(rhs)?.to_dtype(grad.dtype())?;
                        // If both masks are 1 at the same point, we want to scale the
                        // gradient by 0.5 rather than 1.
let lhs_grad = mask_lhs.mul(&grad)?.div(&(&mask_rhs + 1.)?)?;
let lhs_sum_grad = grads.or_insert(lhs)?;
*lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)?;
let rhs_grad = mask_rhs.mul(&grad)?.div(&(&mask_lhs + 1.)?)?;
let rhs_sum_grad = grads.or_insert(rhs)?;
*rhs_sum_grad = rhs_sum_grad.add(&rhs_grad)?;
}
Op::WhereCond(pred, t, f) => {
let zeros = grad.zeros_like()?;
let t_sum_grad = grads.or_insert(t)?;
let t_grad = pred.where_cond(&grad, &zeros)?;
*t_sum_grad = t_sum_grad.add(&t_grad)?;
let f_sum_grad = grads.or_insert(f)?;
let f_grad = pred.where_cond(&zeros, &grad)?;
*f_sum_grad = f_sum_grad.add(&f_grad)?;
}
Op::Conv1D {
arg,
kernel,
padding,
stride,
dilation,
} => {
                        // The output length for conv_transpose1d is:
// (l_in - 1) * stride - 2 * padding + dilation * (k_size - 1) + out_padding + 1
let grad_l_in = grad.dim(2)?;
let k_size = kernel.dim(2)?;
let out_size =
(grad_l_in - 1) * stride + dilation * (k_size - 1) + 1 - 2 * padding;
let out_padding = arg.dim(2)? - out_size;
let grad_arg = grad.conv_transpose1d(
kernel,
*padding,
out_padding,
*stride,
*dilation,
)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad_arg)?;
let grad_kernel = arg
.transpose(0, 1)?
.conv1d(&grad.transpose(0, 1)?, *padding, *dilation, *stride, 1)?
.transpose(0, 1)?;
let sum_grad = grads.or_insert(kernel)?;
let (_, _, k0) = kernel.dims3()?;
let (_, _, g_k0) = grad_kernel.dims3()?;
let grad_kernel = if g_k0 != k0 {
grad_kernel.narrow(2, 0, k0)?
} else {
grad_kernel
};
*sum_grad = sum_grad.add(&grad_kernel)?;
}
Op::Conv2D {
arg,
kernel,
padding,
stride,
dilation,
} => {
// The output height for conv_transpose2d is:
// (i_h - 1) * stride - 2 * padding + dilation * (k_h - 1) + out_padding + 1
let grad_h = grad.dim(2)?;
let k_h = kernel.dim(2)?;
let out_size =
(grad_h - 1) * stride + dilation * (k_h - 1) + 1 - 2 * padding;
let out_padding = arg.dim(2)? - out_size;
let grad_arg = grad.conv_transpose2d(
kernel,
*padding,
out_padding,
*stride,
*dilation,
)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad_arg)?;
let grad_kernel = arg
.transpose(0, 1)?
.conv2d(&grad.transpose(0, 1)?, *padding, *dilation, *stride, 1)?
.transpose(0, 1)?;
let sum_grad = grads.or_insert(kernel)?;
let (_, _, k0, k1) = kernel.dims4()?;
let (_, _, g_k0, g_k1) = grad_kernel.dims4()?;
let grad_kernel = if g_k0 != k0 || g_k1 != k1 {
grad_kernel.narrow(2, 0, k0)?.narrow(3, 0, k1)?
} else {
grad_kernel
};
*sum_grad = sum_grad.add(&grad_kernel)?;
}
Op::ConvTranspose1D { .. } => Err(Error::BackwardNotSupported {
op: "conv-transpose1d",
})?,
Op::ConvTranspose2D { .. } => Err(Error::BackwardNotSupported {
op: "conv-transpose2d",
})?,
Op::AvgPool2D {
arg,
kernel_size,
stride,
} => {
if kernel_size != stride {
crate::bail!("backward not supported for avgpool2d if ksize {kernel_size:?} != stride {stride:?}")
}
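                        // Each input element contributed 1/(k_h * k_w) to its pooled output,
                        // so spread the gradient back uniformly: upsample, then scale by that factor.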
let (_n, _c, h, w) = arg.dims4()?;
let grad_arg = grad.upsample_nearest2d(h, w)?;
let grad_arg =
(grad_arg * (1f64 / (kernel_size.0 * kernel_size.1) as f64))?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad_arg)?;
}
Op::MaxPool2D {
arg,
kernel_size,
stride,
} => {
if kernel_size != stride {
crate::bail!("backward not supported for maxpool2d if ksize {kernel_size:?} != stride {stride:?}")
}
let (_n, _c, h, w) = arg.dims4()?;
// For computing the max-pool gradient, we compute a mask where a 1 means
// that the element is the maximum, then we apply this mask to the
// upsampled gradient (taking into account that multiple max may exist so
// we scale the gradient for this case).
let node_upsampled = node.upsample_nearest2d(h, w)?;
let mask = arg.eq(&node_upsampled)?.to_dtype(arg.dtype())?;
let avg = mask.avg_pool2d_with_stride(*kernel_size, *stride)?;
let grad_arg = ((grad * avg)?.upsample_nearest2d(h, w)? * mask)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad_arg)?;
}
Op::UpsampleNearest1D { .. } => Err(Error::BackwardNotSupported {
op: "upsample-nearest1d",
})?,
Op::UpsampleNearest2D {
arg,
target_h,
target_w,
} => {
let (_n, c, h, w) = arg.dims4()?;
if target_h % h != 0 || target_w % w != 0 {
crate::bail!("backward not supported for non integer upscaling factors")
}
let scale_h = target_h / h;
let scale_w = target_w / w;
if scale_h != scale_w {
crate::bail!("backward not supported for non uniform upscaling factors")
};
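                        // Sum the incoming gradient over every scale_h x scale_w patch via a grouped
                        // convolution: one all-ones kernel per channel, with stride equal to the scale.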
let kernel =
Tensor::ones((c, 1, scale_h, scale_w), arg.dtype(), arg.device())?;
let conv_sum = grad.conv2d(&kernel, 0, scale_h, 1, c)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = conv_sum;
}
Op::SliceScatter0(lhs, rhs, start_rhs) => {
let rhs_sum_grad = grads.or_insert(rhs)?;
let rhs_grad = grad.narrow(0, *start_rhs, rhs.dim(0)?)?;
*rhs_sum_grad = rhs_sum_grad.add(&rhs_grad)?;
let lhs_sum_grad = grads.or_insert(lhs)?;
let lhs_grad = grad.slice_scatter0(&rhs.zeros_like()?, *start_rhs)?;
*lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)?
}
Op::Gather(arg, indexes, dim) => {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.scatter_add(indexes, &grad, *dim)?;
}
Op::ScatterAdd(init, indexes, src, dim) => {
let init_sum_grad = grads.or_insert(init)?;
*init_sum_grad = init_sum_grad.add(&grad)?;
let src_grad = grad.gather(indexes, *dim)?;
let src_sum_grad = grads.or_insert(src)?;
*src_sum_grad = src_sum_grad.add(&src_grad)?;
}
Op::IndexAdd(init, indexes, src, dim) => {
let init_sum_grad = grads.or_insert(init)?;
*init_sum_grad = init_sum_grad.add(&grad)?;
let src_grad = grad.index_select(indexes, *dim)?;
let src_sum_grad = grads.or_insert(src)?;
*src_sum_grad = src_sum_grad.add(&src_grad)?;
}
Op::IndexSelect(arg, indexes, dim) => {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.index_add(indexes, &grad, *dim)?;
}
Op::Matmul(lhs, rhs) => {
                        // The forward op already succeeded, so we can skip the matmul
                        // size checks here.
let lhs_grad = grad.matmul(&rhs.t()?)?;
let lhs_sum_grad = grads.or_insert(lhs)?;
*lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)?;
let rhs_grad = lhs.t()?.matmul(&grad)?;
let rhs_sum_grad = grads.or_insert(rhs)?;
*rhs_sum_grad = rhs_sum_grad.add(&rhs_grad)?;
}
Op::Cat(args, dim) => {
let mut start_idx = 0;
for arg in args {
let len = arg.dims()[*dim];
let arg_grad = grad.narrow(*dim, start_idx, len)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?;
start_idx += len;
}
}
Op::Broadcast(arg) => {
let arg_dims = arg.dims();
let node_dims = node.dims();
// The number of dims that have been inserted on the left.
let left_dims = node_dims.len() - arg_dims.len();
let mut sum_dims: Vec<usize> = (0..left_dims).collect();
for (dim, (node_dim, arg_dim)) in node_dims[left_dims..]
.iter()
.zip(arg_dims.iter())
.enumerate()
{
if node_dim != arg_dim {
sum_dims.push(dim + left_dims)
}
}
let mut arg_grad = grad.sum_keepdim(sum_dims.as_slice())?;
for _i in 0..left_dims {
arg_grad = arg_grad.squeeze(0)?
}
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad.broadcast_as(sum_grad.dims())?)?;
}
Op::Reduce(arg, ReduceOp::Sum, reduced_dims) => {
let grad = broadcast_back(arg, &grad, reduced_dims)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad)?;
}
Op::Cmp(_args, _) => {}
Op::Reduce(arg, ReduceOp::Max, reduced_dims) => {
let node = broadcast_back(arg, node, reduced_dims)?;
let grad = broadcast_back(arg, &grad, reduced_dims)?;
let grad = node.eq(arg)?.to_dtype(grad.dtype())?.mul(&grad)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad.broadcast_as(sum_grad.dims())?)?;
}
Op::Reduce(arg, ReduceOp::Min, reduced_dims) => {
let node = broadcast_back(arg, node, reduced_dims)?;
let grad = broadcast_back(arg, &grad, reduced_dims)?;
let grad = node.eq(arg)?.to_dtype(grad.dtype())?.mul(&grad)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad.broadcast_as(sum_grad.dims())?)?;
}
Op::ToDType(arg) => {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad.to_dtype(arg.dtype())?)?
}
Op::Copy(arg) => {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad)?
}
Op::Affine { arg, mul, .. } => {
let arg_grad = grad.affine(*mul, 0.)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?
}
Op::Unary(arg, UnaryOp::Log) => {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&(grad / arg)?)?
}
Op::Unary(arg, UnaryOp::Sin) => {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&(&grad * arg.cos())?)?
}
Op::Unary(arg, UnaryOp::Cos) => {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.sub(&(&grad * arg.sin())?)?
}
Op::Unary(arg, UnaryOp::Tanh) => {
let sum_grad = grads.or_insert(arg)?;
let minus_dtanh = (node.sqr()? - 1.)?;
*sum_grad = sum_grad.sub(&(&grad * &minus_dtanh)?)?
}
Op::Unary(arg, UnaryOp::Abs) => {
let sum_grad = grads.or_insert(arg)?;
let ones = arg.ones_like()?;
let abs_grad = arg.ge(&arg.zeros_like()?)?.where_cond(&ones, &ones.neg()?);
*sum_grad = sum_grad.add(&(&grad * abs_grad)?)?
}
Op::Unary(arg, UnaryOp::Exp) => {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&(&grad * *node)?)?
}
Op::Unary(arg, UnaryOp::Neg) => {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.sub(&grad)?
}
Op::Unary(arg, UnaryOp::Recip) => {
let sum_grad = grads.or_insert(arg)?;
let grad = (grad / arg.sqr()?)?;
*sum_grad = sum_grad.sub(&grad)?
}
&Op::Narrow(ref arg, dim, start_idx, len) => {
let arg_dims = arg.dims();
let left_pad = if start_idx == 0 {
None
} else {
let mut dims = arg_dims.to_vec();
dims[dim] = start_idx;
Some(Tensor::zeros(dims, grad.dtype(), grad.device())?)
};
let right_pad = arg_dims[dim] - start_idx - len;
let right_pad = if right_pad == 0 {
None
} else {
let mut dims = arg_dims.to_vec();
dims[dim] = right_pad;
Some(Tensor::zeros(dims, grad.dtype(), grad.device())?)
};
let arg_grad = match (left_pad, right_pad) {
(None, None) => grad,
(Some(l), None) => Tensor::cat(&[&l, &grad], dim)?,
(None, Some(r)) => Tensor::cat(&[&grad, &r], dim)?,
(Some(l), Some(r)) => Tensor::cat(&[&l, &grad, &r], dim)?,
};
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?
}
Op::Reduce(_, ReduceOp::ArgMin, _) => {}
Op::Reduce(_, ReduceOp::ArgMax, _) => {}
Op::Reshape(arg) => {
let arg_grad = grad.reshape(arg.dims())?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?
}
Op::Unary(_, UnaryOp::Ceil) => Err(Error::BackwardNotSupported { op: "ceil" })?,
Op::Unary(_, UnaryOp::Floor) => {
Err(Error::BackwardNotSupported { op: "floor" })?
}
Op::Unary(_, UnaryOp::Round) => {
Err(Error::BackwardNotSupported { op: "round" })?
}
Op::Unary(arg, UnaryOp::Gelu) => {
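                        // Uses the tanh approximation gelu(x) ≈ 0.5 x (1 + tanh(sqrt(2/π) (x + 0.044715 x³))).
                        // Differentiating gives 0.5 (1 + tanh(u)) + 0.5 x (1 - tanh²(u)) u' with
                        // u' = 0.797885 + 3 * 0.0356774 x², which is what the constants below encode.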
let sum_grad = grads.or_insert(arg)?;
let cube = arg.powf(3.)?;
let tanh = (0.0356774 * &cube + (0.797885 * arg)?)?.tanh()?;
let gelu_grad = (((0.5 * &tanh)?
+ (0.0535161 * cube + (0.398942 * arg)?)? * (1. - tanh.powf(2.)?))?
+ 0.5)?;
*sum_grad = sum_grad.add(&(&grad * gelu_grad)?)?
}
Op::Unary(arg, UnaryOp::Erf) => {
let sum_grad = grads.or_insert(arg)?;
// d/dx erf(x) = 2/sqrt(pi) * e^(-x^2)
let erf_grad =
(2. / std::f64::consts::PI.sqrt()) * (arg.sqr()?.neg()?).exp()?;
*sum_grad = sum_grad.add(&(&grad * erf_grad)?)?
}
Op::Unary(arg, UnaryOp::GeluErf) => {
let sum_grad = grads.or_insert(arg)?;
// d/dx gelu_erf(x) = 0.5 + 0.398942 e^(-x^2/2) x + 0.5 erf(x/sqrt(2))
let neg_half_square = (arg.sqr()?.neg()? / 2.)?;
let scaled_exp_arg = (0.398942 * neg_half_square.exp()? * arg)?;
let arg_scaled_sqrt = (arg / 2f64.sqrt())?;
let erf_scaled_sqrt = (0.5 * arg_scaled_sqrt.erf()?)?;
let gelu_erf_grad = (0.5 + scaled_exp_arg + erf_scaled_sqrt)?;
*sum_grad = sum_grad.add(&(&grad * gelu_erf_grad)?)?;
}
Op::Unary(arg, UnaryOp::Relu) => {
let sum_grad = grads.or_insert(arg)?;
let relu_grad = arg.ge(&arg.zeros_like()?)?.to_dtype(arg.dtype())?;
*sum_grad = sum_grad.add(&(&grad * relu_grad)?)?
}
Op::Elu(arg, alpha) => {
// d/dx elu(x) = 1 for x > 0, alpha * e^x for x <= 0
let sum_grad = grads.or_insert(arg)?;
let zeros = arg.zeros_like()?;
let positive_mask = arg.gt(&zeros)?.to_dtype(arg.dtype())?;
let negative_mask = arg.le(&zeros)?.to_dtype(arg.dtype())?;
let negative_exp_mask = ((negative_mask * arg.exp())? * *alpha)?;
let combined_mask = (positive_mask + negative_exp_mask)?;
*sum_grad = sum_grad.add(&(grad * combined_mask)?)?
}
Op::Powf(arg, e) => {
let arg_grad = (&(grad * arg.powf(e - 1.)?)? * *e)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?
}
Op::CustomOp1(arg, c) => {
if let Some(arg_grad) = c.bwd(arg, node, &grad)? {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?
}
}
Op::CustomOp2(arg1, arg2, c) => {
let (arg_grad1, arg_grad2) = c.bwd(arg1, arg2, node, &grad)?;
if let Some(arg_grad1) = arg_grad1 {
let sum_grad = grads.or_insert(arg1)?;
*sum_grad = sum_grad.add(&arg_grad1)?
}
if let Some(arg_grad2) = arg_grad2 {
let sum_grad = grads.or_insert(arg2)?;
*sum_grad = sum_grad.add(&arg_grad2)?
}
}
Op::CustomOp3(arg1, arg2, arg3, c) => {
let (arg_grad1, arg_grad2, arg_grad3) =
c.bwd(arg1, arg2, arg3, node, &grad)?;
if let Some(arg_grad1) = arg_grad1 {
let sum_grad = grads.or_insert(arg1)?;
*sum_grad = sum_grad.add(&arg_grad1)?
}
if let Some(arg_grad2) = arg_grad2 {
let sum_grad = grads.or_insert(arg2)?;
*sum_grad = sum_grad.add(&arg_grad2)?
}
if let Some(arg_grad3) = arg_grad3 {
let sum_grad = grads.or_insert(arg3)?;
*sum_grad = sum_grad.add(&arg_grad3)?
}
}
Op::Unary(arg, UnaryOp::Sqr) => {
let arg_grad = arg.mul(&grad)?.affine(2., 0.)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?
}
Op::Unary(arg, UnaryOp::Sqrt) => {
let arg_grad = grad.div(node)?.affine(0.5, 0.)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?
}
Op::ToDevice(arg) => {
let sum_grad = grads.or_insert(arg)?;
let arg_grad = grad.to_device(sum_grad.device())?;
*sum_grad = sum_grad.add(&arg_grad)?
}
Op::Transpose(arg, dim1, dim2) => {
let arg_grad = grad.transpose(*dim1, *dim2)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?
}
Op::Permute(arg, dims) => {
let mut inv_dims = vec![0; dims.len()];
for (i, &dim_idx) in dims.iter().enumerate() {
inv_dims[dim_idx] = i
}
let arg_grad = grad.permute(inv_dims)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?
}
};
}
}
Ok(grads)
}
}
#[derive(Debug)]
pub struct GradStore(HashMap<TensorId, Tensor>);
impl GradStore {
fn new() -> Self {
GradStore(HashMap::new())
}
pub fn get_id(&self, id: TensorId) -> Option<&Tensor> {
self.0.get(&id)
}
pub fn get(&self, tensor: &Tensor) -> Option<&Tensor> {
self.0.get(&tensor.id())
}
pub fn remove(&mut self, tensor: &Tensor) -> Option<Tensor> {
self.0.remove(&tensor.id())
}
pub fn insert(&mut self, tensor: &Tensor, grad: Tensor) -> Option<Tensor> {
self.0.insert(tensor.id(), grad)
}
fn or_insert(&mut self, tensor: &Tensor) -> Result<&mut Tensor> {
use std::collections::hash_map::Entry;
let grad = match self.0.entry(tensor.id()) {
Entry::Occupied(entry) => entry.into_mut(),
Entry::Vacant(entry) => {
let grad = tensor.zeros_like()?;
entry.insert(grad)
}
};
Ok(grad)
}
}
| candle/candle-core/src/backprop.rs/0 | {
"file_path": "candle/candle-core/src/backprop.rs",
"repo_id": "candle",
"token_count": 22705
} | 16 |
#![allow(dead_code)]
use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT};
use crate::{CpuStorage, DType, Error, Layout, Result, Shape};
#[derive(Debug, Clone)]
pub struct MetalDevice;
#[derive(Debug)]
pub struct MetalStorage;
#[derive(thiserror::Error, Debug)]
pub enum MetalError {
#[error("{0}")]
Message(String),
}
impl From<String> for MetalError {
fn from(e: String) -> Self {
MetalError::Message(e)
}
}
macro_rules! fail {
() => {
unimplemented!("metal support has not been enabled, add `metal` feature to enable.")
};
}
impl crate::backend::BackendStorage for MetalStorage {
type Device = MetalDevice;
fn try_clone(&self, _: &Layout) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn dtype(&self) -> DType {
fail!()
}
fn device(&self) -> &Self::Device {
fail!()
}
fn to_cpu_storage(&self) -> Result<CpuStorage> {
Err(Error::NotCompiledWithMetalSupport)
}
fn affine(&self, _: &Layout, _: f64, _: f64) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn powf(&self, _: &Layout, _: f64) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn elu(&self, _: &Layout, _: f64) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn reduce_op(&self, _: ReduceOp, _: &Layout, _: &[usize]) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn cmp(&self, _: CmpOp, _: &Self, _: &Layout, _: &Layout) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn to_dtype(&self, _: &Layout, _: DType) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn unary_impl<B: UnaryOpT>(&self, _: &Layout) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn binary_impl<B: BinaryOpT>(&self, _: &Self, _: &Layout, _: &Layout) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn where_cond(&self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn conv1d(
&self,
_: &Layout,
_: &Self,
_: &Layout,
_: &crate::conv::ParamsConv1D,
) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn conv_transpose1d(
&self,
_l: &Layout,
_kernel: &Self,
_kernel_l: &Layout,
_params: &crate::conv::ParamsConvTranspose1D,
) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn conv2d(
&self,
_: &Layout,
_: &Self,
_: &Layout,
_: &crate::conv::ParamsConv2D,
) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn conv_transpose2d(
&self,
_l: &Layout,
_kernel: &Self,
_kernel_l: &Layout,
_params: &crate::conv::ParamsConvTranspose2D,
) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn index_select(&self, _: &Self, _: &Layout, _: &Layout, _: usize) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn gather(&self, _: &Layout, _: &Self, _: &Layout, _: usize) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn scatter_add(
&self,
_: &Layout,
_: &Self,
_: &Layout,
_: &Self,
_: &Layout,
_: usize,
) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn index_add(
&self,
_: &Layout,
_: &Self,
_: &Layout,
_: &Self,
_: &Layout,
_: usize,
) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn matmul(
&self,
_: &Self,
_: (usize, usize, usize, usize),
_: &Layout,
_: &Layout,
) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn copy_strided_src(&self, _: &mut Self, _: usize, _: &Layout) -> Result<()> {
Err(Error::NotCompiledWithMetalSupport)
}
fn avg_pool2d(&self, _: &Layout, _: (usize, usize), _: (usize, usize)) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn max_pool2d(&self, _: &Layout, _: (usize, usize), _: (usize, usize)) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn upsample_nearest1d(&self, _: &Layout, _: usize) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn upsample_nearest2d(&self, _: &Layout, _: usize, _: usize) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
}
impl crate::backend::BackendDevice for MetalDevice {
type Storage = MetalStorage;
fn new(_: usize) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
fn set_seed(&self, _: u64) -> Result<()> {
Err(Error::NotCompiledWithMetalSupport)
}
fn location(&self) -> crate::DeviceLocation {
fail!()
}
fn same_device(&self, _: &Self) -> bool {
fail!()
}
fn zeros_impl(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage> {
Err(Error::NotCompiledWithMetalSupport)
}
fn ones_impl(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage> {
Err(Error::NotCompiledWithMetalSupport)
}
fn storage_from_cpu_storage(&self, _: &CpuStorage) -> Result<Self::Storage> {
Err(Error::NotCompiledWithMetalSupport)
}
fn rand_uniform(&self, _: &Shape, _: DType, _: f64, _: f64) -> Result<Self::Storage> {
Err(Error::NotCompiledWithMetalSupport)
}
fn rand_normal(&self, _: &Shape, _: DType, _: f64, _: f64) -> Result<Self::Storage> {
Err(Error::NotCompiledWithMetalSupport)
}
}
| candle/candle-core/src/dummy_metal_backend.rs/0 | {
"file_path": "candle/candle-core/src/dummy_metal_backend.rs",
"repo_id": "candle",
"token_count": 2690
} | 17 |
use super::k_quants::{
BlockQ2K, BlockQ3K, BlockQ4K, BlockQ4_0, BlockQ5K, BlockQ6K, BlockQ8K, BlockQ8_0, QK8_0, QK_K,
};
use crate::Result;
use byteorder::{ByteOrder, LittleEndian};
#[allow(unused_imports)]
#[cfg(target_arch = "arm")]
use core::arch::arm::*;
#[allow(unused_imports)]
#[cfg(target_arch = "aarch64")]
use core::arch::aarch64::*;
#[inline(always)]
unsafe fn vdotq_s32(a: int8x16_t, b: int8x16_t) -> int32x4_t {
// TODO: dotprod
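    // Emulate the missing dotprod instructions: widen the signed i8 products to i16 with
    // vmull_s8, then pairwise-add into i32 lanes; the horizontal sum over lanes matches
    // what a true vdotq_s32 would accumulate.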
let p0 = vmull_s8(vget_low_s8(a), vget_low_s8(b));
let p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b));
vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1))
}
#[inline(always)]
pub(crate) fn vec_dot_q4_0_q8_0(n: usize, xs: &[BlockQ4_0], ys: &[BlockQ8_0]) -> Result<f32> {
let qk = QK8_0;
let nb = n / qk;
if n % QK8_0 != 0 {
crate::bail!("vec_dot_q4_0_q8_0: {n} is not divisible by {qk}")
}
unsafe {
let mut sumv0 = vdupq_n_f32(0.0f32);
for i in 0..nb {
let x0 = &xs[i];
let y0 = &ys[i];
let m4b = vdupq_n_u8(0x0F);
let s8b = vdupq_n_s8(0x8);
let v0_0 = vld1q_u8(x0.qs.as_ptr());
// 4-bit -> 8-bit
let v0_0l = vreinterpretq_s8_u8(vandq_u8(v0_0, m4b));
let v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
// sub 8
let v0_0ls = vsubq_s8(v0_0l, s8b);
let v0_0hs = vsubq_s8(v0_0h, s8b);
// load y
let v1_0l = vld1q_s8(y0.qs.as_ptr());
let v1_0h = vld1q_s8(y0.qs.as_ptr().add(16));
let pl0 = vdotq_s32(v0_0ls, v1_0l);
let ph0 = vdotq_s32(v0_0hs, v1_0h);
sumv0 = vmlaq_n_f32(
sumv0,
vcvtq_f32_s32(vaddq_s32(pl0, ph0)),
x0.d.to_f32() * y0.d.to_f32(),
);
}
Ok(vaddvq_f32(sumv0))
}
}
#[inline(always)]
pub(crate) fn vec_dot_q8_0_q8_0(n: usize, xs: &[BlockQ8_0], ys: &[BlockQ8_0]) -> Result<f32> {
let qk = QK8_0;
if n % QK8_0 != 0 {
crate::bail!("vec_dot_q8_0_q8_0: {n} is not divisible by {qk}")
}
let nb = n / QK8_0;
unsafe {
let mut sumv0 = vdupq_n_f32(0.0f32);
for i in 0..nb {
let x0 = &xs[i];
let y0 = &ys[i];
let x0_0 = vld1q_s8(x0.qs.as_ptr());
let x0_1 = vld1q_s8(x0.qs.as_ptr().add(16));
// load y
let y0_0 = vld1q_s8(y0.qs.as_ptr());
let y0_1 = vld1q_s8(y0.qs.as_ptr().add(16));
let p0 = vdotq_s32(x0_0, y0_0);
let p1 = vdotq_s32(x0_1, y0_1);
sumv0 = vmlaq_n_f32(
sumv0,
vcvtq_f32_s32(vaddq_s32(p0, p1)),
x0.d.to_f32() * y0.d.to_f32(),
);
}
Ok(vaddvq_f32(sumv0))
}
}
#[inline(always)]
pub(crate) fn vec_dot_q8k_q8k(n: usize, xs: &[BlockQ8K], ys: &[BlockQ8K]) -> Result<f32> {
let qk = QK_K;
if n % QK_K != 0 {
crate::bail!("vec_dot_q8k_q8k: {n} is not divisible by {qk}")
}
let mut sumf = 0f32;
for (xs, ys) in xs.iter().zip(ys.iter()) {
unsafe {
let mut sum_i = vdupq_n_s32(0);
let scale = xs.d * ys.d;
let xs = xs.qs.as_ptr();
let ys = ys.qs.as_ptr();
for i in (0..QK_K).step_by(16) {
let xs = vld1q_s8(xs.add(i));
let ys = vld1q_s8(ys.add(i));
let xy = vdotq_s32(xs, ys);
sum_i = vaddq_s32(sum_i, xy)
}
sumf += vaddvq_s32(sum_i) as f32 * scale
}
}
Ok(sumf)
}
#[inline(always)]
pub(crate) fn vec_dot_q6k_q8k(n: usize, xs: &[BlockQ6K], ys: &[BlockQ8K]) -> Result<f32> {
if n % QK_K != 0 {
crate::bail!("vec_dot_q6k_q8k: {n} is not divisible by {QK_K}")
}
let mut sum = 0f32;
unsafe {
let m4b = vdupq_n_u8(0xF);
let mone = vdupq_n_u8(3);
for (x, y) in xs.iter().zip(ys.iter()) {
let d_all = x.d.to_f32();
let mut q6 = x.ql.as_ptr();
let mut qh = x.qh.as_ptr();
let mut q8 = y.qs.as_ptr();
let mut scale = x.scales.as_ptr();
let q8sums = vld1q_s16_x2(y.bsums.as_ptr());
let scales = vld1q_s8(scale);
let q6scales = int16x8x2_t(
vmovl_s8(vget_low_s8(scales)),
vmovl_s8(vget_high_s8(scales)),
);
let prod = vaddq_s32(
vaddq_s32(
vmull_s16(vget_low_s16(q8sums.0), vget_low_s16(q6scales.0)),
vmull_s16(vget_high_s16(q8sums.0), vget_high_s16(q6scales.0)),
),
vaddq_s32(
vmull_s16(vget_low_s16(q8sums.1), vget_low_s16(q6scales.1)),
vmull_s16(vget_high_s16(q8sums.1), vget_high_s16(q6scales.1)),
),
);
let isum_mins = vaddvq_s32(prod);
let mut isum = 0i32;
for _j in 0..QK_K / 128 {
let qhbits = vld1q_u8_x2(qh);
qh = qh.add(32);
let q6bits = vld1q_u8_x4(q6);
q6 = q6.add(64);
let q8bytes = vld1q_s8_x4(q8);
q8 = q8.add(64);
let q6h_0 = vshlq_n_u8(vandq_u8(mone, qhbits.0), 4);
let q6h_1 = vshlq_n_u8(vandq_u8(mone, qhbits.1), 4);
let shifted = vshrq_n_u8(qhbits.0, 2);
let q6h_2 = vshlq_n_u8(vandq_u8(mone, shifted), 4);
let shifted = vshrq_n_u8(qhbits.1, 2);
let q6h_3 = vshlq_n_u8(vandq_u8(mone, shifted), 4);
let q6bytes_0 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.0, m4b), q6h_0));
let q6bytes_1 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.1, m4b), q6h_1));
let q6bytes_2 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.2, m4b), q6h_2));
let q6bytes_3 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.3, m4b), q6h_3));
let p0 = vdotq_s32(q6bytes_0, q8bytes.0);
let p1 = vdotq_s32(q6bytes_1, q8bytes.1);
let (scale0, scale1) = (*scale as i32, *scale.add(1) as i32);
isum += vaddvq_s32(p0) * scale0 + vaddvq_s32(p1) * scale1;
scale = scale.add(2);
let p2 = vdotq_s32(q6bytes_2, q8bytes.2);
let p3 = vdotq_s32(q6bytes_3, q8bytes.3);
let (scale0, scale1) = (*scale as i32, *scale.add(1) as i32);
isum += vaddvq_s32(p2) * scale0 + vaddvq_s32(p3) * scale1;
scale = scale.add(2);
let q8bytes = vld1q_s8_x4(q8);
q8 = q8.add(64);
let shifted = vshrq_n_u8(qhbits.0, 4);
let q6h_0 = vshlq_n_u8(vandq_u8(mone, shifted), 4);
let shifted = vshrq_n_u8(qhbits.1, 4);
let q6h_1 = vshlq_n_u8(vandq_u8(mone, shifted), 4);
let shifted = vshrq_n_u8(qhbits.0, 6);
let q6h_2 = vshlq_n_u8(vandq_u8(mone, shifted), 4);
let shifted = vshrq_n_u8(qhbits.1, 6);
let q6h_3 = vshlq_n_u8(vandq_u8(mone, shifted), 4);
let q6bytes_0 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.0, 4), q6h_0));
let q6bytes_1 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.1, 4), q6h_1));
let q6bytes_2 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.2, 4), q6h_2));
let q6bytes_3 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.3, 4), q6h_3));
let p0 = vdotq_s32(q6bytes_0, q8bytes.0);
let p1 = vdotq_s32(q6bytes_1, q8bytes.1);
let (scale0, scale1) = (*scale as i32, *scale.add(1) as i32);
isum += vaddvq_s32(p0) * scale0 + vaddvq_s32(p1) * scale1;
scale = scale.add(2);
let p2 = vdotq_s32(q6bytes_2, q8bytes.2);
let p3 = vdotq_s32(q6bytes_3, q8bytes.3);
let (scale0, scale1) = (*scale as i32, *scale.add(1) as i32);
isum += vaddvq_s32(p2) * scale0 + vaddvq_s32(p3) * scale1;
scale = scale.add(2);
}
sum += d_all * y.d * ((isum - 32 * isum_mins) as f32);
}
}
Ok(sum)
}
#[inline(always)]
pub(crate) fn vec_dot_q5k_q8k(n: usize, xs: &[BlockQ5K], ys: &[BlockQ8K]) -> Result<f32> {
if n % QK_K != 0 {
crate::bail!("vec_dot_q5k_q8k: {n} is not divisible by {QK_K}")
}
let mut sumf = 0f32;
let mut utmp = [0u32; 4];
const KMASK1: u32 = 0x3f3f3f3f;
const KMASK2: u32 = 0x0f0f0f0f;
const KMASK3: u32 = 0x03030303;
unsafe {
let m4b = vdupq_n_u8(0xF);
let mone = vdupq_n_u8(1);
let mtwo = vdupq_n_u8(2);
for (x, y) in xs.iter().zip(ys.iter()) {
let d = y.d * x.d.to_f32();
let dmin = y.d * x.dmin.to_f32();
let q8sums = vpaddq_s16(
vld1q_s16(y.bsums.as_ptr()),
vld1q_s16(y.bsums.as_ptr().add(8)),
);
LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]);
utmp[3] = ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4);
let uaux = utmp[1] & KMASK1;
utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4);
utmp[2] = uaux;
utmp[0] &= KMASK1;
let mins8 = vld1_u8((utmp.as_ptr() as *const u8).add(8));
let mins = vreinterpretq_s16_u16(vmovl_u8(mins8));
let prod = vaddq_s32(
vmull_s16(vget_low_s16(q8sums), vget_low_s16(mins)),
vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)),
);
let sumi_mins = vaddvq_s32(prod);
let mut scales = utmp.as_ptr() as *const u8;
let mut q5 = x.qs.as_ptr();
let mut q8 = y.qs.as_ptr();
let mut qhbits = vld1q_u8_x2(x.qh.as_ptr());
let mut sumi = 0i32;
for _j in 0..QK_K / 64 {
let q5bits = vld1q_u8_x2(q5);
q5 = q5.add(32);
let q8bytes = vld1q_s8_x4(q8);
q8 = q8.add(64);
let q5h_0 = vshlq_n_u8(vandq_u8(mone, qhbits.0), 4);
let q5h_1 = vshlq_n_u8(vandq_u8(mone, qhbits.1), 4);
let q5h_2 = vshlq_n_u8(vandq_u8(mtwo, qhbits.0), 3);
let q5h_3 = vshlq_n_u8(vandq_u8(mtwo, qhbits.1), 3);
qhbits.0 = vshrq_n_u8(qhbits.0, 2);
qhbits.1 = vshrq_n_u8(qhbits.1, 2);
let q5bytes_0 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.0, m4b), q5h_0));
let q5bytes_1 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.1, m4b), q5h_1));
let q5bytes_2 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.0, 4), q5h_2));
let q5bytes_3 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.1, 4), q5h_3));
let p0 = vdotq_s32(q5bytes_0, q8bytes.0);
let p1 = vdotq_s32(q5bytes_1, q8bytes.1);
sumi += vaddvq_s32(vaddq_s32(p0, p1)) * *scales as i32;
scales = scales.add(1);
let p2 = vdotq_s32(q5bytes_2, q8bytes.2);
let p3 = vdotq_s32(q5bytes_3, q8bytes.3);
sumi += vaddvq_s32(vaddq_s32(p2, p3)) * *scales as i32;
scales = scales.add(1);
}
sumf += d * sumi as f32 - dmin * sumi_mins as f32;
}
}
Ok(sumf)
}
#[inline(always)]
pub(crate) fn vec_dot_q4k_q8k(n: usize, xs: &[BlockQ4K], ys: &[BlockQ8K]) -> Result<f32> {
if n % QK_K != 0 {
crate::bail!("vec_dot_q4k_q8k: {n} is not divisible by {QK_K}")
}
let mut sumf = 0f32;
let mut utmp = [0u32; 4];
let mut scales = [0u8; 16];
const KMASK1: u32 = 0x3f3f3f3f;
const KMASK2: u32 = 0x0f0f0f0f;
const KMASK3: u32 = 0x03030303;
unsafe {
let m4b = vdupq_n_u8(0xF);
for (x, y) in xs.iter().zip(ys.iter()) {
let d = y.d * x.d.to_f32();
let dmin = y.d * x.dmin.to_f32();
let q8sums = vpaddq_s16(
vld1q_s16(y.bsums.as_ptr()),
vld1q_s16(y.bsums.as_ptr().add(8)),
);
LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]);
let mins8 = vld1_u32(
[
utmp[1] & KMASK1,
((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4),
]
.as_ptr(),
);
utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4);
utmp[0] &= KMASK1;
let mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8)));
let prod = vaddq_s32(
vmull_s16(vget_low_s16(q8sums), vget_low_s16(mins)),
vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)),
);
sumf -= dmin * vaddvq_s32(prod) as f32;
LittleEndian::write_u32_into(&utmp, &mut scales);
let mut q4 = x.qs.as_ptr();
let mut q8 = y.qs.as_ptr();
let mut sumi1 = 0i32;
let mut sumi2 = 0i32;
for j in 0..QK_K / 64 {
let q4bits = vld1q_u8_x2(q4);
q4 = q4.add(32);
let q8bytes = vld1q_s8_x2(q8);
q8 = q8.add(32);
let q4bytes = int8x16x2_t(
vreinterpretq_s8_u8(vandq_u8(q4bits.0, m4b)),
vreinterpretq_s8_u8(vandq_u8(q4bits.1, m4b)),
);
let p0 = vdotq_s32(q4bytes.0, q8bytes.0);
let p1 = vdotq_s32(q4bytes.1, q8bytes.1);
sumi1 += vaddvq_s32(vaddq_s32(p0, p1)) * scales[2 * j] as i32;
let q8bytes = vld1q_s8_x2(q8);
q8 = q8.add(32);
let q4bytes = int8x16x2_t(
vreinterpretq_s8_u8(vshrq_n_u8(q4bits.0, 4)),
vreinterpretq_s8_u8(vshrq_n_u8(q4bits.1, 4)),
);
let p2 = vdotq_s32(q4bytes.0, q8bytes.0);
let p3 = vdotq_s32(q4bytes.1, q8bytes.1);
sumi2 += vaddvq_s32(vaddq_s32(p2, p3)) * scales[2 * j + 1] as i32;
}
sumf += d * (sumi1 + sumi2) as f32;
}
}
Ok(sumf)
}
#[inline(always)]
pub(crate) fn vec_dot_q3k_q8k(n: usize, xs: &[BlockQ3K], ys: &[BlockQ8K]) -> Result<f32> {
if n % QK_K != 0 {
crate::bail!("vec_dot_q3k_q8k: {n} is not divisible by {QK_K}")
}
let mut sumf = 0f32;
let mut utmp = [0u32; 4];
let mut aux = [0u32; 3];
const KMASK1: u32 = 0x03030303;
const KMASK2: u32 = 0x0f0f0f0f;
unsafe {
let m3b = vdupq_n_u8(0x3);
let m0 = vdupq_n_u8(1);
let m1 = vshlq_n_u8(m0, 1);
let m2 = vshlq_n_u8(m0, 2);
let m3 = vshlq_n_u8(m0, 3);
for (x, y) in xs.iter().zip(ys.iter()) {
let d = y.d * x.d.to_f32();
let mut q3 = x.qs.as_ptr();
let qh = x.hmask.as_ptr();
let mut q8 = y.qs.as_ptr();
let mut qhbits = vld1q_u8_x2(qh);
let mut isum = 0i32;
// Set up scales
LittleEndian::read_u32_into(&x.scales, &mut aux);
utmp[3] = ((aux[1] >> 4) & KMASK2) | (((aux[2] >> 6) & KMASK1) << 4);
utmp[2] = ((aux[0] >> 4) & KMASK2) | (((aux[2] >> 4) & KMASK1) << 4);
utmp[1] = (aux[1] & KMASK2) | (((aux[2] >> 2) & KMASK1) << 4);
utmp[0] = (aux[0] & KMASK2) | ((aux[2] & KMASK1) << 4);
let mut scale = utmp.as_mut_ptr() as *mut i8;
for j in 0..16 {
*scale.add(j) -= 32i8
}
for j in 0..QK_K / 128 {
let q3bits = vld1q_u8_x2(q3);
q3 = q3.add(32);
let q8bytes_1 = vld1q_s8_x4(q8);
q8 = q8.add(64);
let q8bytes_2 = vld1q_s8_x4(q8);
q8 = q8.add(64);
let q3h_0 = vshlq_n_u8(vbicq_u8(m0, qhbits.0), 2);
let q3h_1 = vshlq_n_u8(vbicq_u8(m0, qhbits.1), 2);
let q3h_2 = vshlq_n_u8(vbicq_u8(m1, qhbits.0), 1);
let q3h_3 = vshlq_n_u8(vbicq_u8(m1, qhbits.1), 1);
let q3bytes_0 = vsubq_s8(
vreinterpretq_s8_u8(vandq_u8(q3bits.0, m3b)),
vreinterpretq_s8_u8(q3h_0),
);
let q3bytes_1 = vsubq_s8(
vreinterpretq_s8_u8(vandq_u8(q3bits.1, m3b)),
vreinterpretq_s8_u8(q3h_1),
);
let q3bytes_2 = vsubq_s8(
vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.0, 2), m3b)),
vreinterpretq_s8_u8(q3h_2),
);
let q3bytes_3 = vsubq_s8(
vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.1, 2), m3b)),
vreinterpretq_s8_u8(q3h_3),
);
let p0 = vdotq_s32(q3bytes_0, q8bytes_1.0);
let p1 = vdotq_s32(q3bytes_1, q8bytes_1.1);
let p2 = vdotq_s32(q3bytes_2, q8bytes_1.2);
let p3 = vdotq_s32(q3bytes_3, q8bytes_1.3);
isum += vaddvq_s32(p0) * *scale as i32
+ vaddvq_s32(p1) * *scale.add(1) as i32
+ vaddvq_s32(p2) * *scale.add(2) as i32
+ vaddvq_s32(p3) * *scale.add(3) as i32;
scale = scale.add(4);
let q3h_0 = vbicq_u8(m2, qhbits.0);
let q3h_1 = vbicq_u8(m2, qhbits.1);
let q3h_2 = vshrq_n_u8(vbicq_u8(m3, qhbits.0), 1);
let q3h_3 = vshrq_n_u8(vbicq_u8(m3, qhbits.1), 1);
let q3bytes_0 = vsubq_s8(
vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.0, 4), m3b)),
vreinterpretq_s8_u8(q3h_0),
);
let q3bytes_1 = vsubq_s8(
vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.1, 4), m3b)),
vreinterpretq_s8_u8(q3h_1),
);
let q3bytes_2 = vsubq_s8(
vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.0, 6), m3b)),
vreinterpretq_s8_u8(q3h_2),
);
let q3bytes_3 = vsubq_s8(
vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.1, 6), m3b)),
vreinterpretq_s8_u8(q3h_3),
);
let p0 = vdotq_s32(q3bytes_0, q8bytes_2.0);
let p1 = vdotq_s32(q3bytes_1, q8bytes_2.1);
let p2 = vdotq_s32(q3bytes_2, q8bytes_2.2);
let p3 = vdotq_s32(q3bytes_3, q8bytes_2.3);
isum += vaddvq_s32(p0) * *scale as i32
+ vaddvq_s32(p1) * *scale.add(1) as i32
+ vaddvq_s32(p2) * *scale.add(2) as i32
+ vaddvq_s32(p3) * *scale.add(3) as i32;
scale = scale.add(4);
if j == 0 {
qhbits.0 = vshrq_n_u8(qhbits.0, 4);
qhbits.1 = vshrq_n_u8(qhbits.1, 4);
}
}
sumf += d * isum as f32;
}
}
Ok(sumf)
}
#[inline(always)]
pub(crate) fn vec_dot_q2k_q8k(n: usize, xs: &[BlockQ2K], ys: &[BlockQ8K]) -> Result<f32> {
if n % QK_K != 0 {
crate::bail!("vec_dot_q2k_q8k: {n} is not divisible by {QK_K}")
}
let mut sumf = 0f32;
let mut aux = [0u8; 16];
unsafe {
let m3 = vdupq_n_u8(0x3);
let m4 = vdupq_n_u8(0xF);
for (x, y) in xs.iter().zip(ys.iter()) {
let d = y.d * x.d.to_f32();
let dmin = -y.d * x.dmin.to_f32();
let mut q2 = x.qs.as_ptr();
let mut q8 = y.qs.as_ptr();
let sc = x.scales.as_ptr();
let mins_and_scales = vld1q_u8(sc);
let scales = vandq_u8(mins_and_scales, m4);
vst1q_u8(aux.as_mut_ptr(), scales);
let mins = vshrq_n_u8(mins_and_scales, 4);
let q8sums = vld1q_s16_x2(y.bsums.as_ptr());
let mins16 = int16x8x2_t(
vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))),
vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins))),
);
let s0 = vaddq_s32(
vmull_s16(vget_low_s16(mins16.0), vget_low_s16(q8sums.0)),
vmull_s16(vget_high_s16(mins16.0), vget_high_s16(q8sums.0)),
);
let s1 = vaddq_s32(
vmull_s16(vget_low_s16(mins16.1), vget_low_s16(q8sums.1)),
vmull_s16(vget_high_s16(mins16.1), vget_high_s16(q8sums.1)),
);
sumf += dmin * vaddvq_s32(vaddq_s32(s0, s1)) as f32;
let mut isum = 0i32;
let mut is = 0usize;
// TODO: dotprod
for _j in 0..QK_K / 128 {
let q2bits = vld1q_u8_x2(q2);
q2 = q2.add(32);
let q8bytes = vld1q_s8_x2(q8);
q8 = q8.add(32);
let mut q2bytes = int8x16x2_t(
vreinterpretq_s8_u8(vandq_u8(q2bits.0, m3)),
vreinterpretq_s8_u8(vandq_u8(q2bits.1, m3)),
);
isum += multiply_accum_with_scale(&aux, is, 0, q2bytes, q8bytes);
let q8bytes = vld1q_s8_x2(q8);
q8 = q8.add(32);
q2bytes.0 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.0, 2), m3));
q2bytes.1 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.1, 2), m3));
isum += multiply_accum_with_scale(&aux, is, 2, q2bytes, q8bytes);
let q8bytes = vld1q_s8_x2(q8);
q8 = q8.add(32);
q2bytes.0 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.0, 4), m3));
q2bytes.1 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.1, 4), m3));
isum += multiply_accum_with_scale(&aux, is, 4, q2bytes, q8bytes);
let q8bytes = vld1q_s8_x2(q8);
q8 = q8.add(32);
q2bytes.0 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.0, 6), m3));
q2bytes.1 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.1, 6), m3));
isum += multiply_accum_with_scale(&aux, is, 6, q2bytes, q8bytes);
is += 8;
}
sumf += d * isum as f32;
}
}
Ok(sumf)
}
#[inline(always)]
unsafe fn multiply_accum_with_scale(
aux: &[u8; 16],
is: usize,
index: usize,
q2bytes: int8x16x2_t,
q8bytes: int8x16x2_t,
) -> i32 {
let p1 = vdotq_s32(q2bytes.0, q8bytes.0);
let p2 = vdotq_s32(q2bytes.1, q8bytes.1);
vaddvq_s32(p1) * aux[is + index] as i32 + vaddvq_s32(p2) * aux[is + 1 + index] as i32
}
| candle/candle-core/src/quantized/neon.rs/0 | {
"file_path": "candle/candle-core/src/quantized/neon.rs",
"repo_id": "candle",
"token_count": 15290
} | 18 |
use anyhow::Result;
use candle_core::{Device, IndexOp, Tensor};
#[test]
fn integer_index() -> Result<()> {
let dev = Device::Cpu;
let tensor = Tensor::arange(0u32, 2 * 3, &dev)?.reshape((2, 3))?;
let result = tensor.i(1)?;
assert_eq!(result.dims(), &[3]);
assert_eq!(result.to_vec1::<u32>()?, &[3, 4, 5]);
let result = tensor.i((.., 2))?;
assert_eq!(result.dims(), &[2]);
assert_eq!(result.to_vec1::<u32>()?, &[2, 5]);
Ok(())
}
#[test]
fn range_index() -> Result<()> {
let dev = Device::Cpu;
// RangeFull
let tensor = Tensor::arange(0u32, 2 * 3, &dev)?.reshape((2, 3))?;
let result = tensor.i(..)?;
assert_eq!(result.dims(), &[2, 3]);
assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2], [3, 4, 5]]);
// Range
let tensor = Tensor::arange(0u32, 4 * 3, &dev)?.reshape((4, 3))?;
let result = tensor.i(1..3)?;
assert_eq!(result.dims(), &[2, 3]);
assert_eq!(result.to_vec2::<u32>()?, &[[3, 4, 5], [6, 7, 8]]);
// RangeFrom
let result = tensor.i(2..)?;
assert_eq!(result.dims(), &[2, 3]);
assert_eq!(result.to_vec2::<u32>()?, &[[6, 7, 8], [9, 10, 11]]);
// RangeTo
let result = tensor.i(..2)?;
assert_eq!(result.dims(), &[2, 3]);
assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2], [3, 4, 5]]);
// RangeInclusive
let result = tensor.i(1..=2)?;
assert_eq!(result.dims(), &[2, 3]);
assert_eq!(result.to_vec2::<u32>()?, &[[3, 4, 5], [6, 7, 8]]);
// RangeTo
let result = tensor.i(..1)?;
assert_eq!(result.dims(), &[1, 3]);
assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2]]);
// RangeToInclusive
let result = tensor.i(..=1)?;
assert_eq!(result.dims(), &[2, 3]);
assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2], [3, 4, 5]]);
// Empty range
let result = tensor.i(1..1)?;
assert_eq!(result.dims(), &[0, 3]);
let empty: [[u32; 3]; 0] = [];
assert_eq!(result.to_vec2::<u32>()?, &empty);
// Similar to PyTorch, allow empty ranges when the computed length is negative.
#[allow(clippy::reversed_empty_ranges)]
let result = tensor.i(1..0)?;
assert_eq!(result.dims(), &[0, 3]);
let empty: [[u32; 3]; 0] = [];
assert_eq!(result.to_vec2::<u32>()?, &empty);
Ok(())
}
#[test]
fn index_3d() -> Result<()> {
let tensor = Tensor::from_iter(0..24u32, &Device::Cpu)?.reshape((2, 3, 4))?;
assert_eq!(tensor.i((0, 0, 0))?.to_scalar::<u32>()?, 0);
assert_eq!(tensor.i((1, 0, 0))?.to_scalar::<u32>()?, 12);
assert_eq!(tensor.i((0, 1, 0))?.to_scalar::<u32>()?, 4);
assert_eq!(tensor.i((0, 1, 3))?.to_scalar::<u32>()?, 7);
assert_eq!(tensor.i((0..2, 0, 0))?.to_vec1::<u32>()?, &[0, 12]);
assert_eq!(
tensor.i((0..2, .., 0))?.to_vec2::<u32>()?,
&[[0, 4, 8], [12, 16, 20]]
);
assert_eq!(
tensor.i((..2, .., 3))?.to_vec2::<u32>()?,
&[[3, 7, 11], [15, 19, 23]]
);
assert_eq!(tensor.i((1, .., 3))?.to_vec1::<u32>()?, &[15, 19, 23]);
Ok(())
}
#[test]
fn slice_assign() -> Result<()> {
let dev = Device::Cpu;
let tensor = Tensor::arange(0u32, 4 * 5, &dev)?.reshape((4, 5))?;
let src = Tensor::arange(0u32, 2 * 3, &dev)?.reshape((3, 2))?;
let out = tensor.slice_assign(&[1..4, 3..5], &src)?;
assert_eq!(
out.to_vec2::<u32>()?,
&[
[0, 1, 2, 3, 4],
[5, 6, 7, 0, 1],
[10, 11, 12, 2, 3],
[15, 16, 17, 4, 5]
]
);
let out = tensor.slice_assign(&[0..3, 0..2], &src)?;
assert_eq!(
out.to_vec2::<u32>()?,
&[
[0, 1, 2, 3, 4],
[2, 3, 7, 8, 9],
[4, 5, 12, 13, 14],
[15, 16, 17, 18, 19]
]
);
Ok(())
}
| candle/candle-core/tests/indexing_tests.rs/0 | {
"file_path": "candle/candle-core/tests/indexing_tests.rs",
"repo_id": "candle",
"token_count": 1994
} | 19 |
//! The CIFAR-10 dataset.
//!
//! The files can be downloaded from the following page:
//! <https://www.cs.toronto.edu/~kriz/cifar.html>
//! The binary version of the dataset is used.
use crate::vision::Dataset;
use candle::{DType, Device, Error, Result, Tensor};
use hf_hub::{api::sync::Api, Repo, RepoType};
use parquet::file::reader::{FileReader, SerializedFileReader};
use std::fs::File;
use std::io::{BufReader, Read};
const W: usize = 32;
const H: usize = 32;
const C: usize = 3;
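// Each record in the CIFAR-10 binary format is one label byte followed by a
// 32x32x3 image, hence the +1 below.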
const BYTES_PER_IMAGE: usize = W * H * C + 1;
const SAMPLES_PER_FILE: usize = 10000;
fn read_file(filename: &std::path::Path) -> Result<(Tensor, Tensor)> {
let mut buf_reader = BufReader::new(File::open(filename)?);
let mut data = vec![0u8; SAMPLES_PER_FILE * BYTES_PER_IMAGE];
buf_reader.read_exact(&mut data)?;
let mut images = vec![];
let mut labels = vec![];
for index in 0..SAMPLES_PER_FILE {
let content_offset = BYTES_PER_IMAGE * index;
labels.push(data[content_offset]);
images.push(&data[1 + content_offset..content_offset + BYTES_PER_IMAGE]);
}
    let images: Vec<u8> = images.concat();
let labels = Tensor::from_vec(labels, SAMPLES_PER_FILE, &Device::Cpu)?;
let images = Tensor::from_vec(images, (SAMPLES_PER_FILE, C, H, W), &Device::Cpu)?;
let images = (images.to_dtype(DType::F32)? / 255.)?;
Ok((images, labels))
}
pub fn load_dir<T: AsRef<std::path::Path>>(dir: T) -> Result<Dataset> {
let dir = dir.as_ref();
let (test_images, test_labels) = read_file(&dir.join("test_batch.bin"))?;
let train_images_and_labels = [
"data_batch_1.bin",
"data_batch_2.bin",
"data_batch_3.bin",
"data_batch_4.bin",
"data_batch_5.bin",
]
.iter()
.map(|x| read_file(&dir.join(x)))
.collect::<Result<Vec<_>>>()?;
let (train_images, train_labels): (Vec<_>, Vec<_>) =
train_images_and_labels.into_iter().unzip();
Ok(Dataset {
train_images: Tensor::cat(&train_images, 0)?,
train_labels: Tensor::cat(&train_labels, 0)?,
test_images,
test_labels,
labels: 10,
})
}
fn load_parquet(parquet: SerializedFileReader<std::fs::File>) -> Result<(Tensor, Tensor)> {
let samples = parquet.metadata().file_metadata().num_rows() as usize;
let mut buffer_images: Vec<u8> = Vec::with_capacity(samples * 1_024);
let mut buffer_labels: Vec<u8> = Vec::with_capacity(samples);
for row in parquet.into_iter().flatten() {
for (_name, field) in row.get_column_iter() {
if let parquet::record::Field::Group(subrow) = field {
for (_name, field) in subrow.get_column_iter() {
if let parquet::record::Field::Bytes(value) = field {
let image = image::load_from_memory(value.data()).unwrap();
buffer_images.extend(image.to_rgb8().as_raw());
}
}
} else if let parquet::record::Field::Long(label) = field {
buffer_labels.push(*label as u8);
}
}
}
    // Convert to f32 before normalizing, otherwise the integer scaling would zero out the data.
    let images = (Tensor::from_vec(buffer_images, (samples, 3, 32, 32), &Device::Cpu)?
        .to_dtype(DType::F32)?
        / 255.)?;
let labels = Tensor::from_vec(buffer_labels, (samples,), &Device::Cpu)?;
Ok((images, labels))
}
pub fn load() -> Result<Dataset> {
let api = Api::new().map_err(|e| Error::Msg(format!("Api error: {e}")))?;
let dataset_id = "cifar10".to_string();
let repo = Repo::with_revision(
dataset_id,
RepoType::Dataset,
"refs/convert/parquet".to_string(),
);
let repo = api.repo(repo);
let test_parquet_filename = repo
.get("plain_text/test/0000.parquet")
.map_err(|e| Error::Msg(format!("Api error: {e}")))?;
let train_parquet_filename = repo
.get("plain_text/train/0000.parquet")
.map_err(|e| Error::Msg(format!("Api error: {e}")))?;
let test_parquet = SerializedFileReader::new(std::fs::File::open(test_parquet_filename)?)
.map_err(|e| Error::Msg(format!("Parquet error: {e}")))?;
let train_parquet = SerializedFileReader::new(std::fs::File::open(train_parquet_filename)?)
.map_err(|e| Error::Msg(format!("Parquet error: {e}")))?;
let (test_images, test_labels) = load_parquet(test_parquet)?;
let (train_images, train_labels) = load_parquet(train_parquet)?;
Ok(crate::vision::Dataset {
train_images,
train_labels,
test_images,
test_labels,
labels: 10,
})
}
| candle/candle-datasets/src/vision/cifar.rs/0 | {
"file_path": "candle/candle-datasets/src/vision/cifar.rs",
"repo_id": "candle",
"token_count": 2139
} | 20 |
// This example illustrates how to implement custom operations. These operations can provide their
// own forward pass (CPU and GPU versions) as well as their backward pass.
//
// In this example we add the RMS normalization operation and implement it for f32.
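// As a quick reminder, for a row x of length n, rms-norm rescales each element as
// x_i / sqrt(mean_j(x_j^2) + eps); this is exactly what the cpu_fwd and cuda_fwd
// implementations below compute (the struct is named LayerNorm but performs the
// rms-norm variant, i.e. no mean subtraction).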
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[rustfmt::skip]
#[cfg(feature = "cuda")]
mod cuda_kernels;
use clap::Parser;
use candle::{CpuStorage, CustomOp1, Layout, Result, Shape, Tensor};
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
}
struct LayerNorm {
eps: f32,
}
impl CustomOp1 for LayerNorm {
fn name(&self) -> &'static str {
"layer-norm"
}
fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> {
let (dim1, dim2) = layout.shape().dims2()?;
let slice = storage.as_slice::<f32>()?;
let src = match layout.contiguous_offsets() {
None => candle::bail!("input has to be contiguous"),
Some((o1, o2)) => &slice[o1..o2],
};
let mut dst = Vec::with_capacity(dim1 * dim2);
for idx1 in 0..dim1 {
let src = &src[idx1 * dim2..(idx1 + 1) * dim2];
let variance = src.iter().map(|x| x * x).sum::<f32>();
let s_variance = 1f32 / (variance / dim2 as f32 + self.eps).sqrt();
dst.extend(src.iter().map(|x| x * s_variance))
}
let storage = candle::WithDType::to_cpu_storage_owned(dst);
Ok((storage, layout.shape().clone()))
}
#[cfg(feature = "cuda")]
fn cuda_fwd(
&self,
storage: &candle::CudaStorage,
layout: &Layout,
) -> Result<(candle::CudaStorage, Shape)> {
use candle::backend::BackendStorage;
use candle::cuda_backend::cudarc::driver::{LaunchAsync, LaunchConfig};
use candle::cuda_backend::WrapErr;
let (d1, d2) = layout.shape().dims2()?;
let d1 = d1 as u32;
let d2 = d2 as u32;
let dev = storage.device().clone();
let slice = storage.as_cuda_slice::<f32>()?;
let slice = match layout.contiguous_offsets() {
None => candle::bail!("input has to be contiguous"),
Some((o1, o2)) => slice.slice(o1..o2),
};
let elem_count = layout.shape().elem_count();
let dst = unsafe { dev.alloc::<f32>(elem_count) }.w()?;
let func = dev.get_or_load_func("rms_f32", cuda_kernels::LAYERNORM_KERNELS)?;
let params = (&dst, &slice, self.eps, d1, d2);
let cfg = LaunchConfig {
grid_dim: (d1, 1, 1),
block_dim: (d2, 1, 1),
shared_mem_bytes: 0,
};
unsafe { func.launch(cfg, params) }.w()?;
let dst = candle::CudaStorage::wrap_cuda_slice(dst, dev);
Ok((dst, layout.shape().clone()))
}
}
fn main() -> anyhow::Result<()> {
let args = Args::parse();
let device = candle_examples::device(args.cpu)?;
let t = Tensor::arange(0f32, 14f32, &device)?.reshape((2, 7))?;
println!("{t}");
let t = t.apply_op1(LayerNorm { eps: 1e-5 })?;
println!("{t}");
Ok(())
}
| candle/candle-examples/examples/custom-ops/main.rs/0 | {
"file_path": "candle/candle-examples/examples/custom-ops/main.rs",
"repo_id": "candle",
"token_count": 1475
} | 21 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::{Error as E, Result};
use clap::{Parser, ValueEnum};
mod model;
use model::{Config, Model};
use candle::{DType, Device, Module, Tensor};
use candle_examples::token_output_stream::TokenOutputStream;
use candle_nn::VarBuilder;
use candle_transformers::generation::LogitsProcessor;
use hf_hub::{api::sync::Api, Repo, RepoType};
use tokenizers::Tokenizer;
struct TextGeneration {
model: Model,
device: Device,
tokenizer: TokenOutputStream,
logits_processor: LogitsProcessor,
repeat_penalty: f32,
repeat_last_n: usize,
}
impl TextGeneration {
#[allow(clippy::too_many_arguments)]
fn new(
model: Model,
tokenizer: Tokenizer,
seed: u64,
temp: Option<f64>,
top_p: Option<f64>,
repeat_penalty: f32,
repeat_last_n: usize,
device: &Device,
) -> Self {
let logits_processor = LogitsProcessor::new(seed, temp, top_p);
Self {
model,
tokenizer: TokenOutputStream::new(tokenizer),
logits_processor,
repeat_penalty,
repeat_last_n,
device: device.clone(),
}
}
fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> {
use std::io::Write;
self.tokenizer.clear();
let mut tokens = self
.tokenizer
.tokenizer()
.encode(prompt, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
for &t in tokens.iter() {
if let Some(t) = self.tokenizer.next_token(t)? {
print!("{t}")
}
}
std::io::stdout().flush()?;
let mut generated_tokens = 0usize;
let eos_token = match self.tokenizer.get_token("<|endoftext|>") {
Some(token) => token,
            None => anyhow::bail!("cannot find the <|endoftext|> token"),
};
let start_gen = std::time::Instant::now();
for _ in 0..sample_len {
let input = Tensor::new(tokens.as_slice(), &self.device)?.unsqueeze(0)?;
let logits = self.model.forward(&input)?;
let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?;
let logits = if self.repeat_penalty == 1. {
logits
} else {
let start_at = tokens.len().saturating_sub(self.repeat_last_n);
candle_transformers::utils::apply_repeat_penalty(
&logits,
self.repeat_penalty,
&tokens[start_at..],
)?
};
let next_token = self.logits_processor.sample(&logits)?;
tokens.push(next_token);
generated_tokens += 1;
if next_token == eos_token {
break;
}
if let Some(t) = self.tokenizer.next_token(next_token)? {
print!("{t}");
std::io::stdout().flush()?;
}
}
let dt = start_gen.elapsed();
if let Some(rest) = self.tokenizer.decode_rest().map_err(E::msg)? {
print!("{rest}");
}
std::io::stdout().flush()?;
println!(
"\n{generated_tokens} tokens generated ({:.2} token/s)",
generated_tokens as f64 / dt.as_secs_f64(),
);
Ok(())
}
}
#[derive(Parser, ValueEnum, Clone, Copy, PartialEq, Eq, Debug)]
enum Which {
Mamba130m,
Mamba370m,
Mamba790m,
Mamba1_4b,
Mamba2_8b,
Mamba2_8bSlimPj,
}
impl std::fmt::Display for Which {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", self)
}
}
impl Which {
fn model_id(&self) -> &'static str {
match self {
Self::Mamba130m => "state-spaces/mamba-130m",
Self::Mamba370m => "state-spaces/mamba-370m",
Self::Mamba790m => "state-spaces/mamba-790m",
Self::Mamba1_4b => "state-spaces/mamba-1.4b",
Self::Mamba2_8b => "state-spaces/mamba-2.8b",
            Self::Mamba2_8bSlimPj => "state-spaces/mamba-2.8b-slimpj",
}
}
fn revision(&self) -> &'static str {
match self {
Self::Mamba130m
| Self::Mamba370m
| Self::Mamba790m
| Self::Mamba1_4b
| Self::Mamba2_8bSlimPj => "refs/pr/1",
Self::Mamba2_8b => "refs/pr/4",
}
}
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
#[arg(long)]
prompt: String,
/// The temperature used to generate samples.
#[arg(long)]
temperature: Option<f64>,
/// Nucleus sampling probability cutoff.
#[arg(long)]
top_p: Option<f64>,
/// The seed to use when generating random samples.
#[arg(long, default_value_t = 299792458)]
seed: u64,
/// The length of the sample to generate (in tokens).
#[arg(long, short = 'n', default_value_t = 5000)]
sample_len: usize,
#[arg(long, default_value = "mamba130m")]
which: Which,
#[arg(long)]
model_id: Option<String>,
#[arg(long)]
revision: Option<String>,
#[arg(long)]
tokenizer_file: Option<String>,
#[arg(long)]
weight_files: Option<String>,
#[arg(long)]
config_file: Option<String>,
/// Penalty to be applied for repeating tokens, 1. means no penalty.
#[arg(long, default_value_t = 1.1)]
repeat_penalty: f32,
/// The context size to consider for the repeat penalty.
#[arg(long, default_value_t = 64)]
repeat_last_n: usize,
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
println!(
"avx: {}, neon: {}, simd128: {}, f16c: {}",
candle::utils::with_avx(),
candle::utils::with_neon(),
candle::utils::with_simd128(),
candle::utils::with_f16c()
);
println!(
"temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}",
args.temperature.unwrap_or(0.),
args.repeat_penalty,
args.repeat_last_n
);
let start = std::time::Instant::now();
let api = Api::new()?;
let repo = api.repo(Repo::with_revision(
args.model_id
.unwrap_or_else(|| args.which.model_id().to_string()),
RepoType::Model,
args.revision
.unwrap_or_else(|| args.which.revision().to_string()),
));
let tokenizer_filename = match args.tokenizer_file {
Some(file) => std::path::PathBuf::from(file),
None => api
.model("EleutherAI/gpt-neox-20b".to_string())
.get("tokenizer.json")?,
};
let config_filename = match args.config_file {
Some(file) => std::path::PathBuf::from(file),
None => repo.get("config.json")?,
};
let filenames = match args.weight_files {
Some(files) => files
.split(',')
.map(std::path::PathBuf::from)
.collect::<Vec<_>>(),
None => {
vec![repo.get("model.safetensors")?]
}
};
println!("retrieved the files in {:?}", start.elapsed());
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let start = std::time::Instant::now();
let config: Config = serde_json::from_slice(&std::fs::read(config_filename)?)?;
let device = candle_examples::device(args.cpu)?;
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, DType::F32, &device)? };
let model = Model::new(&config, vb.pp("backbone"))?;
println!("loaded the model in {:?}", start.elapsed());
let mut pipeline = TextGeneration::new(
model,
tokenizer,
args.seed,
args.temperature,
args.top_p,
args.repeat_penalty,
args.repeat_last_n,
&device,
);
pipeline.run(&args.prompt, args.sample_len)?;
Ok(())
}
| candle/candle-examples/examples/mamba-minimal/main.rs/0 | {
"file_path": "candle/candle-examples/examples/mamba-minimal/main.rs",
"repo_id": "candle",
"token_count": 4087
} | 22 |
## Using ONNX models in Candle
This example demonstrates how to run ONNX-based models in Candle; the model
used here is a small SqueezeNet variant.
You can run the example with the following command:
```bash
cargo run --example squeezenet-onnx --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg
```
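If you want to call the ONNX support from your own code rather than through the
example binary, the snippet below sketches roughly what the example does. It is a
minimal, hypothetical sketch: the `candle_onnx::read_file` / `simple_eval` helper
names and the `"data"` input name are assumptions rather than something taken from
this example, so check them against the `candle-onnx` crate and your model.

```rust
use std::collections::HashMap;
use candle::Tensor;

fn run_onnx(model_path: &str, image: Tensor) -> candle::Result<()> {
    // Load the ONNX graph from disk.
    let model = candle_onnx::read_file(model_path)?;
    // Feed the preprocessed image under the name the graph expects.
    let mut inputs = HashMap::new();
    inputs.insert("data".to_string(), image);
    // Evaluate the graph and print the output shapes.
    let outputs = candle_onnx::simple_eval(&model, inputs)?;
    for (name, tensor) in outputs.iter() {
        println!("{name}: {:?}", tensor.shape());
    }
    Ok(())
}
```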
| candle/candle-examples/examples/onnx/README.md/0 | {
"file_path": "candle/candle-examples/examples/onnx/README.md",
"repo_id": "candle",
"token_count": 97
} | 23 |
#![allow(unused)]
//! Vectorized version of the gym environment.
use candle::{DType, Device, Result, Tensor};
use pyo3::prelude::*;
use pyo3::types::PyDict;
#[derive(Debug)]
pub struct Step {
pub obs: Tensor,
pub reward: Tensor,
pub is_done: Tensor,
}
pub struct VecGymEnv {
env: PyObject,
action_space: usize,
observation_space: Vec<usize>,
}
fn w(res: PyErr) -> candle::Error {
candle::Error::wrap(res)
}
impl VecGymEnv {
pub fn new(name: &str, img_dir: Option<&str>, nprocesses: usize) -> Result<VecGymEnv> {
Python::with_gil(|py| {
let sys = py.import("sys")?;
let path = sys.getattr("path")?;
let _ = path.call_method1(
"append",
("candle-examples/examples/reinforcement-learning",),
)?;
let gym = py.import("atari_wrappers")?;
let make = gym.getattr("make")?;
let env = make.call1((name, img_dir, nprocesses))?;
let action_space = env.getattr("action_space")?;
let action_space = action_space.getattr("n")?.extract()?;
let observation_space = env.getattr("observation_space")?;
let observation_space: Vec<usize> = observation_space.getattr("shape")?.extract()?;
let observation_space =
[vec![nprocesses].as_slice(), observation_space.as_slice()].concat();
Ok(VecGymEnv {
env: env.into(),
action_space,
observation_space,
})
})
.map_err(w)
}
pub fn reset(&self) -> Result<Tensor> {
let obs = Python::with_gil(|py| {
let obs = self.env.call_method0(py, "reset")?;
let obs = obs.call_method0(py, "flatten")?;
obs.extract::<Vec<f32>>(py)
})
.map_err(w)?;
Tensor::new(obs, &Device::Cpu)?.reshape(self.observation_space.as_slice())
}
pub fn step(&self, action: Vec<usize>) -> Result<Step> {
let (obs, reward, is_done) = Python::with_gil(|py| {
let step = self.env.call_method(py, "step", (action,), None)?;
let step = step.as_ref(py);
let obs = step.get_item(0)?.call_method("flatten", (), None)?;
let obs_buffer = pyo3::buffer::PyBuffer::get(obs)?;
let obs: Vec<u8> = obs_buffer.to_vec(py)?;
let reward: Vec<f32> = step.get_item(1)?.extract()?;
let is_done: Vec<f32> = step.get_item(2)?.extract()?;
Ok((obs, reward, is_done))
})
.map_err(w)?;
let obs = Tensor::from_vec(obs, self.observation_space.as_slice(), &Device::Cpu)?
.to_dtype(DType::F32)?;
let reward = Tensor::new(reward, &Device::Cpu)?;
let is_done = Tensor::new(is_done, &Device::Cpu)?;
Ok(Step {
obs,
reward,
is_done,
})
}
pub fn action_space(&self) -> usize {
self.action_space
}
pub fn observation_space(&self) -> &[usize] {
&self.observation_space
}
}
| candle/candle-examples/examples/reinforcement-learning/vec_gym_env.rs/0 | {
"file_path": "candle/candle-examples/examples/reinforcement-learning/vec_gym_env.rs",
"repo_id": "candle",
"token_count": 1563
} | 24 |
# candle-stable-lm
StableLM-3B-4E1T is a 3 billion parameter decoder-only language model
pre-trained on 1 trillion tokens of diverse English and code datasets for 4
epochs. See the [HuggingFace Hub Model
Card](https://huggingface.co/stabilityai/stablelm-3b-4e1t).
Note that this model is gated so you will have to request access on the Hub in
order to be able to use it.
## Running an example
```bash
$ cargo run --example stable-lm --release --features cuda -- --prompt 'What is the most efficient programming language in use?' --sample-len 150
avx: true, neon: false, simd128: false, f16c: true
temp: 0.00 repeat-penalty: 1.10 repeat-last-n: 64
retrieved the files in 126.593µs
loaded the model in 3.474148965s
What is the most efficient programming language in use?
The answer to this question depends on what you mean by "efficient". If you're talking about speed, then C++ and Java are probably your best bets. But if you're talking about ease of development, then Python is probably the way to go.
Python is a high-level, interpreted language that is easy to learn and use. It has a large community of developers who are always working on new features and improvements.
C++ is a low-level, compiled language that can be used for both desktop applications and web development. It's more difficult to learn than Python but offers greater control over the code.
Java is another high-level language that is popular with programmers because it runs on many different platforms (including Android phones
150 tokens generated (37.61 token/s)
```
| candle/candle-examples/examples/stable-lm/README.md/0 | {
"file_path": "candle/candle-examples/examples/stable-lm/README.md",
"repo_id": "candle",
"token_count": 407
} | 25 |
// Pytorch also has an implementation of Philox RNG: https://github.com/pytorch/pytorch/blob/8ca3c881db3e3510fcb7725389f6a0633c9b992c/torch/csrc/jit/tensorexpr/cuda_random.h
#pragma once
// Philox CUDA.
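// Philox is a counter-based generator: each output block is produced by running a
// (counter, key) pair derived from (seed, subsequence, offset) through several rounds
// of 32-bit multiply/xor mixing, so any position in the stream can be computed
// independently of the others.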
namespace flash {
struct ull2 {
unsigned long long x;
unsigned long long y;
};
inline __device__ uint2 mulhilo32(const unsigned int a, const unsigned int b) {
uint2 *res;
unsigned long long tmp;
asm ("mul.wide.u32 %0, %1, %2;\n\t"
: "=l"(tmp)
: "r"(a), "r"(b));
res = (uint2*)(&tmp);
return *res;
}
inline __device__ uint4 philox_single_round(const uint4 ctr, const uint2 key) {
constexpr unsigned long kPhiloxSA = 0xD2511F53;
constexpr unsigned long kPhiloxSB = 0xCD9E8D57;
uint2 res0 = mulhilo32(kPhiloxSA, ctr.x);
uint2 res1 = mulhilo32(kPhiloxSB, ctr.z);
uint4 ret = {res1.y ^ ctr.y ^ key.x, res1.x, res0.y ^ ctr.w ^ key.y, res0.x};
return ret;
}
inline __device__ uint4 philox(unsigned long long seed,
unsigned long long subsequence,
unsigned long long offset) {
constexpr unsigned long kPhilox10A = 0x9E3779B9;
constexpr unsigned long kPhilox10B = 0xBB67AE85;
uint2 key = reinterpret_cast<uint2&>(seed);
uint4 counter;
ull2 *tmp = reinterpret_cast<ull2*>(&counter);
tmp->x = offset;
tmp->y = subsequence;
#pragma unroll
for (int i = 0; i < 6; i++) {
counter = philox_single_round(counter, key);
key.x += (kPhilox10A);
key.y += (kPhilox10B);
}
uint4 output = philox_single_round(counter, key);
return output;
}
} // namespace flash
namespace {
class Philox {
public:
__device__ inline Philox(unsigned long long seed,
unsigned long long subsequence,
unsigned long long offset)
: STATE(0)
, seed_(seed)
, offset_(offset)
, key(reinterpret_cast<const uint2&>(seed)) {
//key.x = (unsigned int)seed;
//key.y = (unsigned int)(seed >> 32);
//counter = make_uint4(0, 0, 0, 0);
//counter.z = (unsigned int)(subsequence);
//counter.w = (unsigned int)(subsequence >> 32);
//STATE = 0;
//incr_n(offset / 4);
// key = reinterpret_cast<const uint2&>(seed);
ull2 * tmp = reinterpret_cast<ull2*>(&counter);
tmp->x = offset / 4;
tmp->y = subsequence;
// if ((threadIdx.x == 0) && (blockIdx.x == 0) && (blockIdx.y == 0)) {
// printf("Philox counter: %d, %d, %d, %d\n", counter.x, counter.y, counter.z, counter.w);
// }
}
__device__ inline uint4 operator()() {
// // if (STATE == 0) {
// uint4 counter_ = counter;
// uint2 key_ = key;
// // 7-round philox
// #pragma unroll
// for (int i = 0; i < 6; i++) {
// counter_ = flash::philox_single_round(counter_, key_);
// key_.x += (kPhilox10A);
// key_.y += (kPhilox10B);
// }
// // output = philox_single_round(counter_, key_);
// uint4 output = flash::philox_single_round(counter_, key_);
// // if ((threadIdx.x == 0) && (blockIdx.x == 0) && (blockIdx.y == 0)) {
// // printf("Philox counter: %u, %u, %u, %u\n", counter.x, counter.y, counter.z, counter.w);
// // printf("Philox output: %u, %u, %u, %u\n", output.x, output.y, output.z, output.w);
// // }
// incr();
// // }
// // return a float4 directly
// // unsigned long ret;
// // switch(STATE) {
// // case 0: ret = output.x; break;
// // case 1: ret = output.y; break;
// // case 2: ret = output.z; break;
// // case 3: ret = output.w; break;
// //}
// // STATE = (STATE + 1) % 4;
// return output;
return flash::philox(seed_, offset_, offset_);
}
private:
unsigned long long offset_, seed_;
struct ull2 {
uint64_t x;
uint64_t y;
};
uint4 counter;
// uint4 output;
const uint2 key;
unsigned int STATE;
__device__ inline void incr_n(unsigned long long n) {
unsigned int nlo = (unsigned int)(n);
unsigned int nhi = (unsigned int)(n >> 32);
counter.x += nlo;
if (counter.x < nlo)
nhi++;
counter.y += nhi;
if (nhi <= counter.y)
return;
if (++counter.z)
return;
++counter.w;
}
__device__ uint4 incr128 (uint4 ctr)
{
uint4 res;
asm ("add.cc.u32 %0, %4, %8;\n\t"
"addc.cc.u32 %1, %5, %9;\n\t"
"addc.cc.u32 %2, %6, %10;\n\t"
"addc.u32 %3, %7, %11;\n\t"
: "=r"(res.x), "=r"(res.y), "=r"(res.z), "=r"(res.w)
: "r"(ctr.x), "r"(ctr.y), "r"(ctr.z), "r"(ctr.w),
"n"(1), "n"(0), "n"(0), "n"(0));
return res;
}
__device__ inline void incr() {
// if ((threadIdx.x == 0) && (blockIdx.x == 0) && (blockIdx.y == 0)) {
// printf("Counter before: %u, %u, %u, %u\n", counter.x, counter.y, counter.z, counter.w);
// }
counter = incr128(counter);
// if ((threadIdx.x == 0) && (blockIdx.x == 0) && (blockIdx.y == 0)) {
// printf("Counter after: %u, %u, %u, %u\n", counter.x, counter.y, counter.z, counter.w);
// }
}
static const unsigned long kPhilox10A = 0x9E3779B9;
static const unsigned long kPhilox10B = 0xBB67AE85;
// static const unsigned long kPhiloxSA = 0xD2511F53;
// static const unsigned long kPhiloxSB = 0xCD9E8D57;
};
} // namespace
| candle/candle-flash-attn/kernels/philox.cuh/0 | {
"file_path": "candle/candle-flash-attn/kernels/philox.cuh",
"repo_id": "candle",
"token_count": 2511
} | 26 |
#include "compatibility.cuh"
#include<stdint.h>
#include<cmath>
// TODO: This is often used to check that the data is contiguous so that
// kernels can be easily mapped. However this only returns true for row-major
// layouts; if all the inputs were column-major we could apply the fast path
// too (but not if some of them were row-major and some column-major).
__device__ bool is_contiguous(
const size_t num_dims,
const size_t *dims,
const size_t *strides
) {
size_t acc = 1;
for (unsigned int d = 0; d < num_dims; d++) {
unsigned int dim_idx = num_dims - 1 - d;
if (acc != strides[dim_idx]) {
return false;
}
acc *= dims[dim_idx];
}
return true;
}
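// Maps a flat row-major element index to the offset implied by an arbitrary stride
// layout. For example, with dims = [2, 3] and column-major strides = [1, 2], the
// flat index 4 corresponds to (row 1, col 1) and maps to 1 * 1 + 1 * 2 = 3.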
__device__ unsigned int get_strided_index(
unsigned int idx,
const size_t num_dims,
const size_t *dims,
const size_t *strides
) {
unsigned int strided_i = 0;
for (unsigned int d = 0; d < num_dims; d++) {
unsigned int dim_idx = num_dims - 1 - d;
strided_i += (idx % dims[dim_idx]) * strides[dim_idx];
idx /= dims[dim_idx];
}
return strided_i;
}
__device__ unsigned int restrided(
const unsigned int strided_i,
const size_t num_dims,
const size_t *dims,
const size_t *strides,
const size_t *new_strides
) {
unsigned int idx = 0;
for (int d = 0; d < num_dims; d++) {
idx += (strides[d] == 0 ? 0 : (strided_i / strides[d]) % dims[d]) * new_strides[d];
}
return idx;
}
// Sourced from https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
// Input must be less than or equal to 2 ^ 16
// used in reductions
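// e.g. next_power_of_two(3) == 4, next_power_of_two(16) == 16.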
__device__ __forceinline__ unsigned int next_power_of_two(unsigned int v) {
v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v++;
return v;
}
// Efficiently computes the sum of each chunk in "data" of size chunk_len, and
// stores the sums in out[i / chunk_len]
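// Each thread passes its own element as `data`; elements whose flat index falls in
// the same chunk (i / chunk_len) are reduced into the same output slot, through
// shared memory when chunk_len > 2 and directly via atomicAdd otherwise.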
template<typename T>
__device__ void chunk_sum(
const size_t chunk_len,
const T data,
T* out
) {
__shared__ T buf[1024];
// assumes that threads where i >= numel have already exited
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int block_i = threadIdx.x;
// Fall back to atomicAdd if chunk_len is small to reduce overhead
if (chunk_len <= 2) {
atomicAdd(out + i / chunk_len, data);
return;
}
buf[block_i] = data;
unsigned int chunk_i = i % chunk_len;
unsigned int chunk_start = max((int)(block_i - chunk_i), 0);
unsigned int chunk_end = min((unsigned int)(block_i + chunk_len - chunk_i), blockDim.x);
chunk_i = block_i - chunk_start;
size_t max_chunk_len = min(chunk_end - chunk_start, blockDim.x);
size_t incr = next_power_of_two(max_chunk_len) >> 1;
__syncthreads();
// Uses sequential addressing as discussed in
// https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf
for (; incr > 0; incr >>= 1) {
unsigned int block_i_2 = block_i + incr;
if (block_i_2 < chunk_end && chunk_i < incr) {
// This is sound because __syncthreads and the conditions above
// ensure that no data races occur
buf[block_i] += buf[block_i_2];
}
__syncthreads();
}
if (block_i == chunk_start) {
atomicAdd(out + i / chunk_len, buf[block_i]);
}
}
__device__ __forceinline__ bool isnang(float a) { return isnan(a); }
__device__ __forceinline__ bool isnang(double a) { return isnan(a); }
__device__ __forceinline__ float recipg(float a) { return 1.0 / a; }
__device__ __forceinline__ double recipg(double a) { return 1.0 / a; }
__device__ __forceinline__ float cosg(float a) { return cosf(a); }
__device__ __forceinline__ double cosg(double a) { return cos(a); }
__device__ __forceinline__ float sing(float a) { return sinf(a); }
__device__ __forceinline__ double sing(double a) { return sin(a); }
__device__ __forceinline__ float sqrtg(float a) { return sqrtf(a); }
__device__ __forceinline__ double sqrtg(double a) { return sqrt(a); }
__device__ __forceinline__ float powg(float a, float b) { return powf(a, b); }
__device__ __forceinline__ double powg(double a, double b) { return pow(a, b); }
__device__ __forceinline__ float tanhg(float a) { return tanhf(a); }
__device__ __forceinline__ double tanhg(double a) { return tanh(a); }
__device__ __forceinline__ float erfg(float a) { return erff(a); }
__device__ __forceinline__ double erfg(double a) { return erf(a); }
__device__ __forceinline__ float ceilg(float a) { return ceilf(a); }
__device__ __forceinline__ double ceilg(double a) { return ceil(a); }
__device__ __forceinline__ float floorg(float a) { return floorf(a); }
__device__ __forceinline__ double floorg(double a) { return floor(a); }
__device__ __forceinline__ float roundg(float a) { return roundf(a); }
__device__ __forceinline__ double roundg(double a) { return round(a); }
__device__ __forceinline__ float normcdfg(float a) { return normcdff(a); }
__device__ __forceinline__ double normcdfg(double a) { return normcdf(a); }
__device__ __forceinline__ float maxg(float a, float b) { return fmaxf(a, b); }
__device__ __forceinline__ double maxg(double a, double b) { return fmax(a, b); }
__device__ __forceinline__ float ming(float a, float b) { return fminf(a, b); }
__device__ __forceinline__ double ming(double a, double b) { return fmin(a, b); }
__device__ __forceinline__ float logg(float a) { return logf(a); }
__device__ __forceinline__ double logg(double a) { return log(a); }
__device__ __forceinline__ float expg(float a) { return expf(a); }
__device__ __forceinline__ double expg(double a) { return exp(a); }
__device__ __forceinline__ float absg(float a) { return fabsf(a); }
__device__ __forceinline__ double absg(double a) { return fabs(a); }
__device__ __forceinline__ float copysigng(float a, float b) { return copysignf(a, b); }
__device__ __forceinline__ double copysigng(double a, double b) { return copysign(a, b); }
__device__ __forceinline__ int64_t ming(int64_t a, int64_t b) { return min(a, b); }
__device__ __forceinline__ int64_t maxg(int64_t a, int64_t b) { return max(a, b); }
__device__ __forceinline__ uint32_t ming(uint32_t a, uint32_t b) { return min(a, b); }
__device__ __forceinline__ uint32_t maxg(uint32_t a, uint32_t b) { return max(a, b); }
__device__ __forceinline__ uint8_t ming(uint8_t a, uint8_t b) { return min(a, b); }
__device__ __forceinline__ uint8_t maxg(uint8_t a, uint8_t b) { return max(a, b); }
#if __CUDA_ARCH__ >= 530
__device__ __forceinline__ __half powg(__half a, __half b) { return __float2half(powf(__half2float(a), __half2float(b))); }
__device__ __forceinline__ bool isnang(__half a) { return __hisnan(a); }
__device__ __forceinline__ __half sqrtg(__half a) { return hsqrt(a); }
__device__ __forceinline__ __half cosg(__half a) { return hcos(a); }
__device__ __forceinline__ __half sing(__half a) { return hsin(a); }
__device__ __forceinline__ __half recipg(__half a) { __half one = 1.0; return one / a; }
__device__ __forceinline__ __half maxg(__half a, __half b) { return __hmax_nan(a, b); }
__device__ __forceinline__ __half tanhg(__half a) { return __float2half(tanhf(__half2float(a))); }
__device__ __forceinline__ __half erfg(__half a) { return __float2half(erff(__half2float(a))); }
__device__ __forceinline__ __half ceilg(__half a) { return __float2half(ceilf(__half2float(a))); }
__device__ __forceinline__ __half floorg(__half a) { return __float2half(floorf(__half2float(a))); }
__device__ __forceinline__ __half roundg(__half a) { return __float2half(roundf(__half2float(a))); }
__device__ __forceinline__ __half normcdfg(__half a) { return __float2half(normcdff(__half2float(a))); }
__device__ __forceinline__ __half ming(__half a, __half b) { return __hmin_nan(a, b); }
__device__ __forceinline__ __half logg(__half a) { return hlog(a); }
__device__ __forceinline__ __half expg(__half a) { return hexp(a); }
__device__ __forceinline__ __half absg(__half a) { return __habs(a); }
__device__ __forceinline__ __half copysigng(__half a, __half b) { return __float2half(copysignf(__half2float(a), __half2float(b))); }
#endif
#if __CUDA_ARCH__ >= 800
__device__ __forceinline__ __nv_bfloat16 powg(__nv_bfloat16 a, __nv_bfloat16 b) { return __float2bfloat16(powf(__bfloat162float(a), __bfloat162float(b))); }
__device__ __forceinline__ bool isnang(__nv_bfloat16 a) { return __hisnan(a); }
__device__ __forceinline__ __nv_bfloat16 sqrtg(__nv_bfloat16 a) { return hsqrt(a); }
__device__ __forceinline__ __nv_bfloat16 cosg(__nv_bfloat16 a) { return hcos(a); }
__device__ __forceinline__ __nv_bfloat16 sing(__nv_bfloat16 a) { return hsin(a); }
__device__ __forceinline__ __nv_bfloat16 recipg(__nv_bfloat16 a) { __nv_bfloat16 one = 1.0; return one / a; }
__device__ __forceinline__ __nv_bfloat16 maxg(__nv_bfloat16 a, __nv_bfloat16 b) { return __hmax_nan(a, b); }
__device__ __forceinline__ __nv_bfloat16 tanhg(__nv_bfloat16 a) { return __float2bfloat16(tanhf(__bfloat162float(a))); }
__device__ __forceinline__ __nv_bfloat16 erfg(__nv_bfloat16 a) { return __float2bfloat16(erff(__bfloat162float(a))); }
__device__ __forceinline__ __nv_bfloat16 ceilg(__nv_bfloat16 a) { return __float2bfloat16(ceilf(__bfloat162float(a))); }
__device__ __forceinline__ __nv_bfloat16 floorg(__nv_bfloat16 a) { return __float2bfloat16(floorf(__bfloat162float(a))); }
__device__ __forceinline__ __nv_bfloat16 roundg(__nv_bfloat16 a) { return __float2bfloat16(roundf(__bfloat162float(a))); }
__device__ __forceinline__ __nv_bfloat16 normcdfg(__nv_bfloat16 a) { return __float2bfloat16(normcdff(__bfloat162float(a))); }
__device__ __forceinline__ __nv_bfloat16 ming(__nv_bfloat16 a, __nv_bfloat16 b) { return __hmin_nan(a, b); }
__device__ __forceinline__ __nv_bfloat16 logg(__nv_bfloat16 a) { return hlog(a); }
__device__ __forceinline__ __nv_bfloat16 expg(__nv_bfloat16 a) { return hexp(a); }
__device__ __forceinline__ __nv_bfloat16 absg(__nv_bfloat16 a) { return __habs(a); }
__device__ __forceinline__ __nv_bfloat16 copysigng(__nv_bfloat16 a, __nv_bfloat16 b) { return __float2bfloat16(copysignf(__bfloat162float(a), __bfloat162float(b))); }
#endif
| candle/candle-kernels/src/cuda_utils.cuh/0 | {
"file_path": "candle/candle-kernels/src/cuda_utils.cuh",
"repo_id": "candle",
"token_count": 3936
} | 27 |
#include <metal_stdlib>
using namespace metal;
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#define MIN(x, y) ((x) < (y) ? (x) : (y))
#define SWAP(x, y) { auto tmp = (x); (x) = (y); (y) = tmp; }
#define QK4_0 32
#define QR4_0 2
typedef struct {
half d; // delta
uint8_t qs[QK4_0 / 2]; // nibbles / quants
} block_q4_0;
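// Dequantization for q4_0: x[i] = d * (q[i] - 8), with two 4-bit quants packed per byte.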
#define QK4_1 32
typedef struct {
half d; // delta
half m; // min
uint8_t qs[QK4_1 / 2]; // nibbles / quants
} block_q4_1;
#define QK5_0 32
typedef struct {
half d; // delta
uint8_t qh[4]; // 5-th bit of quants
uint8_t qs[QK5_0 / 2]; // nibbles / quants
} block_q5_0;
#define QK5_1 32
typedef struct {
half d; // delta
half m; // min
uint8_t qh[4]; // 5-th bit of quants
uint8_t qs[QK5_1 / 2]; // nibbles / quants
} block_q5_1;
#define QK8_0 32
typedef struct {
half d; // delta
int8_t qs[QK8_0]; // quants
} block_q8_0;
#define N_SIMDWIDTH 32 // assuming SIMD group size is 32
enum ggml_sort_order {
GGML_SORT_ASC,
GGML_SORT_DESC,
};
// general-purpose kernel for addition, multiplication and division of two tensors
// pros: works for non-contiguous tensors, supports broadcast across all dims
// cons: not very efficient
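// Broadcasting is handled by indexing src1 modulo its own extents (i13 = i03 % ne13
// etc. below), so any src1 dimension of size 1 simply repeats along the matching
// src0 dimension.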
kernel void kernel_add(
device const char * src0,
device const char * src1,
device char * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne03,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant uint64_t & nb03,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant uint64_t & nb13,
constant int64_t & ne0,
constant int64_t & ne1,
constant int64_t & ne2,
constant int64_t & ne3,
constant uint64_t & nb0,
constant uint64_t & nb1,
constant uint64_t & nb2,
constant uint64_t & nb3,
constant int64_t & offs,
uint3 tgpig[[threadgroup_position_in_grid]],
uint3 tpitg[[thread_position_in_threadgroup]],
uint3 ntg[[threads_per_threadgroup]]) {
const int64_t i03 = tgpig.z;
const int64_t i02 = tgpig.y;
const int64_t i01 = tgpig.x;
const int64_t i13 = i03 % ne13;
const int64_t i12 = i02 % ne12;
const int64_t i11 = i01 % ne11;
device const char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01 + offs;
device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11;
device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1 + offs;
for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) {
const int i10 = i0 % ne10;
*((device float *)(dst_ptr + i0*nb0)) = *((device float *)(src0_ptr + i0*nb00)) + *((device float *)(src1_ptr + i10*nb10));
}
}
kernel void kernel_mul(
device const char * src0,
device const char * src1,
device char * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne03,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant uint64_t & nb03,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant uint64_t & nb13,
constant int64_t & ne0,
constant int64_t & ne1,
constant int64_t & ne2,
constant int64_t & ne3,
constant uint64_t & nb0,
constant uint64_t & nb1,
constant uint64_t & nb2,
constant uint64_t & nb3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint3 tpitg[[thread_position_in_threadgroup]],
uint3 ntg[[threads_per_threadgroup]]) {
const int64_t i03 = tgpig.z;
const int64_t i02 = tgpig.y;
const int64_t i01 = tgpig.x;
const int64_t i13 = i03 % ne13;
const int64_t i12 = i02 % ne12;
const int64_t i11 = i01 % ne11;
device const char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01;
device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11;
device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1;
for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) {
const int i10 = i0 % ne10;
*((device float *)(dst_ptr + i0*nb0)) = *((device float *)(src0_ptr + i0*nb00)) * *((device float *)(src1_ptr + i10*nb10));
}
}
kernel void kernel_div(
device const char * src0,
device const char * src1,
device char * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne03,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant uint64_t & nb03,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant uint64_t & nb13,
constant int64_t & ne0,
constant int64_t & ne1,
constant int64_t & ne2,
constant int64_t & ne3,
constant uint64_t & nb0,
constant uint64_t & nb1,
constant uint64_t & nb2,
constant uint64_t & nb3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint3 tpitg[[thread_position_in_threadgroup]],
uint3 ntg[[threads_per_threadgroup]]) {
const int64_t i03 = tgpig.z;
const int64_t i02 = tgpig.y;
const int64_t i01 = tgpig.x;
const int64_t i13 = i03 % ne13;
const int64_t i12 = i02 % ne12;
const int64_t i11 = i01 % ne11;
device const char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01;
device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11;
device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1;
for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) {
const int i10 = i0 % ne10;
*((device float *)(dst_ptr + i0*nb0)) = *((device float *)(src0_ptr + i0*nb00)) / *((device float *)(src1_ptr + i10*nb10));
}
}
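// The three element-wise kernels above share the same broadcasting scheme: src1 indices are
// wrapped with a modulo (i13 = i03 % ne13, i12 = i02 % ne12, i11 = i01 % ne11, i10 = i0 % ne10),
// so a smaller src1 is repeated across src0. A rough illustration, assuming float tensors:
//
//   src0: ne = [4096, 32, 1, 1]   (4096 columns, 32 rows)
//   src1: ne = [4096,  1, 1, 1]   (a single row)
//
//   for every row i01 of src0: i11 = i01 % 1 = 0, so dst[i01][i0] = src0[i01][i0] (op) src1[0][i0]
//
// The same wrap-around applies per element via i10 = i0 % ne10 when src1 has fewer columns.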
// assumption: src1 is a single row
// it is broadcast across every row of src0
kernel void kernel_add_row(
device const float4 * src0,
device const float4 * src1,
device float4 * dst,
constant uint64_t & nb [[buffer(28)]],
uint tpig[[thread_position_in_grid]]) {
dst[tpig] = src0[tpig] + src1[tpig % nb];
}
kernel void kernel_mul_row(
device const float4 * src0,
device const float4 * src1,
device float4 * dst,
constant uint64_t & nb [[buffer(28)]],
uint tpig[[thread_position_in_grid]]) {
dst[tpig] = src0[tpig] * src1[tpig % nb];
}
kernel void kernel_div_row(
device const float4 * src0,
device const float4 * src1,
device float4 * dst,
constant uint64_t & nb [[buffer(28)]],
uint tpig[[thread_position_in_grid]]) {
dst[tpig] = src0[tpig] / src1[tpig % nb];
}
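// The *_row kernels above are a fast path for the case where src1 is one contiguous row:
// the host is expected to pass nb (bound at buffer index 28) as the length of that row in
// float4 units, and `tpig % nb` wraps every output element back onto the row. A sketch of
// the intended indexing, with assumed sizes:
//
//   src0: 4096 floats per row, 32 rows  ->  32*1024 float4 elements
//   src1: 4096 floats                   ->  nb = 1024 float4 elements
//   dst[tpig] = src0[tpig] (op) src1[tpig % 1024]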
kernel void kernel_scale(
device const float * src0,
device float * dst,
constant float & scale,
uint tpig[[thread_position_in_grid]]) {
dst[tpig] = src0[tpig] * scale;
}
kernel void kernel_scale_4(
device const float4 * src0,
device float4 * dst,
constant float & scale,
uint tpig[[thread_position_in_grid]]) {
dst[tpig] = src0[tpig] * scale;
}
kernel void kernel_relu(
device const float * src0,
device float * dst,
uint tpig[[thread_position_in_grid]]) {
dst[tpig] = max(0.0f, src0[tpig]);
}
kernel void kernel_tanh(
device const float * src0,
device float * dst,
uint tpig[[thread_position_in_grid]]) {
device const float & x = src0[tpig];
dst[tpig] = precise::tanh(x);
}
constant float GELU_COEF_A = 0.044715f;
constant float GELU_QUICK_COEF = -1.702f;
constant float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
kernel void kernel_gelu(
device const float4 * src0,
device float4 * dst,
uint tpig[[thread_position_in_grid]]) {
device const float4 & x = src0[tpig];
// BEWARE !!!
// Simply using "tanh" instead of "precise::tanh" will sometimes result in NaNs!
// This was observed with Falcon 7B and 40B models
//
dst[tpig] = 0.5f*x*(1.0f + precise::tanh(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
}
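// For reference, the line above is the usual tanh approximation of GELU:
//
//   GELU(x) ~= 0.5 * x * (1 + tanh( sqrt(2/pi) * (x + 0.044715 * x^3) ))
//
// with SQRT_2_OVER_PI = sqrt(2/pi) and GELU_COEF_A = 0.044715; the kernel evaluates it
// component-wise on float4 vectors.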
kernel void kernel_gelu_quick(
device const float4 * src0,
device float4 * dst,
uint tpig[[thread_position_in_grid]]) {
device const float4 & x = src0[tpig];
dst[tpig] = x*(1.0f/(1.0f+exp(GELU_QUICK_COEF*x)));
}
kernel void kernel_silu(
device const float4 * src0,
device float4 * dst,
uint tpig[[thread_position_in_grid]]) {
device const float4 & x = src0[tpig];
dst[tpig] = x / (1.0f + exp(-x));
}
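// Likewise, for reference: kernel_gelu_quick computes the sigmoid approximation
// x * sigmoid(1.702 * x) (GELU_QUICK_COEF = -1.702 inside exp()), and kernel_silu computes
// the SiLU/swish activation x * sigmoid(x) = x / (1 + exp(-x)), both element-wise on float4.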
kernel void kernel_sqr(
device const float * src0,
device float * dst,
uint tpig[[thread_position_in_grid]]) {
dst[tpig] = src0[tpig] * src0[tpig];
}
kernel void kernel_sum_rows(
device const float * src0,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne03,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant uint64_t & nb03,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant uint64_t & nb13,
constant int64_t & ne0,
constant int64_t & ne1,
constant int64_t & ne2,
constant int64_t & ne3,
constant uint64_t & nb0,
constant uint64_t & nb1,
constant uint64_t & nb2,
constant uint64_t & nb3,
uint3 tpig[[thread_position_in_grid]]) {
int64_t i3 = tpig.z;
int64_t i2 = tpig.y;
int64_t i1 = tpig.x;
if (i3 >= ne03 || i2 >= ne02 || i1 >= ne01) {
return;
}
device const float * src_row = (device const float *) ((device const char *) src0 + i1*nb01 + i2*nb02 + i3*nb03);
device float * dst_row = (device float *) ((device char *) dst + i1*nb1 + i2*nb2 + i3*nb3);
float row_sum = 0;
for (int64_t i0 = 0; i0 < ne00; i0++) {
row_sum += src_row[i0];
}
dst_row[0] = row_sum;
}
kernel void kernel_soft_max(
device const float * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant float & scale,
threadgroup float * buf [[threadgroup(0)]],
uint tgpig[[threadgroup_position_in_grid]],
uint tpitg[[thread_position_in_threadgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]],
uint tiisg[[thread_index_in_simdgroup]],
uint ntg[[threads_per_threadgroup]]) {
const int64_t i03 = (tgpig) / (ne02*ne01);
const int64_t i02 = (tgpig - i03*ne02*ne01) / ne01;
const int64_t i01 = (tgpig - i03*ne02*ne01 - i02*ne01);
device const float * psrc0 = src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
device const float * pmask = src1 != src0 ? src1 + i01*ne00 : nullptr;
device float * pdst = dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
// parallel max
float lmax = -INFINITY;
for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
lmax = MAX(lmax, psrc0[i00]*scale + (pmask ? pmask[i00] : 0.0f));
}
// find the max value in the block
float max_val = simd_max(lmax);
if (ntg > N_SIMDWIDTH) {
if (sgitg == 0) {
buf[tiisg] = -INFINITY;
}
threadgroup_barrier(mem_flags::mem_threadgroup);
if (tiisg == 0) {
buf[sgitg] = max_val;
}
threadgroup_barrier(mem_flags::mem_threadgroup);
max_val = buf[tiisg];
max_val = simd_max(max_val);
}
// parallel sum
float lsum = 0.0f;
for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
const float exp_psrc0 = exp((psrc0[i00]*scale + (pmask ? pmask[i00] : 0.0f)) - max_val);
lsum += exp_psrc0;
pdst[i00] = exp_psrc0;
}
// This barrier fixes a failing test
// ref: https://github.com/ggerganov/ggml/pull/621#discussion_r1425156335
threadgroup_barrier(mem_flags::mem_none);
float sum = simd_sum(lsum);
if (ntg > N_SIMDWIDTH) {
if (sgitg == 0) {
buf[tiisg] = 0.0f;
}
threadgroup_barrier(mem_flags::mem_threadgroup);
if (tiisg == 0) {
buf[sgitg] = sum;
}
threadgroup_barrier(mem_flags::mem_threadgroup);
sum = buf[tiisg];
sum = simd_sum(sum);
}
const float inv_sum = 1.0f/sum;
for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
pdst[i00] *= inv_sum;
}
}
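// Reduction pattern used above (and reused by several kernels below), roughly:
//   1. each thread scans a strided subset of the row and keeps a local max / local sum;
//   2. simd_max / simd_sum reduces within a simdgroup (N_SIMDWIDTH lanes);
//   3. if the threadgroup has more than one simdgroup, lane 0 of each simdgroup writes its
//      partial result into the threadgroup buffer `buf`, and a final simd_max / simd_sum over
//      buf produces the row-wide value (this assumes the simdgroup count is <= N_SIMDWIDTH).
// The row is then normalized as dst[i] = exp(scale*src[i] + mask[i] - max) / sum.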
kernel void kernel_soft_max_4(
device const float * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant float & scale,
threadgroup float * buf [[threadgroup(0)]],
uint tgpig[[threadgroup_position_in_grid]],
uint tpitg[[thread_position_in_threadgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]],
uint tiisg[[thread_index_in_simdgroup]],
uint ntg[[threads_per_threadgroup]]) {
const int64_t i03 = (tgpig) / (ne02*ne01);
const int64_t i02 = (tgpig - i03*ne02*ne01) / ne01;
const int64_t i01 = (tgpig - i03*ne02*ne01 - i02*ne01);
device const float4 * psrc4 = (device const float4 *)(src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00);
device const float4 * pmask = src1 != src0 ? (device const float4 *)(src1 + i01*ne00) : nullptr;
device float4 * pdst4 = (device float4 *)(dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00);
// parallel max
float4 lmax4 = -INFINITY;
for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) {
lmax4 = fmax(lmax4, psrc4[i00]*scale + (pmask ? pmask[i00] : 0.0f));
}
const float lmax = MAX(MAX(lmax4[0], lmax4[1]), MAX(lmax4[2], lmax4[3]));
float max_val = simd_max(lmax);
if (ntg > N_SIMDWIDTH) {
if (sgitg == 0) {
buf[tiisg] = -INFINITY;
}
threadgroup_barrier(mem_flags::mem_threadgroup);
if (tiisg == 0) {
buf[sgitg] = max_val;
}
threadgroup_barrier(mem_flags::mem_threadgroup);
max_val = buf[tiisg];
max_val = simd_max(max_val);
}
// parallel sum
float4 lsum4 = 0.0f;
for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) {
const float4 exp_psrc4 = exp((psrc4[i00]*scale + (pmask ? pmask[i00] : 0.0f)) - max_val);
lsum4 += exp_psrc4;
pdst4[i00] = exp_psrc4;
}
const float lsum = lsum4[0] + lsum4[1] + lsum4[2] + lsum4[3];
// This barrier fixes a failing test
// ref: https://github.com/ggerganov/ggml/pull/621#discussion_r1425156335
threadgroup_barrier(mem_flags::mem_none);
float sum = simd_sum(lsum);
if (ntg > N_SIMDWIDTH) {
if (sgitg == 0) {
buf[tiisg] = 0.0f;
}
threadgroup_barrier(mem_flags::mem_threadgroup);
if (tiisg == 0) {
buf[sgitg] = sum;
}
threadgroup_barrier(mem_flags::mem_threadgroup);
sum = buf[tiisg];
sum = simd_sum(sum);
}
const float inv_sum = 1.0f/sum;
for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) {
pdst4[i00] *= inv_sum;
}
}
kernel void kernel_diag_mask_inf(
device const float * src0,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int & n_past,
uint3 tpig[[thread_position_in_grid]]) {
const int64_t i02 = tpig[2];
const int64_t i01 = tpig[1];
const int64_t i00 = tpig[0];
if (i00 > n_past + i01) {
dst[i02*ne01*ne00 + i01*ne00 + i00] = -INFINITY;
} else {
dst[i02*ne01*ne00 + i01*ne00 + i00] = src0[i02*ne01*ne00 + i01*ne00 + i00];
}
}
kernel void kernel_diag_mask_inf_8(
device const float4 * src0,
device float4 * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int & n_past,
uint3 tpig[[thread_position_in_grid]]) {
const int64_t i = 2*tpig[0];
dst[i+0] = src0[i+0];
dst[i+1] = src0[i+1];
int64_t i4 = 4*i;
const int64_t i02 = i4/(ne00*ne01); i4 -= i02*ne00*ne01;
const int64_t i01 = i4/(ne00); i4 -= i01*ne00;
const int64_t i00 = i4;
for (int k = 3; k >= 0; --k) {
if (i00 + 4 + k <= n_past + i01) {
break;
}
dst[i+1][k] = -INFINITY;
if (i00 + k > n_past + i01) {
dst[i][k] = -INFINITY;
}
}
}
kernel void kernel_norm(
device const void * src0,
device float * dst,
constant int64_t & ne00,
constant uint64_t & nb01,
constant float & eps,
threadgroup float * sum [[threadgroup(0)]],
uint tgpig[[threadgroup_position_in_grid]],
uint tpitg[[thread_position_in_threadgroup]],
uint ntg[[threads_per_threadgroup]]) {
device const float * x = (device const float *) ((device const char *) src0 + tgpig*nb01);
// MEAN
// parallel sum
sum[tpitg] = 0.0f;
for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
sum[tpitg] += x[i00];
}
// reduce
threadgroup_barrier(mem_flags::mem_threadgroup);
for (uint i = ntg/2; i > 0; i /= 2) {
if (tpitg < i) {
sum[tpitg] += sum[tpitg + i];
}
threadgroup_barrier(mem_flags::mem_threadgroup);
}
const float mean = sum[0] / ne00;
// recenter and VARIANCE
threadgroup_barrier(mem_flags::mem_threadgroup);
device float * y = dst + tgpig*ne00;
sum[tpitg] = 0.0f;
for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
y[i00] = x[i00] - mean;
sum[tpitg] += y[i00] * y[i00];
}
// reduce
threadgroup_barrier(mem_flags::mem_threadgroup);
for (uint i = ntg/2; i > 0; i /= 2) {
if (tpitg < i) {
sum[tpitg] += sum[tpitg + i];
}
threadgroup_barrier(mem_flags::mem_threadgroup);
}
const float variance = sum[0] / ne00;
const float scale = 1.0f/sqrt(variance + eps);
for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
y[i00] = y[i00] * scale;
}
}
kernel void kernel_rms_norm(
device const void * src0,
device float * dst,
constant int64_t & ne00,
constant uint64_t & nb01,
constant float & eps,
threadgroup float * buf [[threadgroup(0)]],
uint tgpig[[threadgroup_position_in_grid]],
uint tpitg[[thread_position_in_threadgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]],
uint tiisg[[thread_index_in_simdgroup]],
uint ntg[[threads_per_threadgroup]]) {
device const float4 * x = (device const float4 *) ((device const char *) src0 + tgpig*nb01);
float4 sumf = 0;
float all_sum = 0;
// parallel sum
for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) {
sumf += x[i00] * x[i00];
}
all_sum = sumf[0] + sumf[1] + sumf[2] + sumf[3];
all_sum = simd_sum(all_sum);
if (ntg > N_SIMDWIDTH) {
if (sgitg == 0) {
buf[tiisg] = 0.0f;
}
threadgroup_barrier(mem_flags::mem_threadgroup);
if (tiisg == 0) {
buf[sgitg] = all_sum;
}
threadgroup_barrier(mem_flags::mem_threadgroup);
all_sum = buf[tiisg];
all_sum = simd_sum(all_sum);
}
const float mean = all_sum/ne00;
const float scale = 1.0f/sqrt(mean + eps);
device float4 * y = (device float4 *) (dst + tgpig*ne00);
for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) {
y[i00] = x[i00] * scale;
}
}
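// For reference, the kernel above implements RMS normalization of each row:
//
//   y[i] = x[i] / sqrt( mean(x^2) + eps )
//
// The sum of squares is accumulated in float4 lanes, reduced with simd_sum (plus the
// threadgroup buffer when more than one simdgroup is used), and applied as a single scale.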
kernel void kernel_group_norm(
device const float * src0,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int32_t & n_groups,
constant float & eps,
threadgroup float * buf [[threadgroup(0)]],
uint tgpig[[threadgroup_position_in_grid]],
uint tpitg[[thread_position_in_threadgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]],
uint tiisg[[thread_index_in_simdgroup]],
uint ntg[[threads_per_threadgroup]]) {
const int64_t ne = ne00*ne01*ne02;
const int64_t gs = ne00*ne01*((ne02 + n_groups - 1) / n_groups);
int start = tgpig * gs;
int end = start + gs;
start += tpitg;
if (end >= ne) {
end = ne;
}
float tmp = 0.0f; // partial sum for thread in warp
for (int j = start; j < end; j += ntg) {
tmp += src0[j];
}
threadgroup_barrier(mem_flags::mem_threadgroup);
tmp = simd_sum(tmp);
if (ntg > N_SIMDWIDTH) {
if (sgitg == 0) {
buf[tiisg] = 0.0f;
}
threadgroup_barrier(mem_flags::mem_threadgroup);
if (tiisg == 0) {
buf[sgitg] = tmp;
}
threadgroup_barrier(mem_flags::mem_threadgroup);
tmp = buf[tiisg];
tmp = simd_sum(tmp);
}
const float mean = tmp / gs;
tmp = 0.0f;
for (int j = start; j < end; j += ntg) {
float xi = src0[j] - mean;
dst[j] = xi;
tmp += xi * xi;
}
tmp = simd_sum(tmp);
if (ntg > N_SIMDWIDTH) {
if (sgitg == 0) {
buf[tiisg] = 0.0f;
}
threadgroup_barrier(mem_flags::mem_threadgroup);
if (tiisg == 0) {
buf[sgitg] = tmp;
}
threadgroup_barrier(mem_flags::mem_threadgroup);
tmp = buf[tiisg];
tmp = simd_sum(tmp);
}
const float variance = tmp / gs;
const float scale = 1.0f/sqrt(variance + eps);
for (int j = start; j < end; j += ntg) {
dst[j] *= scale;
}
}
// function to calculate the inner product between half a q4_0 block and 16 floats (yl); sumy is SUM(yl[i])
// il indicates where the q4 quants begin (0 or QK4_0/4)
// we assume that the yl's have been multiplied by the appropriate scale factor
// that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096)
inline float block_q_n_dot_y(device const block_q4_0 * qb_curr, float sumy, thread float * yl, int il) {
float d = qb_curr->d;
float2 acc = 0.f;
device const uint16_t * qs = ((device const uint16_t *)qb_curr + 1 + il/2);
for (int i = 0; i < 8; i+=2) {
acc[0] += yl[i + 0] * (qs[i / 2] & 0x000F)
+ yl[i + 1] * (qs[i / 2] & 0x0F00);
acc[1] += yl[i + 8] * (qs[i / 2] & 0x00F0)
+ yl[i + 9] * (qs[i / 2] & 0xF000);
}
return d * (sumy * -8.f + acc[0] + acc[1]);
}
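// A sketch of why the masking above works (the q4_1/q5_0/q5_1 variants below use the same idea):
// a block_q4_0 stores a half scale d followed by 32 4-bit quants packed into 16 uint16_t, and
// each quant q in [0,15] encodes the weight d*(q - 8). The caller pre-multiplies the y values
// by 1, 1/16, 1/256 and 1/4096 (see mul_vec_q_n_f32_impl), so masking a uint16 with 0x000F,
// 0x00F0, 0x0F00 or 0xF000 leaves q shifted by 0, 4, 8 or 12 bits and the product with the
// pre-scaled y is exactly q*y without any explicit shift, e.g.:
//
//   (q << 8) * (y / 256) == q * y
//
// The -8 offset shared by all 32 quants is folded into the single term sumy * -8.f, giving
//   d * ( sum_i q_i*y_i - 8 * sum_i y_i ) == sum_i d*(q_i - 8)*y_i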
// function to calculate the inner product between half a q4_1 block and 16 floats (yl); sumy is SUM(yl[i])
// il indicates where the q4 quants begin (0 or QK4_0/4)
// we assume that the yl's have been multiplied by the appropriate scale factor
// that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096)
inline float block_q_n_dot_y(device const block_q4_1 * qb_curr, float sumy, thread float * yl, int il) {
float d = qb_curr->d;
float m = qb_curr->m;
float2 acc = 0.f;
device const uint16_t * qs = ((device const uint16_t *)qb_curr + 2 + il/2);
for (int i = 0; i < 8; i+=2) {
acc[0] += yl[i + 0] * (qs[i / 2] & 0x000F)
+ yl[i + 1] * (qs[i / 2] & 0x0F00);
acc[1] += yl[i + 8] * (qs[i / 2] & 0x00F0)
+ yl[i + 9] * (qs[i / 2] & 0xF000);
}
return d * (acc[0] + acc[1]) + sumy * m;
}
// function to calculate the inner product between half a q5_0 block and 16 floats (yl); sumy is SUM(yl[i])
// il indicates where the q5 quants begin (0 or QK5_0/4)
// we assume that the yl's have been multiplied by the appropriate scale factor
// that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096)
inline float block_q_n_dot_y(device const block_q5_0 * qb_curr, float sumy, thread float * yl, int il) {
float d = qb_curr->d;
float2 acc = 0.f;
device const uint16_t * qs = ((device const uint16_t *)qb_curr + 3 + il/2);
const uint32_t qh = *((device const uint32_t *)qb_curr->qh);
for (int i = 0; i < 8; i+=2) {
acc[0] += yl[i + 0] * ((qs[i / 2] & 0x000F) | ((qh >> (i+0+il ) << 4 ) & 0x00010))
+ yl[i + 1] * ((qs[i / 2] & 0x0F00) | ((qh >> (i+1+il ) << 12) & 0x01000));
acc[1] += yl[i + 8] * ((qs[i / 2] & 0x00F0) | ((qh >> (i+0+il+QK5_0/2) << 8 ) & 0x00100))
+ yl[i + 9] * ((qs[i / 2] & 0xF000) | ((qh >> (i+1+il+QK5_0/2) << 16) & 0x10000));
}
return d * (sumy * -16.f + acc[0] + acc[1]);
}
// function to calculate the inner product between half a q5_1 block and 16 floats (yl); sumy is SUM(yl[i])
// il indicates where the q5 quants begin (0 or QK5_1/4)
// we assume that the yl's have been multiplied by the appropriate scale factor
// that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096)
inline float block_q_n_dot_y(device const block_q5_1 * qb_curr, float sumy, thread float * yl, int il) {
float d = qb_curr->d;
float m = qb_curr->m;
float2 acc = 0.f;
device const uint16_t * qs = ((device const uint16_t *)qb_curr + 4 + il/2);
const uint32_t qh = *((device const uint32_t *)qb_curr->qh);
for (int i = 0; i < 8; i+=2) {
acc[0] += yl[i + 0] * ((qs[i / 2] & 0x000F) | ((qh >> (i+0+il ) << 4 ) & 0x00010))
+ yl[i + 1] * ((qs[i / 2] & 0x0F00) | ((qh >> (i+1+il ) << 12) & 0x01000));
acc[1] += yl[i + 8] * ((qs[i / 2] & 0x00F0) | ((qh >> (i+0+il+QK5_0/2) << 8 ) & 0x00100))
+ yl[i + 9] * ((qs[i / 2] & 0xF000) | ((qh >> (i+1+il+QK5_0/2) << 16) & 0x10000));
}
return d * (acc[0] + acc[1]) + sumy * m;
}
// defining these constants inside the kernel causes a significant performance penalty
#define N_DST 4 // each SIMD group works on 4 rows
#define N_SIMDGROUP 2 // number of SIMD groups in a thread group
// Note: this is a template, but strictly speaking it only applies to
//       quantizations where the block size is 32. It also does not
//       guard against the number of rows not being divisible by
//       N_DST, so this is another explicit assumption of the implementation.
template<typename block_q_type, int nr, int nsg, int nw>
void mul_vec_q_n_f32_impl(
device const void * src0,
device const float * src1,
device float * dst,
int64_t ne00,
int64_t ne01,
int64_t ne02,
int64_t ne10,
int64_t ne12,
int64_t ne0,
int64_t ne1,
uint r2,
uint r3,
uint3 tgpig, uint tiisg, uint sgitg) {
const int nb = ne00/QK4_0;
const int r0 = tgpig.x;
const int r1 = tgpig.y;
const int im = tgpig.z;
const int first_row = (r0 * nsg + sgitg) * nr;
const uint i12 = im%ne12;
const uint i13 = im/ne12;
const uint offset0 = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
device const block_q_type * x = (device const block_q_type *) src0 + offset0;
device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1;
float yl[16]; // src1 vector cache
float sumf[nr] = {0.f};
const int ix = (tiisg/2);
const int il = (tiisg%2)*8;
device const float * yb = y + ix * QK4_0 + il;
// each thread in a SIMD group deals with half a block.
for (int ib = ix; ib < nb; ib += nw/2) {
float sumy = 0;
for (int i = 0; i < 8; i += 2) {
sumy += yb[i] + yb[i+1];
yl[i+0] = yb[i+ 0];
yl[i+1] = yb[i+ 1]/256.f;
sumy += yb[i+16] + yb[i+17];
yl[i+8] = yb[i+16]/16.f;
yl[i+9] = yb[i+17]/4096.f;
}
for (int row = 0; row < nr; row++) {
sumf[row] += block_q_n_dot_y(x+ib+row*nb, sumy, yl, il);
}
yb += QK4_0 * 16;
}
for (int row = 0; row < nr; ++row) {
const float tot = simd_sum(sumf[row]);
if (tiisg == 0 && first_row + row < ne01) {
dst[im*ne0*ne1 + r1*ne0 + first_row + row] = tot;
}
}
}
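// Work distribution in the implementation above, under the stated block-size-32 assumption:
// each simdgroup produces nr (= N_DST) consecutive output rows. Thread tiisg processes one
// half (il = (tiisg%2)*8) of block ix = tiisg/2 and then strides forward by nw/2 blocks.
// The yl cache holds 16 pre-scaled y values (alternating scales 1 and 1/256 for the first
// eight entries, 1/16 and 1/4096 for the last eight), matching what block_q_n_dot_y expects,
// while sumy keeps the unscaled sum of the same y values for the offset term.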
kernel void kernel_mul_mv_q4_0_f32(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
mul_vec_q_n_f32_impl<block_q4_0, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,tgpig,tiisg,sgitg);
}
kernel void kernel_mul_mv_q4_1_f32(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
mul_vec_q_n_f32_impl<block_q4_1, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,tgpig,tiisg,sgitg);
}
kernel void kernel_mul_mv_q5_0_f32(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
mul_vec_q_n_f32_impl<block_q5_0, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,tgpig,tiisg,sgitg);
}
kernel void kernel_mul_mv_q5_1_f32(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
mul_vec_q_n_f32_impl<block_q5_1, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,tgpig,tiisg,sgitg);
}
#define NB_Q8_0 8
void kernel_mul_mv_q8_0_f32_impl(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne10,
constant int64_t & ne12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
const int nr = N_DST;
const int nsg = N_SIMDGROUP;
const int nw = N_SIMDWIDTH;
const int nb = ne00/QK8_0;
const int r0 = tgpig.x;
const int r1 = tgpig.y;
const int im = tgpig.z;
const int first_row = (r0 * nsg + sgitg) * nr;
const uint i12 = im%ne12;
const uint i13 = im/ne12;
const uint offset0 = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
device const block_q8_0 * x = (device const block_q8_0 *) src0 + offset0;
device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1;
float yl[NB_Q8_0];
float sumf[nr]={0.f};
const int ix = tiisg/4;
const int il = tiisg%4;
device const float * yb = y + ix * QK8_0 + NB_Q8_0*il;
// each thread in a SIMD group deals with NB_Q8_0 quants at a time
for (int ib = ix; ib < nb; ib += nw/4) {
for (int i = 0; i < NB_Q8_0; ++i) {
yl[i] = yb[i];
}
for (int row = 0; row < nr; row++) {
device const int8_t * qs = x[ib+row*nb].qs + NB_Q8_0*il;
float sumq = 0.f;
for (int iq = 0; iq < NB_Q8_0; ++iq) {
sumq += qs[iq] * yl[iq];
}
sumf[row] += sumq*x[ib+row*nb].d;
}
yb += NB_Q8_0 * nw;
}
for (int row = 0; row < nr; ++row) {
const float tot = simd_sum(sumf[row]);
if (tiisg == 0 && first_row + row < ne01) {
dst[r1*ne0 + im*ne0*ne1 + first_row + row] = tot;
}
}
}
[[host_name("kernel_mul_mv_q8_0_f32")]]
kernel void kernel_mul_mv_q8_0_f32(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
kernel_mul_mv_q8_0_f32_impl(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,tgpig,tiisg,sgitg);
}
#define N_F32_F32 4
void kernel_mul_mv_f32_f32_impl(
device const char * src0,
device const char * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]]) {
const int64_t r0 = tgpig.x;
const int64_t rb = tgpig.y*N_F32_F32;
const int64_t im = tgpig.z;
const uint i12 = im%ne12;
const uint i13 = im/ne12;
const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02;
device const float * x = (device const float *) (src0 + offset0);
if (ne00 < 128) {
for (int row = 0; row < N_F32_F32; ++row) {
int r1 = rb + row;
if (r1 >= ne11) {
break;
}
device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12);
float sumf = 0;
for (int i = tiisg; i < ne00; i += 32) {
sumf += (float) x[i] * (float) y[i];
}
float all_sum = simd_sum(sumf);
if (tiisg == 0) {
dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum;
}
}
} else {
device const float4 * x4 = (device const float4 *)x;
for (int row = 0; row < N_F32_F32; ++row) {
int r1 = rb + row;
if (r1 >= ne11) {
break;
}
device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12);
device const float4 * y4 = (device const float4 *) y;
float sumf = 0;
for (int i = tiisg; i < ne00/4; i += 32) {
for (int k = 0; k < 4; ++k) sumf += (float) x4[i][k] * y4[i][k];
}
float all_sum = simd_sum(sumf);
if (tiisg == 0) {
for (int i = 4*(ne00/4); i < ne00; ++i) all_sum += (float) x[i] * y[i];
dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum;
}
}
}
}
[[host_name("kernel_mul_mv_f32_f32")]]
kernel void kernel_mul_mv_f32_f32(
device const char * src0,
device const char * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]]) {
kernel_mul_mv_f32_f32_impl(src0, src1, dst, ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3, tgpig, tiisg);
}
#define N_F16_F16 4
kernel void kernel_mul_mv_f16_f16(
device const char * src0,
device const char * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]]) {
const int64_t r0 = tgpig.x;
const int64_t rb = tgpig.y*N_F16_F16;
const int64_t im = tgpig.z;
const uint i12 = im%ne12;
const uint i13 = im/ne12;
const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02;
device const half * x = (device const half *) (src0 + offset0);
if (ne00 < 128) {
for (int row = 0; row < N_F16_F16; ++row) {
int r1 = rb + row;
if (r1 >= ne11) {
break;
}
device const half * y = (device const half *) (src1 + r1*nb11 + im*nb12);
float sumf = 0;
for (int i = tiisg; i < ne00; i += 32) {
sumf += (half) x[i] * (half) y[i];
}
float all_sum = simd_sum(sumf);
if (tiisg == 0) {
dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum;
}
}
} else {
device const half4 * x4 = (device const half4 *)x;
for (int row = 0; row < N_F16_F16; ++row) {
int r1 = rb + row;
if (r1 >= ne11) {
break;
}
device const half * y = (device const half *) (src1 + r1*nb11 + im*nb12);
device const half4 * y4 = (device const half4 *) y;
float sumf = 0;
for (int i = tiisg; i < ne00/4; i += 32) {
for (int k = 0; k < 4; ++k) sumf += (half) x4[i][k] * y4[i][k];
}
float all_sum = simd_sum(sumf);
if (tiisg == 0) {
for (int i = 4*(ne00/4); i < ne00; ++i) all_sum += (half) x[i] * y[i];
dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum;
}
}
}
}
void kernel_mul_mv_f16_f32_1row_impl(
device const char * src0,
device const char * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]]) {
const int64_t r0 = tgpig.x;
const int64_t r1 = tgpig.y;
const int64_t im = tgpig.z;
const uint i12 = im%ne12;
const uint i13 = im/ne12;
const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02;
device const half * x = (device const half *) (src0 + offset0);
device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12);
float sumf = 0;
if (ne00 < 128) {
for (int i = tiisg; i < ne00; i += 32) {
sumf += (float) x[i] * (float) y[i];
}
float all_sum = simd_sum(sumf);
if (tiisg == 0) {
dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum;
}
} else {
device const half4 * x4 = (device const half4 *) x;
device const float4 * y4 = (device const float4 *) y;
for (int i = tiisg; i < ne00/4; i += 32) {
for (int k = 0; k < 4; ++k) sumf += (float)x4[i][k] * y4[i][k];
}
float all_sum = simd_sum(sumf);
if (tiisg == 0) {
for (int i = 4*(ne00/4); i < ne00; ++i) all_sum += (float) x[i] * y[i];
dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum;
}
}
}
[[host_name("kernel_mul_mv_f16_f32_1row")]]
kernel void kernel_mul_mv_f16_f32_1row(
device const char * src0,
device const char * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]]) {
kernel_mul_mv_f16_f32_1row_impl(src0, src1, dst, ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3, tgpig, tiisg);
}
#define N_F16_F32 4
void kernel_mul_mv_f16_f32_impl(
device const char * src0,
device const char * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]]) {
const int64_t r0 = tgpig.x;
const int64_t rb = tgpig.y*N_F16_F32;
const int64_t im = tgpig.z;
const uint i12 = im%ne12;
const uint i13 = im/ne12;
const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02;
device const half * x = (device const half *) (src0 + offset0);
if (ne00 < 128) {
for (int row = 0; row < N_F16_F32; ++row) {
int r1 = rb + row;
if (r1 >= ne11) {
break;
}
device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12);
float sumf = 0;
for (int i = tiisg; i < ne00; i += 32) {
sumf += (float) x[i] * (float) y[i];
}
float all_sum = simd_sum(sumf);
if (tiisg == 0) {
dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum;
}
}
} else {
device const half4 * x4 = (device const half4 *)x;
for (int row = 0; row < N_F16_F32; ++row) {
int r1 = rb + row;
if (r1 >= ne11) {
break;
}
device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12);
device const float4 * y4 = (device const float4 *) y;
float sumf = 0;
for (int i = tiisg; i < ne00/4; i += 32) {
for (int k = 0; k < 4; ++k) sumf += (float) x4[i][k] * y4[i][k];
}
float all_sum = simd_sum(sumf);
if (tiisg == 0) {
for (int i = 4*(ne00/4); i < ne00; ++i) all_sum += (float) x[i] * y[i];
dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum;
}
}
}
}
[[host_name("kernel_mul_mv_f16_f32")]]
kernel void kernel_mul_mv_f16_f32(
device const char * src0,
device const char * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]]) {
kernel_mul_mv_f16_f32_impl(src0, src1, dst, ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3, tgpig, tiisg);
}
// Assumes row size (ne00) is a multiple of 4
kernel void kernel_mul_mv_f16_f32_l4(
device const char * src0,
device const char * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]]) {
const int nrows = ne11;
const int64_t r0 = tgpig.x;
const int64_t im = tgpig.z;
const uint i12 = im%ne12;
const uint i13 = im/ne12;
const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02;
device const half4 * x4 = (device const half4 *) (src0 + offset0);
for (int r1 = 0; r1 < nrows; ++r1) {
device const float4 * y4 = (device const float4 *) (src1 + r1*nb11 + im*nb12);
float sumf = 0;
for (int i = tiisg; i < ne00/4; i += 32) {
for (int k = 0; k < 4; ++k) sumf += (float) x4[i][k] * y4[i][k];
}
float all_sum = simd_sum(sumf);
if (tiisg == 0) {
dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum;
}
}
}
kernel void kernel_alibi_f32(
device const float * src0,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne03,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant uint64_t & nb03,
constant int64_t & ne0,
constant int64_t & ne1,
constant int64_t & ne2,
constant int64_t & ne3,
constant uint64_t & nb0,
constant uint64_t & nb1,
constant uint64_t & nb2,
constant uint64_t & nb3,
constant float & m0,
constant float & m1,
constant int & n_heads_log2_floor,
uint3 tgpig[[threadgroup_position_in_grid]],
uint3 tpitg[[thread_position_in_threadgroup]],
uint3 ntg[[threads_per_threadgroup]]) {
const int64_t i03 = tgpig[2];
const int64_t i02 = tgpig[1];
const int64_t i01 = tgpig[0];
const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
const int64_t i3 = n / (ne2*ne1*ne0);
const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
//const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);
const int64_t k = i3*ne3 + i2;
float m_k;
if (k < n_heads_log2_floor) {
m_k = pow(m0, k + 1);
} else {
m_k = pow(m1, 2 * (k - n_heads_log2_floor) + 1);
}
device char * dst_row = (device char *) dst + i3*nb3 + i2*nb2 + i1*nb1;
device const char * src_row = (device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01;
for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
const float src_v = *(device float *)(src_row + i00*nb00);
device float * dst_v = (device float *)(dst_row + i00*nb0);
*dst_v = i00 * m_k + src_v;
}
}
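// ALiBi bias, for reference: each attention head k gets a slope m_k and the kernel adds
// i00 * m_k to every element of the row, i.e. a bias that grows linearly with the column
// index. The slopes follow the usual ALiBi recipe: m_k = m0^(k+1) for the first
// n_heads_log2_floor heads and m_k = m1^(2*(k - n_heads_log2_floor) + 1) for the remaining
// heads, where m0 and m1 are per-model slope bases computed on the host.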
static float rope_yarn_ramp(const float low, const float high, const int i0) {
const float y = (i0 / 2 - low) / max(0.001f, high - low);
return 1.0f - min(1.0f, max(0.0f, y));
}
// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn
// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng.
static void rope_yarn(
float theta_extrap, float freq_scale, float corr_dims[2], int64_t i0, float ext_factor, float mscale,
thread float * cos_theta, thread float * sin_theta
) {
// Get n-d rotational scaling corrected for extrapolation
float theta_interp = freq_scale * theta_extrap;
float theta = theta_interp;
if (ext_factor != 0.0f) {
float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor;
theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix;
// Get n-d magnitude scaling corrected for interpolation
mscale *= 1.0f + 0.1f * log(1.0f / freq_scale);
}
*cos_theta = cos(theta) * mscale;
*sin_theta = sin(theta) * mscale;
}
// Solving `n_rot = (max_pos_emb / (2*pi)) * base^(-2*x/n_dims)` for x, we get
// `corr_fac(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2*pi)) / (2 * log(base))`
static float rope_yarn_corr_factor(int n_dims, int n_orig_ctx, float n_rot, float base) {
return n_dims * log(n_orig_ctx / (n_rot * 2 * M_PI_F)) / (2 * log(base));
}
static void rope_yarn_corr_dims(
int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2]
) {
// start and end correction dims
dims[0] = max(0.0f, floor(rope_yarn_corr_factor(n_dims, n_orig_ctx, beta_fast, freq_base)));
dims[1] = min(n_dims - 1.0f, ceil(rope_yarn_corr_factor(n_dims, n_orig_ctx, beta_slow, freq_base)));
}
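// Intuition for the two helpers above: rope_yarn_corr_factor() inverts the relation between a
// dimension index and how many full rotations that dimension completes over n_orig_ctx
// positions, and rope_yarn_corr_dims() turns the (beta_fast, beta_slow) rotation-count
// thresholds into a [start, end] dimension range. Inside rope_yarn(), dimensions below the
// start are pulled toward the extrapolated (unscaled) angle (ramp = 1, weighted by
// ext_factor), dimensions above the end use the purely interpolated angle
// freq_scale * theta_extrap (ramp = 0), and the range in between blends the two.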
typedef void (rope_t)(
device const void * src0,
device const int32_t * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne03,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant uint64_t & nb03,
constant int64_t & ne0,
constant int64_t & ne1,
constant int64_t & ne2,
constant int64_t & ne3,
constant uint64_t & nb0,
constant uint64_t & nb1,
constant uint64_t & nb2,
constant uint64_t & nb3,
constant int & n_past,
constant int & n_dims,
constant int & mode,
constant int & n_orig_ctx,
constant float & freq_base,
constant float & freq_scale,
constant float & ext_factor,
constant float & attn_factor,
constant float & beta_fast,
constant float & beta_slow,
uint tiitg[[thread_index_in_threadgroup]],
uint3 tptg[[threads_per_threadgroup]],
uint3 tgpig[[threadgroup_position_in_grid]]);
template<typename T>
kernel void kernel_rope(
device const void * src0,
device const int32_t * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne03,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant uint64_t & nb03,
constant int64_t & ne0,
constant int64_t & ne1,
constant int64_t & ne2,
constant int64_t & ne3,
constant uint64_t & nb0,
constant uint64_t & nb1,
constant uint64_t & nb2,
constant uint64_t & nb3,
constant int & n_past,
constant int & n_dims,
constant int & mode,
constant int & n_orig_ctx,
constant float & freq_base,
constant float & freq_scale,
constant float & ext_factor,
constant float & attn_factor,
constant float & beta_fast,
constant float & beta_slow,
uint tiitg[[thread_index_in_threadgroup]],
uint3 tptg[[threads_per_threadgroup]],
uint3 tgpig[[threadgroup_position_in_grid]]) {
const int64_t i3 = tgpig[2];
const int64_t i2 = tgpig[1];
const int64_t i1 = tgpig[0];
const bool is_neox = mode & 2;
float corr_dims[2];
rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
device const int32_t * pos = src1;
const int64_t p = pos[i2];
const float theta_0 = (float)p;
const float inv_ndims = -1.f/n_dims;
if (!is_neox) {
for (int64_t i0 = 2*tiitg; i0 < ne0; i0 += 2*tptg.x) {
const float theta = theta_0 * pow(freq_base, inv_ndims*i0);
float cos_theta, sin_theta;
rope_yarn(theta, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta);
device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
const T x0 = src[0];
const T x1 = src[1];
dst_data[0] = x0*cos_theta - x1*sin_theta;
dst_data[1] = x0*sin_theta + x1*cos_theta;
}
} else {
for (int64_t ic = 2*tiitg; ic < ne0; ic += 2*tptg.x) {
if (ic < n_dims) {
const int64_t ib = 0;
// simplified from `(ib * n_dims + ic) * inv_ndims`
const float cur_rot = inv_ndims*ic - ib;
const float theta = theta_0 * pow(freq_base, cur_rot);
float cos_theta, sin_theta;
rope_yarn(theta, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor, &cos_theta, &sin_theta);
const int64_t i0 = ib*n_dims + ic/2;
device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
const float x0 = src[0];
const float x1 = src[n_dims/2];
dst_data[0] = x0*cos_theta - x1*sin_theta;
dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
} else {
const int64_t i0 = ic;
device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
dst_data[0] = src[0];
dst_data[1] = src[1];
}
}
}
}
template [[host_name("kernel_rope_f32")]] kernel rope_t kernel_rope<float>;
template [[host_name("kernel_rope_f16")]] kernel rope_t kernel_rope<half>;
kernel void kernel_im2col_f16(
device const float * x,
device half * dst,
constant int32_t & ofs0,
constant int32_t & ofs1,
constant int32_t & IW,
constant int32_t & IH,
constant int32_t & CHW,
constant int32_t & s0,
constant int32_t & s1,
constant int32_t & p0,
constant int32_t & p1,
constant int32_t & d0,
constant int32_t & d1,
uint3 tgpig[[threadgroup_position_in_grid]],
uint3 tgpg[[threadgroups_per_grid]],
uint3 tpitg[[thread_position_in_threadgroup]],
uint3 ntg[[threads_per_threadgroup]]) {
const int32_t iiw = tgpig[2] * s0 + tpitg[2] * d0 - p0;
const int32_t iih = tgpig[1] * s1 + tpitg[1] * d1 - p1;
const int32_t offset_dst =
(tpitg[0] * tgpg[1] * tgpg[2] + tgpig[1] * tgpg[2] + tgpig[2]) * CHW +
(tgpig[0] * (ntg[1] * ntg[2]) + tpitg[1] * ntg[2] + tpitg[2]);
if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
dst[offset_dst] = 0.0f;
} else {
const int32_t offset_src = tpitg[0] * ofs0 + tgpig[0] * ofs1;
dst[offset_dst] = x[offset_src + iih * IW + iiw];
}
}
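// A sketch of the index mapping above, assuming the host dispatches threadgroups over
// (channels, output rows, output cols) and threads over (batch, kernel rows, kernel cols)
// as the CPU/CUDA backends do: iiw/iih are the input coordinates derived from stride s0/s1,
// dilation d0/d1 and padding p0/p1; offset_dst addresses the flattened
// [batch, oh, ow, channel*KH*KW + kh*KW + kw] layout of the im2col buffer; and ofs0/ofs1 are
// the per-batch and per-channel strides of the source image. Out-of-bounds taps are written
// as 0 so the following matrix multiplication needs no bounds checks.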
kernel void kernel_upscale_f32(
device const char * src0,
device char * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne03,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant uint64_t & nb03,
constant int64_t & ne0,
constant int64_t & ne1,
constant int64_t & ne2,
constant int64_t & ne3,
constant uint64_t & nb0,
constant uint64_t & nb1,
constant uint64_t & nb2,
constant uint64_t & nb3,
constant int32_t & sf,
uint3 tgpig[[threadgroup_position_in_grid]],
uint3 tpitg[[thread_position_in_threadgroup]],
uint3 ntg[[threads_per_threadgroup]]) {
const int64_t i3 = tgpig.z;
const int64_t i2 = tgpig.y;
const int64_t i1 = tgpig.x;
const int64_t i03 = i3;
const int64_t i02 = i2;
const int64_t i01 = i1/sf;
device const float * src0_ptr = (device const float *) (src0 + i03*nb03 + i02*nb02 + i01*nb01);
device float * dst_ptr = (device float *) (dst + i3*nb3 + i2*nb2 + i1*nb1);
for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) {
dst_ptr[i0] = src0_ptr[i0/sf];
}
}
kernel void kernel_pad_f32(
device const char * src0,
device char * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne03,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant uint64_t & nb03,
constant int64_t & ne0,
constant int64_t & ne1,
constant int64_t & ne2,
constant int64_t & ne3,
constant uint64_t & nb0,
constant uint64_t & nb1,
constant uint64_t & nb2,
constant uint64_t & nb3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint3 tpitg[[thread_position_in_threadgroup]],
uint3 ntg[[threads_per_threadgroup]]) {
const int64_t i3 = tgpig.z;
const int64_t i2 = tgpig.y;
const int64_t i1 = tgpig.x;
const int64_t i03 = i3;
const int64_t i02 = i2;
const int64_t i01 = i1;
device const float * src0_ptr = (device const float *) (src0 + i03*nb03 + i02*nb02 + i01*nb01);
device float * dst_ptr = (device float *) (dst + i3*nb3 + i2*nb2 + i1*nb1);
if (i1 < ne01 && i2 < ne02 && i3 < ne03) {
for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) {
if (i0 < ne00) {
dst_ptr[i0] = src0_ptr[i0];
} else {
dst_ptr[i0] = 0.0f;
}
}
return;
}
for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) {
dst_ptr[i0] = 0.0f;
}
}
// bitonic sort implementation following the CUDA kernels as reference
typedef void (argsort_t)(
device const float * x,
device int32_t * dst,
constant int64_t & ncols,
uint3 tgpig[[threadgroup_position_in_grid]],
uint3 tpitg[[thread_position_in_threadgroup]]);
template<ggml_sort_order order>
kernel void kernel_argsort_f32_i32(
device const float * x,
device int32_t * dst,
constant int64_t & ncols,
uint3 tgpig[[threadgroup_position_in_grid]],
uint3 tpitg[[thread_position_in_threadgroup]]) {
// bitonic sort
int col = tpitg[0];
int row = tgpig[1];
if (col >= ncols) return;
device const float * x_row = x + row * ncols;
device int32_t * dst_row = dst + row * ncols;
// initialize indices
if (col < ncols) {
dst_row[col] = col;
}
threadgroup_barrier(mem_flags::mem_threadgroup);
for (int k = 2; k <= ncols; k *= 2) {
for (int j = k / 2; j > 0; j /= 2) {
int ixj = col ^ j;
if (ixj > col) {
if ((col & k) == 0) {
if (order == GGML_SORT_ASC ? x_row[dst_row[col]] > x_row[dst_row[ixj]] : x_row[dst_row[col]] < x_row[dst_row[ixj]]) {
SWAP(dst_row[col], dst_row[ixj]);
}
} else {
if (order == GGML_SORT_ASC ? x_row[dst_row[col]] < x_row[dst_row[ixj]] : x_row[dst_row[col]] > x_row[dst_row[ixj]]) {
SWAP(dst_row[col], dst_row[ixj]);
}
}
}
threadgroup_barrier(mem_flags::mem_threadgroup);
}
}
}
template [[host_name("kernel_argsort_f32_i32_asc")]] kernel argsort_t kernel_argsort_f32_i32<GGML_SORT_ASC>;
template [[host_name("kernel_argsort_f32_i32_desc")]] kernel argsort_t kernel_argsort_f32_i32<GGML_SORT_DESC>;
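// The argsort kernel above sorts the indices of one row per threadgroup with a standard
// in-place bitonic network: at stage (k, j) thread `col` compares its current index against
// the partner index `col ^ j` and swaps so that ascending/descending runs of length k emerge,
// doubling k until the whole row is ordered. Like most bitonic sorts it appears to assume
// that ncols is a power of two and no larger than the threadgroup size.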
kernel void kernel_leaky_relu_f32(
device const float * src0,
device float * dst,
constant float & slope,
uint tpig[[thread_position_in_grid]]) {
dst[tpig] = src0[tpig] > 0.0f ? src0[tpig] : src0[tpig] * slope;
}
kernel void kernel_cpy_f16_f16(
device const half * src0,
device half * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne03,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant uint64_t & nb03,
constant int64_t & ne0,
constant int64_t & ne1,
constant int64_t & ne2,
constant int64_t & ne3,
constant uint64_t & nb0,
constant uint64_t & nb1,
constant uint64_t & nb2,
constant uint64_t & nb3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint3 tpitg[[thread_position_in_threadgroup]],
uint3 ntg[[threads_per_threadgroup]]) {
const int64_t i03 = tgpig[2];
const int64_t i02 = tgpig[1];
const int64_t i01 = tgpig[0];
const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
const int64_t i3 = n / (ne2*ne1*ne0);
const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);
device half * dst_data = (device half *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
device const half * src = (device half *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);
dst_data[i00] = src[0];
}
}
kernel void kernel_cpy_f16_f32(
device const half * src0,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne03,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant uint64_t & nb03,
constant int64_t & ne0,
constant int64_t & ne1,
constant int64_t & ne2,
constant int64_t & ne3,
constant uint64_t & nb0,
constant uint64_t & nb1,
constant uint64_t & nb2,
constant uint64_t & nb3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint3 tpitg[[thread_position_in_threadgroup]],
uint3 ntg[[threads_per_threadgroup]]) {
const int64_t i03 = tgpig[2];
const int64_t i02 = tgpig[1];
const int64_t i01 = tgpig[0];
const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
const int64_t i3 = n / (ne2*ne1*ne0);
const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);
device float * dst_data = (device float *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
device const half * src = (device half *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);
dst_data[i00] = src[0];
}
}
kernel void kernel_cpy_f32_f16(
device const float * src0,
device half * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne03,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant uint64_t & nb03,
constant int64_t & ne0,
constant int64_t & ne1,
constant int64_t & ne2,
constant int64_t & ne3,
constant uint64_t & nb0,
constant uint64_t & nb1,
constant uint64_t & nb2,
constant uint64_t & nb3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint3 tpitg[[thread_position_in_threadgroup]],
uint3 ntg[[threads_per_threadgroup]]) {
const int64_t i03 = tgpig[2];
const int64_t i02 = tgpig[1];
const int64_t i01 = tgpig[0];
const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
const int64_t i3 = n / (ne2*ne1*ne0);
const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);
device half * dst_data = (device half *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);
dst_data[i00] = src[0];
}
}
kernel void kernel_cpy_f32_f32(
device const float * src0,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne03,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant uint64_t & nb03,
constant int64_t & ne0,
constant int64_t & ne1,
constant int64_t & ne2,
constant int64_t & ne3,
constant uint64_t & nb0,
constant uint64_t & nb1,
constant uint64_t & nb2,
constant uint64_t & nb3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint3 tpitg[[thread_position_in_threadgroup]],
uint3 ntg[[threads_per_threadgroup]]) {
const int64_t i03 = tgpig[2];
const int64_t i02 = tgpig[1];
const int64_t i01 = tgpig[0];
const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
const int64_t i3 = n / (ne2*ne1*ne0);
const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);
device float * dst_data = (device float *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);
dst_data[i00] = src[0];
}
}
kernel void kernel_cpy_f32_q8_0(
device const float * src0,
device void * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne03,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant uint64_t & nb03,
constant int64_t & ne0,
constant int64_t & ne1,
constant int64_t & ne2,
constant int64_t & ne3,
constant uint64_t & nb0,
constant uint64_t & nb1,
constant uint64_t & nb2,
constant uint64_t & nb3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint3 tpitg[[thread_position_in_threadgroup]],
uint3 ntg[[threads_per_threadgroup]]) {
const int64_t i03 = tgpig[2];
const int64_t i02 = tgpig[1];
const int64_t i01 = tgpig[0];
const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
const int64_t i3 = n / (ne2*ne1*ne0);
const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0)/QK8_0;
device block_q8_0 * dst_data = (device block_q8_0 *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
for (int64_t i00 = tpitg.x*QK8_0; i00 < ne00; i00 += ntg.x*QK8_0) {
device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);
float amax = 0.0f; // absolute max
for (int j = 0; j < QK8_0; j++) {
const float v = src[j];
amax = MAX(amax, fabs(v));
}
const float d = amax / ((1 << 7) - 1);
const float id = d ? 1.0f/d : 0.0f;
dst_data[i00/QK8_0].d = d;
for (int j = 0; j < QK8_0; ++j) {
const float x0 = src[j]*id;
dst_data[i00/QK8_0].qs[j] = round(x0);
}
}
}
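// Quantization performed above, for reference: each group of QK8_0 floats is stored as a
// block with a single half scale d and QK8_0 signed 8-bit quants, where
//
//   d   = amax / 127          (amax = largest absolute value in the block)
//   q_j = round(x_j / d)      so that x_j is reconstructed as d * q_j
//
// A block whose values are all zero gets d = 0 and id = 0, so its quants are written as 0.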
kernel void kernel_cpy_f32_q4_0(
device const float * src0,
device void * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne03,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant uint64_t & nb03,
constant int64_t & ne0,
constant int64_t & ne1,
constant int64_t & ne2,
constant int64_t & ne3,
constant uint64_t & nb0,
constant uint64_t & nb1,
constant uint64_t & nb2,
constant uint64_t & nb3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint3 tpitg[[thread_position_in_threadgroup]],
uint3 ntg[[threads_per_threadgroup]]) {
const int64_t i03 = tgpig[2];
const int64_t i02 = tgpig[1];
const int64_t i01 = tgpig[0];
const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
const int64_t i3 = n / (ne2*ne1*ne0);
const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0)/QK4_0;
device block_q4_0 * dst_data = (device block_q4_0 *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
for (int64_t i00 = tpitg.x*QK4_0; i00 < ne00; i00 += ntg.x*QK4_0) {
device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);
float amax = 0.0f; // absolute max
float max = 0.0f;
for (int j = 0; j < QK4_0; j++) {
const float v = src[j];
if (amax < fabs(v)) {
amax = fabs(v);
max = v;
}
}
const float d = max / -8;
const float id = d ? 1.0f/d : 0.0f;
dst_data[i00/QK4_0].d = d;
for (int j = 0; j < QK4_0/2; ++j) {
const float x0 = src[0 + j]*id;
const float x1 = src[QK4_0/2 + j]*id;
const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f));
const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f));
dst_data[i00/QK4_0].qs[j] = xi0;
dst_data[i00/QK4_0].qs[j] |= xi1 << 4;
}
}
}
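// Q4_0 packing above, for reference: the value with the largest magnitude (kept signed in
// `max`) defines d = max / -8, so x / d falls roughly in [-8, 8]. Adding 8.5 and truncating
// to an integer maps that range onto [0, 16], which MIN(15, ...) clamps to the 4-bit range;
// the stored quant q therefore reconstructs x ~= d * (q - 8). The two halves of the block
// share a byte: element j goes into the low nibble of qs[j] and element j + QK4_0/2 into the
// high nibble.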
kernel void kernel_cpy_f32_q4_1(
device const float * src0,
device void * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne03,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant uint64_t & nb03,
constant int64_t & ne0,
constant int64_t & ne1,
constant int64_t & ne2,
constant int64_t & ne3,
constant uint64_t & nb0,
constant uint64_t & nb1,
constant uint64_t & nb2,
constant uint64_t & nb3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint3 tpitg[[thread_position_in_threadgroup]],
uint3 ntg[[threads_per_threadgroup]]) {
const int64_t i03 = tgpig[2];
const int64_t i02 = tgpig[1];
const int64_t i01 = tgpig[0];
const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
const int64_t i3 = n / (ne2*ne1*ne0);
const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0)/QK4_1;
device block_q4_1 * dst_data = (device block_q4_1 *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
for (int64_t i00 = tpitg.x*QK4_1; i00 < ne00; i00 += ntg.x*QK4_1) {
device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);
float min = FLT_MAX;
float max = -FLT_MAX;
for (int j = 0; j < QK4_1; j++) {
const float v = src[j];
if (min > v) min = v;
if (max < v) max = v;
}
const float d = (max - min) / ((1 << 4) - 1);
const float id = d ? 1.0f/d : 0.0f;
dst_data[i00/QK4_1].d = d;
dst_data[i00/QK4_1].m = min;
for (int j = 0; j < QK4_1/2; ++j) {
const float x0 = (src[0 + j] - min)*id;
const float x1 = (src[QK4_1/2 + j] - min)*id;
const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f));
const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f));
dst_data[i00/QK4_1].qs[j] = xi0;
dst_data[i00/QK4_1].qs[j] |= xi1 << 4;
}
}
}
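// kernel_concat: build dst by taking positions with i02 < ne02 from src0 and the
// remaining positions from src1, i.e. concatenation of the two tensors along dim 2.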
kernel void kernel_concat(
device const char * src0,
device const char * src1,
device char * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne03,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant uint64_t & nb03,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant uint64_t & nb13,
constant int64_t & ne0,
constant int64_t & ne1,
constant int64_t & ne2,
constant int64_t & ne3,
constant uint64_t & nb0,
constant uint64_t & nb1,
constant uint64_t & nb2,
constant uint64_t & nb3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint3 tpitg[[thread_position_in_threadgroup]],
uint3 ntg[[threads_per_threadgroup]]) {
const int64_t i03 = tgpig.z;
const int64_t i02 = tgpig.y;
const int64_t i01 = tgpig.x;
const int64_t i13 = i03 % ne13;
const int64_t i12 = i02 % ne12;
const int64_t i11 = i01 % ne11;
device const char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01 + tpitg.x*nb00;
device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11 + tpitg.x*nb10;
device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1 + tpitg.x*nb0;
for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) {
if (i02 < ne02) {
((device float *)dst_ptr)[0] = ((device float *)src0_ptr)[0];
src0_ptr += ntg.x*nb00;
} else {
((device float *)dst_ptr)[0] = ((device float *)src1_ptr)[0];
src1_ptr += ntg.x*nb10;
}
dst_ptr += ntg.x*nb0;
}
}
//============================================ k-quants ======================================================
#ifndef QK_K
#define QK_K 256
#else
static_assert(QK_K == 256 || QK_K == 64, "QK_K must be 256 or 64");
#endif
#if QK_K == 256
#define K_SCALE_SIZE 12
#else
#define K_SCALE_SIZE 4
#endif
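// The k-quant formats below pack QK_K weights into a "super-block": each super-block
// carries per-sub-block scales (and, for some formats, mins) that are themselves
// quantized, plus one or two half-precision super-block scales. The layouts differ
// between QK_K == 256 and QK_K == 64.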
typedef struct {
uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
uint8_t qs[QK_K/4]; // quants
half d; // super-block scale for quantized scales
half dmin; // super-block scale for quantized mins
} block_q2_K;
// 84 bytes / block
typedef struct {
uint8_t hmask[QK_K/8]; // quants - high bit
uint8_t qs[QK_K/4]; // quants - low 2 bits
#if QK_K == 64
uint8_t scales[2];
#else
uint8_t scales[K_SCALE_SIZE]; // scales, quantized with 6 bits
#endif
half d; // super-block scale
} block_q3_K;
#if QK_K == 64
typedef struct {
half d[2]; // super-block scales/mins
uint8_t scales[2];
uint8_t qs[QK_K/2]; // 4-bit quants
} block_q4_K;
#else
typedef struct {
half d; // super-block scale for quantized scales
half dmin; // super-block scale for quantized mins
uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
    uint8_t qs[QK_K/2];        // 4-bit quants
} block_q4_K;
#endif
#if QK_K == 64
typedef struct {
half d; // super-block scales/mins
int8_t scales[QK_K/16]; // 8-bit block scales
uint8_t qh[QK_K/8]; // quants, high bit
uint8_t qs[QK_K/2]; // quants, low 4 bits
} block_q5_K;
#else
typedef struct {
half d; // super-block scale for quantized scales
half dmin; // super-block scale for quantized mins
uint8_t scales[3*QK_K/64]; // scales and mins, quantized with 6 bits
uint8_t qh[QK_K/8]; // quants, high bit
uint8_t qs[QK_K/2]; // quants, low 4 bits
} block_q5_K;
// 176 bytes / block
#endif
typedef struct {
uint8_t ql[QK_K/2]; // quants, lower 4 bits
uint8_t qh[QK_K/4]; // quants, upper 2 bits
int8_t scales[QK_K/16]; // scales, quantized with 8 bits
half d; // super-block scale
} block_q6_K;
// 210 bytes / block
//====================================== dot products =========================
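// Common structure of the kernel_mul_mv_*_impl functions below: each simdgroup computes
// the dot products of one or more quantized rows of src0 with a column of src1. Every
// thread accumulates partial sums in registers, the partial sums are reduced with
// simd_sum, and lane 0 (tiisg == 0) writes the result to dst.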
void kernel_mul_mv_q2_K_f32_impl(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne10,
constant int64_t & ne12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
const int nb = ne00/QK_K;
const int r0 = tgpig.x;
const int r1 = tgpig.y;
const int im = tgpig.z;
const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST;
const int ib_row = first_row * nb;
const uint i12 = im%ne12;
const uint i13 = im/ne12;
const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
device const block_q2_K * x = (device const block_q2_K *) src0 + ib_row + offset0;
device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1;
float yl[32];
float sumf[N_DST]={0.f}, all_sum;
const int step = sizeof(block_q2_K) * nb;
#if QK_K == 256
const int ix = tiisg/8; // 0...3
const int it = tiisg%8; // 0...7
const int iq = it/4; // 0 or 1
const int ir = it%4; // 0...3
    const int is = (8*ir)/16; // 0 or 1
device const float * y4 = y + ix * QK_K + 128 * iq + 8 * ir;
for (int ib = ix; ib < nb; ib += 4) {
float4 sumy = {0.f, 0.f, 0.f, 0.f};
for (int i = 0; i < 8; ++i) {
yl[i+ 0] = y4[i+ 0]; sumy[0] += yl[i+ 0];
yl[i+ 8] = y4[i+32]; sumy[1] += yl[i+ 8];
yl[i+16] = y4[i+64]; sumy[2] += yl[i+16];
yl[i+24] = y4[i+96]; sumy[3] += yl[i+24];
}
device const uint8_t * sc = (device const uint8_t *)x[ib].scales + 8*iq + is;
device const uint16_t * qs = (device const uint16_t *)x[ib].qs + 16 * iq + 4 * ir;
device const half * dh = &x[ib].d;
for (int row = 0; row < N_DST; row++) {
float4 acc1 = {0.f, 0.f, 0.f, 0.f};
float4 acc2 = {0.f, 0.f, 0.f, 0.f};
for (int i = 0; i < 8; i += 2) {
acc1[0] += yl[i+ 0] * (qs[i/2] & 0x0003);
acc2[0] += yl[i+ 1] * (qs[i/2] & 0x0300);
acc1[1] += yl[i+ 8] * (qs[i/2] & 0x000c);
acc2[1] += yl[i+ 9] * (qs[i/2] & 0x0c00);
acc1[2] += yl[i+16] * (qs[i/2] & 0x0030);
acc2[2] += yl[i+17] * (qs[i/2] & 0x3000);
acc1[3] += yl[i+24] * (qs[i/2] & 0x00c0);
acc2[3] += yl[i+25] * (qs[i/2] & 0xc000);
}
float dall = dh[0];
float dmin = dh[1] * 1.f/16.f;
sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc2[0]) * (sc[0] & 0xF) * 1.f/ 1.f +
(acc1[1] + 1.f/256.f * acc2[1]) * (sc[2] & 0xF) * 1.f/ 4.f +
(acc1[2] + 1.f/256.f * acc2[2]) * (sc[4] & 0xF) * 1.f/16.f +
(acc1[3] + 1.f/256.f * acc2[3]) * (sc[6] & 0xF) * 1.f/64.f) -
dmin * (sumy[0] * (sc[0] & 0xF0) + sumy[1] * (sc[2] & 0xF0) + sumy[2] * (sc[4] & 0xF0) + sumy[3] * (sc[6] & 0xF0));
qs += step/2;
sc += step;
dh += step/2;
}
y4 += 4 * QK_K;
}
#else
const int ix = tiisg/2; // 0...15
const int it = tiisg%2; // 0...1
device const float * y4 = y + ix * QK_K + 8 * it;
for (int ib = ix; ib < nb; ib += 16) {
float4 sumy = {0.f, 0.f, 0.f, 0.f};
for (int i = 0; i < 8; ++i) {
yl[i+ 0] = y4[i+ 0]; sumy[0] += yl[i+ 0];
yl[i+ 8] = y4[i+16]; sumy[1] += yl[i+ 8];
yl[i+16] = y4[i+32]; sumy[2] += yl[i+16];
yl[i+24] = y4[i+48]; sumy[3] += yl[i+24];
}
device const uint8_t * sc = (device const uint8_t *)x[ib].scales;
device const uint16_t * qs = (device const uint16_t *)x[ib].qs + 4 * it;
device const half * dh = &x[ib].d;
for (int row = 0; row < N_DST; row++) {
float4 acc1 = {0.f, 0.f, 0.f, 0.f};
float4 acc2 = {0.f, 0.f, 0.f, 0.f};
for (int i = 0; i < 8; i += 2) {
acc1[0] += yl[i+ 0] * (qs[i/2] & 0x0003);
acc2[0] += yl[i+ 1] * (qs[i/2] & 0x0300);
acc1[1] += yl[i+ 8] * (qs[i/2] & 0x000c);
acc2[1] += yl[i+ 9] * (qs[i/2] & 0x0c00);
acc1[2] += yl[i+16] * (qs[i/2] & 0x0030);
acc2[2] += yl[i+17] * (qs[i/2] & 0x3000);
acc1[3] += yl[i+24] * (qs[i/2] & 0x00c0);
acc2[3] += yl[i+25] * (qs[i/2] & 0xc000);
}
float dall = dh[0];
float dmin = dh[1];
sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc2[0]) * (sc[0] & 0xF) * 1.f/ 1.f +
(acc1[1] + 1.f/256.f * acc2[1]) * (sc[1] & 0xF) * 1.f/ 4.f +
(acc1[2] + 1.f/256.f * acc2[2]) * (sc[2] & 0xF) * 1.f/16.f +
(acc1[3] + 1.f/256.f * acc2[3]) * (sc[3] & 0xF) * 1.f/64.f) -
dmin * (sumy[0] * (sc[0] >> 4) + sumy[1] * (sc[1] >> 4) + sumy[2] * (sc[2] >> 4) + sumy[3] * (sc[3] >> 4));
qs += step/2;
sc += step;
dh += step/2;
}
y4 += 16 * QK_K;
}
#endif
for (int row = 0; row < N_DST; ++row) {
all_sum = simd_sum(sumf[row]);
if (tiisg == 0) {
dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum;
}
}
}
[[host_name("kernel_mul_mv_q2_K_f32")]]
kernel void kernel_mul_mv_q2_K_f32(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
kernel_mul_mv_q2_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg);
}
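// q3_K: the low 2 bits of each quant live in qs and the high (third) bit in hmask;
// separate implementations are provided for QK_K == 256 and QK_K == 64.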
#if QK_K == 256
void kernel_mul_mv_q3_K_f32_impl(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne10,
constant int64_t & ne12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
const int nb = ne00/QK_K;
const int64_t r0 = tgpig.x;
const int64_t r1 = tgpig.y;
const int64_t im = tgpig.z;
const int first_row = (r0 * N_SIMDGROUP + sgitg) * 2;
const uint i12 = im%ne12;
const uint i13 = im/ne12;
const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
device const block_q3_K * x = (device const block_q3_K *) src0 + first_row*nb + offset0;
device const float * yy = (device const float *) src1 + r1*ne10 + im*ne00*ne1;
float yl[32];
//const uint16_t kmask1 = 0x3030;
//const uint16_t kmask2 = 0x0f0f;
const int tid = tiisg/4;
const int ix = tiisg%4;
const int ip = tid/4; // 0 or 1
const int il = 2*((tid%4)/2); // 0 or 2
const int ir = tid%2;
const int n = 8;
const int l0 = n*ir;
// One would think that the Metal compiler would figure out that ip and il can only have
// 4 possible states, and optimize accordingly. Well, no. It needs help, and we do it
    // with these two tables.
//
// Possible masks for the high bit
const ushort4 mm[4] = {{0x0001, 0x0100, 0x0002, 0x0200}, // ip = 0, il = 0
{0x0004, 0x0400, 0x0008, 0x0800}, // ip = 0, il = 2
{0x0010, 0x1000, 0x0020, 0x2000}, // ip = 1, il = 0
{0x0040, 0x4000, 0x0080, 0x8000}}; // ip = 1, il = 2
// Possible masks for the low 2 bits
const int4 qm[2] = {{0x0003, 0x0300, 0x000c, 0x0c00}, {0x0030, 0x3000, 0x00c0, 0xc000}};
const ushort4 hm = mm[2*ip + il/2];
const int shift = 2*il;
const float v1 = il == 0 ? 4.f : 64.f;
const float v2 = 4.f * v1;
const uint16_t s_shift1 = 4*ip;
const uint16_t s_shift2 = s_shift1 + il;
const int q_offset = 32*ip + l0;
const int y_offset = 128*ip + 32*il + l0;
const int step = sizeof(block_q3_K) * nb / 2;
device const float * y1 = yy + ix*QK_K + y_offset;
uint32_t scales32, aux32;
thread uint16_t * scales16 = (thread uint16_t *)&scales32;
thread const int8_t * scales = (thread const int8_t *)&scales32;
float sumf1[2] = {0.f};
float sumf2[2] = {0.f};
for (int i = ix; i < nb; i += 4) {
for (int l = 0; l < 8; ++l) {
yl[l+ 0] = y1[l+ 0];
yl[l+ 8] = y1[l+16];
yl[l+16] = y1[l+32];
yl[l+24] = y1[l+48];
}
device const uint16_t * q = (device const uint16_t *)(x[i].qs + q_offset);
device const uint16_t * h = (device const uint16_t *)(x[i].hmask + l0);
device const uint16_t * a = (device const uint16_t *)(x[i].scales);
device const half * dh = &x[i].d;
for (int row = 0; row < 2; ++row) {
const float d_all = (float)dh[0];
scales16[0] = a[4];
scales16[1] = a[5];
aux32 = ((scales32 >> s_shift2) << 4) & 0x30303030;
scales16[0] = a[il+0];
scales16[1] = a[il+1];
scales32 = ((scales32 >> s_shift1) & 0x0f0f0f0f) | aux32;
float s1 = 0, s2 = 0, s3 = 0, s4 = 0, s5 = 0, s6 = 0;
for (int l = 0; l < n; l += 2) {
const int32_t qs = q[l/2];
s1 += yl[l+0] * (qs & qm[il/2][0]);
s2 += yl[l+1] * (qs & qm[il/2][1]);
s3 += ((h[l/2] & hm[0]) ? 0.f : yl[l+0]) + ((h[l/2] & hm[1]) ? 0.f : yl[l+1]);
s4 += yl[l+16] * (qs & qm[il/2][2]);
s5 += yl[l+17] * (qs & qm[il/2][3]);
s6 += ((h[l/2] & hm[2]) ? 0.f : yl[l+16]) + ((h[l/2] & hm[3]) ? 0.f : yl[l+17]);
}
float d1 = d_all * (s1 + 1.f/256.f * s2 - s3*v1);
float d2 = d_all * (s4 + 1.f/256.f * s5 - s6*v2);
sumf1[row] += d1 * (scales[0] - 32);
sumf2[row] += d2 * (scales[2] - 32);
s1 = s2 = s3 = s4 = s5 = s6 = 0;
for (int l = 0; l < n; l += 2) {
const int32_t qs = q[l/2+8];
s1 += yl[l+8] * (qs & qm[il/2][0]);
s2 += yl[l+9] * (qs & qm[il/2][1]);
s3 += ((h[l/2+8] & hm[0]) ? 0.f : yl[l+8]) + ((h[l/2+8] & hm[1]) ? 0.f : yl[l+9]);
s4 += yl[l+24] * (qs & qm[il/2][2]);
s5 += yl[l+25] * (qs & qm[il/2][3]);
s6 += ((h[l/2+8] & hm[2]) ? 0.f : yl[l+24]) + ((h[l/2+8] & hm[3]) ? 0.f : yl[l+25]);
}
d1 = d_all * (s1 + 1.f/256.f * s2 - s3*v1);
d2 = d_all * (s4 + 1.f/256.f * s5 - s6*v2);
sumf1[row] += d1 * (scales[1] - 32);
sumf2[row] += d2 * (scales[3] - 32);
q += step;
h += step;
a += step;
dh += step;
}
y1 += 4 * QK_K;
}
for (int row = 0; row < 2; ++row) {
const float sumf = (sumf1[row] + 0.25f * sumf2[row]) / (1 << shift);
sumf1[row] = simd_sum(sumf);
}
if (tiisg == 0) {
for (int row = 0; row < 2; ++row) {
dst[r1*ne0 + im*ne0*ne1 + first_row + row] = sumf1[row];
}
}
}
#else
void kernel_mul_mv_q3_K_f32_impl(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne10,
constant int64_t & ne12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
const int nb = ne00/QK_K;
const int64_t r0 = tgpig.x;
const int64_t r1 = tgpig.y;
const int64_t im = tgpig.z;
const int row = 2 * r0 + sgitg;
const uint i12 = im%ne12;
const uint i13 = im/ne12;
const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
device const block_q3_K * x = (device const block_q3_K *) src0 + row*nb + offset0;
device const float * yy = (device const float *) src1 + r1*ne10 + im*ne00*ne1;
const int ix = tiisg/4;
const int il = 4 * (tiisg%4);// 0, 4, 8, 12
const int iq = il/8; // 0, 0, 1, 1
const int in = il%8; // 0, 4, 0, 4
float2 sum = {0.f, 0.f};
for (int i = ix; i < nb; i += 8) {
const float d_all = (float)(x[i].d);
device const uint16_t * q = (device const uint16_t *)(x[i].qs + il);
device const uint16_t * h = (device const uint16_t *)(x[i].hmask + in);
device const uint16_t * s = (device const uint16_t *)(x[i].scales);
device const float * y = yy + i * QK_K + il;
const float d1 = d_all * ((int32_t)(s[0] & 0x000F) - 8);
const float d2 = d_all * ((int32_t)(s[0] & 0x00F0) - 128) * 1.f/64.f;
const float d3 = d_all * ((int32_t)(s[0] & 0x0F00) - 2048) * 1.f/4096.f;
const float d4 = d_all * ((int32_t)(s[0] & 0xF000) - 32768) * 1.f/262144.f;
for (int l = 0; l < 4; l += 2) {
const uint16_t hm = h[l/2] >> iq;
sum[0] += y[l+ 0] * d1 * ((int32_t)(q[l/2] & 0x0003) - ((hm & 0x0001) ? 0 : 4))
+ y[l+16] * d2 * ((int32_t)(q[l/2] & 0x000c) - ((hm & 0x0004) ? 0 : 16))
+ y[l+32] * d3 * ((int32_t)(q[l/2] & 0x0030) - ((hm & 0x0010) ? 0 : 64))
+ y[l+48] * d4 * ((int32_t)(q[l/2] & 0x00c0) - ((hm & 0x0040) ? 0 : 256));
sum[1] += y[l+ 1] * d1 * ((int32_t)(q[l/2] & 0x0300) - ((hm & 0x0100) ? 0 : 1024))
+ y[l+17] * d2 * ((int32_t)(q[l/2] & 0x0c00) - ((hm & 0x0400) ? 0 : 4096))
+ y[l+33] * d3 * ((int32_t)(q[l/2] & 0x3000) - ((hm & 0x1000) ? 0 : 16384))
+ y[l+49] * d4 * ((int32_t)(q[l/2] & 0xc000) - ((hm & 0x4000) ? 0 : 65536));
}
}
const float sumf = sum[0] + sum[1] * 1.f/256.f;
const float tot = simd_sum(sumf);
if (tiisg == 0) {
dst[r1*ne0 + im*ne0*ne1 + row] = tot;
}
}
#endif
[[host_name("kernel_mul_mv_q3_K_f32")]]
kernel void kernel_mul_mv_q3_K_f32(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
kernel_mul_mv_q3_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg);
}
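// q4_K: 4-bit quants with 6-bit sub-block scales and mins packed into x[i].scales.
// In the QK_K == 256 path the kmask constants unpack the 6-bit fields, and the 1/256
// and 1/16 factors undo the byte/nibble shifts that come from reading two quant bytes
// at a time as a uint16_t.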
#if QK_K == 256
void kernel_mul_mv_q4_K_f32_impl(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne10,
constant int64_t & ne12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
const uint16_t kmask1 = 0x3f3f;
const uint16_t kmask2 = 0x0f0f;
const uint16_t kmask3 = 0xc0c0;
const int ix = tiisg/8; // 0...3
const int it = tiisg%8; // 0...7
const int iq = it/4; // 0 or 1
const int ir = it%4; // 0...3
const int nb = ne00/QK_K;
const int r0 = tgpig.x;
const int r1 = tgpig.y;
const int im = tgpig.z;
//const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST;
const int first_row = r0 * N_DST;
const int ib_row = first_row * nb;
const uint i12 = im%ne12;
const uint i13 = im/ne12;
const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
device const block_q4_K * x = (device const block_q4_K *) src0 + ib_row + offset0;
device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1;
float yl[16];
float yh[16];
float sumf[N_DST]={0.f}, all_sum;
const int step = sizeof(block_q4_K) * nb / 2;
device const float * y4 = y + ix * QK_K + 64 * iq + 8 * ir;
uint16_t sc16[4];
thread const uint8_t * sc8 = (thread const uint8_t *)sc16;
for (int ib = ix; ib < nb; ib += 4) {
float4 sumy = {0.f, 0.f, 0.f, 0.f};
for (int i = 0; i < 8; ++i) {
yl[i+0] = y4[i+ 0]; sumy[0] += yl[i+0];
yl[i+8] = y4[i+ 32]; sumy[1] += yl[i+8];
yh[i+0] = y4[i+128]; sumy[2] += yh[i+0];
yh[i+8] = y4[i+160]; sumy[3] += yh[i+8];
}
device const uint16_t * sc = (device const uint16_t *)x[ib].scales + iq;
device const uint16_t * q1 = (device const uint16_t *)x[ib].qs + 16 * iq + 4 * ir;
device const half * dh = &x[ib].d;
for (int row = 0; row < N_DST; row++) {
sc16[0] = sc[0] & kmask1;
sc16[1] = sc[2] & kmask1;
sc16[2] = ((sc[4] >> 0) & kmask2) | ((sc[0] & kmask3) >> 2);
sc16[3] = ((sc[4] >> 4) & kmask2) | ((sc[2] & kmask3) >> 2);
device const uint16_t * q2 = q1 + 32;
float4 acc1 = {0.f, 0.f, 0.f, 0.f};
float4 acc2 = {0.f, 0.f, 0.f, 0.f};
for (int i = 0; i < 8; i += 2) {
acc1[0] += yl[i+0] * (q1[i/2] & 0x000F);
acc1[1] += yl[i+1] * (q1[i/2] & 0x0F00);
acc1[2] += yl[i+8] * (q1[i/2] & 0x00F0);
acc1[3] += yl[i+9] * (q1[i/2] & 0xF000);
acc2[0] += yh[i+0] * (q2[i/2] & 0x000F);
acc2[1] += yh[i+1] * (q2[i/2] & 0x0F00);
acc2[2] += yh[i+8] * (q2[i/2] & 0x00F0);
acc2[3] += yh[i+9] * (q2[i/2] & 0xF000);
}
float dall = dh[0];
float dmin = dh[1];
sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc1[1]) * sc8[0] +
(acc1[2] + 1.f/256.f * acc1[3]) * sc8[1] * 1.f/16.f +
(acc2[0] + 1.f/256.f * acc2[1]) * sc8[4] +
(acc2[2] + 1.f/256.f * acc2[3]) * sc8[5] * 1.f/16.f) -
dmin * (sumy[0] * sc8[2] + sumy[1] * sc8[3] + sumy[2] * sc8[6] + sumy[3] * sc8[7]);
q1 += step;
sc += step;
dh += step;
}
y4 += 4 * QK_K;
}
for (int row = 0; row < N_DST; ++row) {
all_sum = simd_sum(sumf[row]);
if (tiisg == 0) {
dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum;
}
}
}
#else
void kernel_mul_mv_q4_K_f32_impl(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne10,
constant int64_t & ne12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
const int ix = tiisg/4; // 0...7
const int it = tiisg%4; // 0...3
const int nb = ne00/QK_K;
const int r0 = tgpig.x;
const int r1 = tgpig.y;
const int im = tgpig.z;
const int first_row = r0 * N_DST;
const int ib_row = first_row * nb;
const uint i12 = im%ne12;
const uint i13 = im/ne12;
const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
device const block_q4_K * x = (device const block_q4_K *) src0 + ib_row + offset0;
device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1;
float yl[8];
float yh[8];
float sumf[N_DST]={0.f}, all_sum;
const int step = sizeof(block_q4_K) * nb / 2;
device const float * y4 = y + ix * QK_K + 8 * it;
uint16_t sc16[4];
for (int ib = ix; ib < nb; ib += 8) {
float2 sumy = {0.f, 0.f};
for (int i = 0; i < 8; ++i) {
yl[i] = y4[i+ 0]; sumy[0] += yl[i];
yh[i] = y4[i+32]; sumy[1] += yh[i];
}
device const uint16_t * sc = (device const uint16_t *)x[ib].scales;
device const uint16_t * qs = (device const uint16_t *)x[ib].qs + 4 * it;
device const half * dh = x[ib].d;
for (int row = 0; row < N_DST; row++) {
sc16[0] = sc[0] & 0x000f;
sc16[1] = sc[0] & 0x0f00;
sc16[2] = sc[0] & 0x00f0;
sc16[3] = sc[0] & 0xf000;
float2 acc1 = {0.f, 0.f};
float2 acc2 = {0.f, 0.f};
for (int i = 0; i < 8; i += 2) {
acc1[0] += yl[i+0] * (qs[i/2] & 0x000F);
acc1[1] += yl[i+1] * (qs[i/2] & 0x0F00);
acc2[0] += yh[i+0] * (qs[i/2] & 0x00F0);
acc2[1] += yh[i+1] * (qs[i/2] & 0xF000);
}
float dall = dh[0];
float dmin = dh[1];
sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc1[1]) * sc16[0] +
(acc2[0] + 1.f/256.f * acc2[1]) * sc16[1] * 1.f/4096.f) -
dmin * 1.f/16.f * (sumy[0] * sc16[2] + sumy[1] * sc16[3] * 1.f/256.f);
qs += step;
sc += step;
dh += step;
}
y4 += 8 * QK_K;
}
for (int row = 0; row < N_DST; ++row) {
all_sum = simd_sum(sumf[row]);
if (tiisg == 0) {
dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum;
}
}
}
#endif
[[host_name("kernel_mul_mv_q4_K_f32")]]
kernel void kernel_mul_mv_q4_K_f32(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
kernel_mul_mv_q4_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg);
}
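// q5_K: like q4_K but with one extra high bit per quant stored in qh; in the
// QK_K == 256 path a set high bit adds 16 to the dequantized value (the 16.f*acc2
// terms), while the sub-block scales and mins are unpacked the same way as for q4_K.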
void kernel_mul_mv_q5_K_f32_impl(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne10,
constant int64_t & ne12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
const int nb = ne00/QK_K;
const int64_t r0 = tgpig.x;
const int64_t r1 = tgpig.y;
const int im = tgpig.z;
const int first_row = (r0 * N_SIMDGROUP + sgitg) * 2;
const uint i12 = im%ne12;
const uint i13 = im/ne12;
const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
device const block_q5_K * x = (device const block_q5_K *) src0 + first_row*nb + offset0;
device const float * yy = (device const float *) src1 + r1*ne10 + im*ne00*ne1;
float sumf[2]={0.f};
const int step = sizeof(block_q5_K) * nb;
#if QK_K == 256
float yl[16], yh[16];
const uint16_t kmask1 = 0x3f3f;
const uint16_t kmask2 = 0x0f0f;
const uint16_t kmask3 = 0xc0c0;
const int tid = tiisg/4;
const int ix = tiisg%4;
const int iq = tid/4;
const int ir = tid%4;
const int n = 8;
const int l0 = n*ir;
const int q_offset = 32*iq + l0;
const int y_offset = 64*iq + l0;
const uint8_t hm1 = 1u << (2*iq);
const uint8_t hm2 = hm1 << 1;
const uint8_t hm3 = hm1 << 4;
const uint8_t hm4 = hm2 << 4;
uint16_t sc16[4];
thread const uint8_t * sc8 = (thread const uint8_t *)sc16;
device const float * y1 = yy + ix*QK_K + y_offset;
for (int i = ix; i < nb; i += 4) {
device const uint8_t * q1 = x[i].qs + q_offset;
device const uint8_t * qh = x[i].qh + l0;
device const half * dh = &x[i].d;
device const uint16_t * a = (device const uint16_t *)x[i].scales + iq;
device const float * y2 = y1 + 128;
float4 sumy = {0.f, 0.f, 0.f, 0.f};
for (int l = 0; l < 8; ++l) {
yl[l+0] = y1[l+ 0]; sumy[0] += yl[l+0];
yl[l+8] = y1[l+32]; sumy[1] += yl[l+8];
yh[l+0] = y2[l+ 0]; sumy[2] += yh[l+0];
yh[l+8] = y2[l+32]; sumy[3] += yh[l+8];
}
for (int row = 0; row < 2; ++row) {
device const uint8_t * q2 = q1 + 64;
sc16[0] = a[0] & kmask1;
sc16[1] = a[2] & kmask1;
sc16[2] = ((a[4] >> 0) & kmask2) | ((a[0] & kmask3) >> 2);
sc16[3] = ((a[4] >> 4) & kmask2) | ((a[2] & kmask3) >> 2);
float4 acc1 = {0.f};
float4 acc2 = {0.f};
for (int l = 0; l < n; ++l) {
uint8_t h = qh[l];
acc1[0] += yl[l+0] * (q1[l] & 0x0F);
acc1[1] += yl[l+8] * (q1[l] & 0xF0);
acc1[2] += yh[l+0] * (q2[l] & 0x0F);
acc1[3] += yh[l+8] * (q2[l] & 0xF0);
acc2[0] += h & hm1 ? yl[l+0] : 0.f;
acc2[1] += h & hm2 ? yl[l+8] : 0.f;
acc2[2] += h & hm3 ? yh[l+0] : 0.f;
acc2[3] += h & hm4 ? yh[l+8] : 0.f;
}
const float dall = dh[0];
const float dmin = dh[1];
sumf[row] += dall * (sc8[0] * (acc1[0] + 16.f*acc2[0]) +
sc8[1] * (acc1[1]/16.f + 16.f*acc2[1]) +
sc8[4] * (acc1[2] + 16.f*acc2[2]) +
sc8[5] * (acc1[3]/16.f + 16.f*acc2[3])) -
dmin * (sumy[0] * sc8[2] + sumy[1] * sc8[3] + sumy[2] * sc8[6] + sumy[3] * sc8[7]);
q1 += step;
qh += step;
dh += step/2;
a += step/2;
}
y1 += 4 * QK_K;
}
#else
float yl[8], yh[8];
const int il = 4 * (tiisg/8); // 0, 4, 8, 12
const int ix = tiisg%8;
const int iq = il/8; // 0, 0, 1, 1
const int in = il%8; // 0, 4, 0, 4
device const float * y = yy + ix*QK_K + il;
for (int i = ix; i < nb; i += 8) {
for (int l = 0; l < 4; ++l) {
yl[l+0] = y[l+ 0];
yl[l+4] = y[l+16];
yh[l+0] = y[l+32];
yh[l+4] = y[l+48];
}
device const half * dh = &x[i].d;
device const uint8_t * q = x[i].qs + il;
device const uint8_t * h = x[i].qh + in;
device const int8_t * s = x[i].scales;
for (int row = 0; row < 2; ++row) {
const float d = dh[0];
float2 acc = {0.f, 0.f};
for (int l = 0; l < 4; ++l) {
const uint8_t hl = h[l] >> iq;
acc[0] += yl[l+0] * s[0] * ((int16_t)(q[l+ 0] & 0x0F) - (hl & 0x01 ? 0 : 16))
+ yl[l+4] * s[1] * ((int16_t)(q[l+16] & 0x0F) - (hl & 0x04 ? 0 : 16));
acc[1] += yh[l+0] * s[2] * ((int16_t)(q[l+ 0] & 0xF0) - (hl & 0x10 ? 0 : 256))
+ yh[l+4] * s[3] * ((int16_t)(q[l+16] & 0xF0) - (hl & 0x40 ? 0 : 256));
}
sumf[row] += d * (acc[0] + 1.f/16.f * acc[1]);
q += step;
h += step;
s += step;
dh += step/2;
}
y += 8 * QK_K;
}
#endif
for (int row = 0; row < 2; ++row) {
const float tot = simd_sum(sumf[row]);
if (tiisg == 0) {
dst[r1*ne0 + im*ne0*ne1 + first_row + row] = tot;
}
}
}
[[host_name("kernel_mul_mv_q5_K_f32")]]
kernel void kernel_mul_mv_q5_K_f32(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
kernel_mul_mv_q5_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg);
}
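// q6_K: 6-bit quants are reconstructed from 4 low bits in ql and 2 high bits in qh,
// centered by subtracting 32, and weighted by 8-bit signed sub-block scales.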
void kernel_mul_mv_q6_K_f32_impl(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant int64_t & ne10,
constant int64_t & ne12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
const uint8_t kmask1 = 0x03;
const uint8_t kmask2 = 0x0C;
const uint8_t kmask3 = 0x30;
const uint8_t kmask4 = 0xC0;
const int nb = ne00/QK_K;
const int64_t r0 = tgpig.x;
const int64_t r1 = tgpig.y;
const int im = tgpig.z;
const int row = 2 * r0 + sgitg;
const uint i12 = im%ne12;
const uint i13 = im/ne12;
const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
device const block_q6_K * x = (device const block_q6_K *) src0 + row * nb + offset0;
device const float * yy = (device const float *) src1 + r1*ne10 + im*ne00*ne1;
float sumf = 0;
#if QK_K == 256
const int tid = tiisg/2;
const int ix = tiisg%2;
const int ip = tid/8; // 0 or 1
const int il = tid%8;
const int n = 4;
const int l0 = n*il;
const int is = 8*ip + l0/16;
const int y_offset = 128*ip + l0;
const int q_offset_l = 64*ip + l0;
const int q_offset_h = 32*ip + l0;
for (int i = ix; i < nb; i += 2) {
device const uint8_t * q1 = x[i].ql + q_offset_l;
device const uint8_t * q2 = q1 + 32;
device const uint8_t * qh = x[i].qh + q_offset_h;
device const int8_t * sc = x[i].scales + is;
device const float * y = yy + i * QK_K + y_offset;
const float dall = x[i].d;
float4 sums = {0.f, 0.f, 0.f, 0.f};
for (int l = 0; l < n; ++l) {
sums[0] += y[l+ 0] * ((int8_t)((q1[l] & 0xF) | ((qh[l] & kmask1) << 4)) - 32);
sums[1] += y[l+32] * ((int8_t)((q2[l] & 0xF) | ((qh[l] & kmask2) << 2)) - 32);
sums[2] += y[l+64] * ((int8_t)((q1[l] >> 4) | ((qh[l] & kmask3) << 0)) - 32);
sums[3] += y[l+96] * ((int8_t)((q2[l] >> 4) | ((qh[l] & kmask4) >> 2)) - 32);
}
sumf += dall * (sums[0] * sc[0] + sums[1] * sc[2] + sums[2] * sc[4] + sums[3] * sc[6]);
}
#else
const int ix = tiisg/4;
const int il = 4*(tiisg%4);
for (int i = ix; i < nb; i += 8) {
device const float * y = yy + i * QK_K + il;
device const uint8_t * ql = x[i].ql + il;
device const uint8_t * qh = x[i].qh + il;
device const int8_t * s = x[i].scales;
const float d = x[i].d;
float4 sums = {0.f, 0.f, 0.f, 0.f};
for (int l = 0; l < 4; ++l) {
sums[0] += y[l+ 0] * ((int8_t)((ql[l+ 0] & 0xF) | ((qh[l] & kmask1) << 4)) - 32);
sums[1] += y[l+16] * ((int8_t)((ql[l+16] & 0xF) | ((qh[l] & kmask2) << 2)) - 32);
sums[2] += y[l+32] * ((int8_t)((ql[l+ 0] >> 4) | ((qh[l] & kmask3) >> 0)) - 32);
sums[3] += y[l+48] * ((int8_t)((ql[l+16] >> 4) | ((qh[l] & kmask4) >> 2)) - 32);
}
sumf += d * (sums[0] * s[0] + sums[1] * s[1] + sums[2] * s[2] + sums[3] * s[3]);
}
#endif
const float tot = simd_sum(sumf);
if (tiisg == 0) {
dst[r1*ne0 + im*ne0*ne1 + row] = tot;
}
}
[[host_name("kernel_mul_mv_q6_K_f32")]]
kernel void kernel_mul_mv_q6_K_f32(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
kernel_mul_mv_q6_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg);
}
//============================= templates and their specializations =============================
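// Each dequantize_* helper converts one 16-element (4x4) tile of a block into float or
// half values in `reg`; the `il` argument selects which tile of the block is produced.
// These helpers are shared by kernel_get_rows and kernel_mul_mm below.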
// NOTE: this is not dequantizing - we are simply fitting the template
template <typename type4x4>
void dequantize_f32(device const float4x4 * src, short il, thread type4x4 & reg) {
float4x4 temp = *(((device float4x4 *)src));
for (int i = 0; i < 16; i++){
reg[i/4][i%4] = temp[i/4][i%4];
}
}
template <typename type4x4>
void dequantize_f16(device const half4x4 * src, short il, thread type4x4 & reg) {
half4x4 temp = *(((device half4x4 *)src));
for (int i = 0; i < 16; i++){
reg[i/4][i%4] = temp[i/4][i%4];
}
}
template <typename type4x4>
void dequantize_q4_0(device const block_q4_0 *xb, short il, thread type4x4 & reg) {
device const uint16_t * qs = ((device const uint16_t *)xb + 1);
const float d1 = il ? (xb->d / 16.h) : xb->d;
const float d2 = d1 / 256.f;
const float md = -8.h * xb->d;
const ushort mask0 = il ? 0x00F0 : 0x000F;
const ushort mask1 = mask0 << 8;
for (int i=0;i<8;i++) {
reg[i/2][2*(i%2)+0] = d1 * (qs[i] & mask0) + md;
reg[i/2][2*(i%2)+1] = d2 * (qs[i] & mask1) + md;
}
}
template <typename type4x4>
void dequantize_q4_1(device const block_q4_1 *xb, short il, thread type4x4 & reg) {
device const uint16_t * qs = ((device const uint16_t *)xb + 2);
const float d1 = il ? (xb->d / 16.h) : xb->d;
const float d2 = d1 / 256.f;
const float m = xb->m;
const ushort mask0 = il ? 0x00F0 : 0x000F;
const ushort mask1 = mask0 << 8;
for (int i=0;i<8;i++) {
reg[i/2][2*(i%2)+0] = ((qs[i] & mask0) * d1) + m;
reg[i/2][2*(i%2)+1] = ((qs[i] & mask1) * d2) + m;
}
}
template <typename type4x4>
void dequantize_q5_0(device const block_q5_0 *xb, short il, thread type4x4 & reg) {
device const uint16_t * qs = ((device const uint16_t *)xb + 3);
const float d = xb->d;
const float md = -16.h * xb->d;
const ushort mask = il ? 0x00F0 : 0x000F;
const uint32_t qh = *((device const uint32_t *)xb->qh);
const int x_mv = il ? 4 : 0;
const int gh_mv = il ? 12 : 0;
const int gh_bk = il ? 0 : 4;
for (int i = 0; i < 8; i++) {
        // extract the 5th bit for x0 and x1
const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10;
const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10;
// combine the 4-bits from qs with the 5th bit
const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0);
const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1);
reg[i/2][2*(i%2)+0] = d * x0 + md;
reg[i/2][2*(i%2)+1] = d * x1 + md;
}
}
template <typename type4x4>
void dequantize_q5_1(device const block_q5_1 *xb, short il, thread type4x4 & reg) {
device const uint16_t * qs = ((device const uint16_t *)xb + 4);
const float d = xb->d;
const float m = xb->m;
const ushort mask = il ? 0x00F0 : 0x000F;
const uint32_t qh = *((device const uint32_t *)xb->qh);
const int x_mv = il ? 4 : 0;
const int gh_mv = il ? 12 : 0;
const int gh_bk = il ? 0 : 4;
for (int i = 0; i < 8; i++) {
        // extract the 5th bit for x0 and x1
const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10;
const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10;
// combine the 4-bits from qs with the 5th bit
const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0);
const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1);
reg[i/2][2*(i%2)+0] = d * x0 + m;
reg[i/2][2*(i%2)+1] = d * x1 + m;
}
}
template <typename type4x4>
void dequantize_q8_0(device const block_q8_0 *xb, short il, thread type4x4 & reg) {
device const int8_t * qs = ((device const int8_t *)xb->qs);
const half d = xb->d;
for (int i = 0; i < 16; i++) {
reg[i/4][i%4] = (qs[i + 16*il] * d);
}
}
template <typename type4x4>
void dequantize_q2_K(device const block_q2_K *xb, short il, thread type4x4 & reg) {
const float d = xb->d;
const float min = xb->dmin;
device const uint8_t * q = (device const uint8_t *)xb->qs;
float dl, ml;
uint8_t sc = xb->scales[il];
#if QK_K == 256
q = q + 32*(il/8) + 16*(il&1);
il = (il/2)%4;
#endif
half coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h);
uchar mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3);
dl = d * (sc & 0xF) * coef, ml = min * (sc >> 4);
for (int i = 0; i < 16; ++i) {
reg[i/4][i%4] = dl * (q[i] & mask) - ml;
}
}
template <typename type4x4>
void dequantize_q3_K(device const block_q3_K *xb, short il, thread type4x4 & reg) {
const half d_all = xb->d;
device const uint8_t * q = (device const uint8_t *)xb->qs;
device const uint8_t * h = (device const uint8_t *)xb->hmask;
device const int8_t * scales = (device const int8_t *)xb->scales;
#if QK_K == 256
q = q + 32 * (il/8) + 16 * (il&1);
h = h + 16 * (il&1);
uint8_t m = 1 << (il/2);
uint16_t kmask1 = (il/4)>1 ? ((il/4)>2 ? 192 : 48) : \
((il/4)>0 ? 12 : 3);
uint16_t kmask2 = il/8 ? 0xF0 : 0x0F;
uint16_t scale_2 = scales[il%8], scale_1 = scales[8 + il%4];
int16_t dl_int = (il/4)&1 ? (scale_2&kmask2) | ((scale_1&kmask1) << 2)
: (scale_2&kmask2) | ((scale_1&kmask1) << 4);
half dl = il<8 ? d_all * (dl_int - 32.h) : d_all * (dl_int / 16.h - 32.h);
const half ml = 4.h * dl;
il = (il/2) & 3;
const half coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h);
const uint8_t mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3);
dl *= coef;
for (int i = 0; i < 16; ++i) {
reg[i/4][i%4] = dl * (q[i] & mask) - (h[i] & m ? 0 : ml);
}
#else
float kcoef = il&1 ? 1.f/16.f : 1.f;
uint16_t kmask = il&1 ? 0xF0 : 0x0F;
float dl = d_all * ((scales[il/2] & kmask) * kcoef - 8);
float coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h);
uint8_t mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3);
uint8_t m = 1<<(il*2);
for (int i = 0; i < 16; ++i) {
reg[i/4][i%4] = coef * dl * ((q[i] & mask) - ((h[i%8] & (m * (1 + i/8))) ? 0 : 4.f/coef));
}
#endif
}
static inline uchar2 get_scale_min_k4_just2(int j, int k, device const uchar * q) {
return j < 4 ? uchar2{uchar(q[j+0+k] & 63), uchar(q[j+4+k] & 63)}
: uchar2{uchar((q[j+4+k] & 0xF) | ((q[j-4+k] & 0xc0) >> 2)), uchar((q[j+4+k] >> 4) | ((q[j-0+k] & 0xc0) >> 2))};
}
template <typename type4x4>
void dequantize_q4_K(device const block_q4_K *xb, short il, thread type4x4 & reg) {
device const uchar * q = xb->qs;
#if QK_K == 256
short is = (il/4) * 2;
q = q + (il/4) * 32 + 16 * (il&1);
il = il & 3;
const uchar2 sc = get_scale_min_k4_just2(is, il/2, xb->scales);
const float d = il < 2 ? xb->d : xb->d / 16.h;
const float min = xb->dmin;
const float dl = d * sc[0];
const float ml = min * sc[1];
#else
q = q + 16 * (il&1);
device const uint8_t * s = xb->scales;
device const half2 * dh = (device const half2 *)xb->d;
const float2 d = (float2)dh[0];
const float dl = il<2 ? d[0] * (s[0]&0xF) : d[0] * (s[1]&0xF)/16.h;
const float ml = il<2 ? d[1] * (s[0]>>4) : d[1] * (s[1]>>4);
#endif
const ushort mask = il<2 ? 0x0F : 0xF0;
for (int i = 0; i < 16; ++i) {
reg[i/4][i%4] = dl * (q[i] & mask) - ml;
}
}
template <typename type4x4>
void dequantize_q5_K(device const block_q5_K *xb, short il, thread type4x4 & reg) {
device const uint8_t * q = xb->qs;
device const uint8_t * qh = xb->qh;
#if QK_K == 256
short is = (il/4) * 2;
q = q + 32 * (il/4) + 16 * (il&1);
qh = qh + 16 * (il&1);
uint8_t ul = 1 << (il/2);
il = il & 3;
const uchar2 sc = get_scale_min_k4_just2(is, il/2, xb->scales);
const float d = il < 2 ? xb->d : xb->d / 16.h;
const float min = xb->dmin;
const float dl = d * sc[0];
const float ml = min * sc[1];
const ushort mask = il<2 ? 0x0F : 0xF0;
const float qh_val = il<2 ? 16.f : 256.f;
for (int i = 0; i < 16; ++i) {
reg[i/4][i%4] = dl * ((q[i] & mask) + (qh[i] & ul ? qh_val : 0)) - ml;
}
#else
q = q + 16 * (il&1);
device const int8_t * s = xb->scales;
const float dl = xb->d * s[il];
uint8_t m = 1<<(il*2);
const float coef = il<2 ? 1.f : 1.f/16.f;
const ushort mask = il<2 ? 0x0F : 0xF0;
for (int i = 0; i < 16; ++i) {
reg[i/4][i%4] = coef * dl * ((q[i] & mask) - (qh[i%8] & (m*(1+i/8)) ? 0.f : 16.f/coef));
}
#endif
}
template <typename type4x4>
void dequantize_q6_K(device const block_q6_K *xb, short il, thread type4x4 & reg) {
const half d_all = xb->d;
device const uint8_t * ql = (device const uint8_t *)xb->ql;
device const uint8_t * qh = (device const uint8_t *)xb->qh;
device const int8_t * scales = (device const int8_t *)xb->scales;
#if QK_K == 256
ql = ql + 64*(il/8) + 32*((il/2)&1) + 16*(il&1);
qh = qh + 32*(il/8) + 16*(il&1);
half sc = scales[(il%2) + 2 * ((il/2))];
il = (il/2) & 3;
#else
ql = ql + 16 * (il&1);
half sc = scales[il];
#endif
const uint16_t kmask1 = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3);
const uint16_t kmask2 = il>1 ? 0xF0 : 0x0F;
const half coef = il>1 ? 1.f/16.h : 1.h;
const half ml = d_all * sc * 32.h;
const half dl = d_all * sc * coef;
for (int i = 0; i < 16; ++i) {
const half q = il&1 ? ((ql[i] & kmask2) | ((qh[i] & kmask1) << 2))
: ((ql[i] & kmask2) | ((qh[i] & kmask1) << 4));
reg[i/4][i%4] = dl * q - ml;
}
}
template<typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread float4x4 &)>
kernel void kernel_get_rows(
device const void * src0,
device const char * src1,
device float * dst,
constant int64_t & ne00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb1,
constant uint64_t & nb2,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint3 tptg [[threads_per_threadgroup]]) {
//const int64_t i = tgpig;
//const int64_t r = ((device int32_t *) src1)[i];
const int64_t i10 = tgpig.x;
const int64_t i11 = tgpig.y;
const int64_t r = ((device int32_t *) ((device char *) src1 + i11*nb11 + i10*nb10))[0];
const int64_t i02 = i11;
for (int64_t ind = tiitg; ind < ne00/16; ind += tptg.x) {
float4x4 temp;
dequantize_func(
((device const block_q *) ((device char *) src0 + r*nb01 + i02*nb02)) + ind/nl, ind%nl, temp);
*(((device float4x4 *) ((device char *) dst + i11*nb2 + i10*nb1)) + ind) = temp;
}
}
kernel void kernel_get_rows_f32(
device const void * src0,
device const char * src1,
device float * dst,
constant int64_t & ne00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb1,
constant uint64_t & nb2,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint3 tptg [[threads_per_threadgroup]]) {
const int64_t i10 = tgpig.x;
const int64_t i11 = tgpig.y;
const int64_t r = ((device int32_t *) ((device char *) src1 + i11*nb11 + i10*nb10))[0];
const int64_t i02 = i11;
for (int ind = tiitg; ind < ne00; ind += tptg.x) {
((device float *) ((device char *) dst + i11*nb2 + i10*nb1))[ind] =
((device float *) ((device char *) src0 + r*nb01 + i02*nb02))[ind];
}
}
kernel void kernel_get_rows_f16(
device const void * src0,
device const char * src1,
device float * dst,
constant int64_t & ne00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb1,
constant uint64_t & nb2,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint3 tptg [[threads_per_threadgroup]]) {
const int64_t i10 = tgpig.x;
const int64_t i11 = tgpig.y;
const int64_t r = ((device int32_t *) ((device char *) src1 + i11*nb11 + i10*nb10))[0];
const int64_t i02 = i11;
for (int ind = tiitg; ind < ne00; ind += tptg.x) {
((device float *) ((device char *) dst + i11*nb2 + i10*nb1))[ind] =
((device half *) ((device char *) src0 + r*nb01 + i02*nb02))[ind];
}
}
#define BLOCK_SIZE_M 64 // 8 simdgroup matrices from matrix A
#define BLOCK_SIZE_N 32 // 4 simdgroup matrices from matrix B
#define BLOCK_SIZE_K 32
#define THREAD_MAT_M 4 // each thread takes 4 simdgroup matrices from matrix A
#define THREAD_MAT_N 2 // each thread takes 2 simdgroup matrices from matrix B
#define THREAD_PER_BLOCK 128
#define THREAD_PER_ROW 2 // 2 threads for each row in matrix A to load numbers
#define THREAD_PER_COL 4 // 4 threads for each row in matrix B to load numbers
#define SG_MAT_SIZE 64 // simdgroup matrix is of shape 8x8
#define SG_MAT_ROW 8
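// kernel_mul_mm computes one BLOCK_SIZE_M x BLOCK_SIZE_N tile of the result per
// threadgroup: tiles of src0 are dequantized to half and staged in threadgroup memory
// (sa), tiles of src1 are staged as float (sb), and 8x8 simdgroup matrices accumulate
// the products over BLOCK_SIZE_K-wide slices of the shared dimension.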
// each block_q contains 16*nl weights
template<typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread half4x4 &)>
void kernel_mul_mm_impl(device const uchar * src0,
device const uchar * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne02,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
threadgroup uchar * shared_memory [[threadgroup(0)]],
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
threadgroup half * sa = (threadgroup half *)(shared_memory);
threadgroup float * sb = (threadgroup float *)(shared_memory + 4096);
const uint r0 = tgpig.y;
const uint r1 = tgpig.x;
const uint im = tgpig.z;
// if this block is of 64x32 shape or smaller
short n_rows = (ne0 - r0 * BLOCK_SIZE_M < BLOCK_SIZE_M) ? (ne0 - r0 * BLOCK_SIZE_M) : BLOCK_SIZE_M;
short n_cols = (ne1 - r1 * BLOCK_SIZE_N < BLOCK_SIZE_N) ? (ne1 - r1 * BLOCK_SIZE_N) : BLOCK_SIZE_N;
// a thread shouldn't load data outside of the matrix
short thread_row = ((short)tiitg/THREAD_PER_ROW) < n_rows ? ((short)tiitg/THREAD_PER_ROW) : n_rows - 1;
short thread_col = ((short)tiitg/THREAD_PER_COL) < n_cols ? ((short)tiitg/THREAD_PER_COL) : n_cols - 1;
simdgroup_half8x8 ma[4];
simdgroup_float8x8 mb[2];
simdgroup_float8x8 c_res[8];
for (int i = 0; i < 8; i++){
c_res[i] = make_filled_simdgroup_matrix<float, 8>(0.f);
}
short il = (tiitg % THREAD_PER_ROW);
const uint i12 = im%ne12;
const uint i13 = im/ne12;
uint offset0 = (i12/r2)*nb02 + (i13/r3)*(nb02*ne02);
ushort offset1 = il/nl;
device const block_q * x = (device const block_q *)(src0 + (r0 * BLOCK_SIZE_M + thread_row) * nb01 + offset0) + offset1;
device const float * y = (device const float *)(src1
+ nb12 * im
+ nb11 * (r1 * BLOCK_SIZE_N + thread_col)
+ nb10 * (BLOCK_SIZE_K / THREAD_PER_COL * (tiitg % THREAD_PER_COL)));
for (int loop_k = 0; loop_k < ne00; loop_k += BLOCK_SIZE_K) {
// load data and store to threadgroup memory
half4x4 temp_a;
dequantize_func(x, il, temp_a);
threadgroup_barrier(mem_flags::mem_threadgroup);
#pragma unroll(16)
for (int i = 0; i < 16; i++) {
*(sa + SG_MAT_SIZE * ((tiitg / THREAD_PER_ROW / 8) \
+ (tiitg % THREAD_PER_ROW) * 16 + (i / 8) * 8) \
+ (tiitg / THREAD_PER_ROW) % 8 + (i & 7) * 8) = temp_a[i/4][i%4];
}
*(threadgroup float2x4 *)(sb + (tiitg % THREAD_PER_COL) * 8 * 32 + 8 * (tiitg / THREAD_PER_COL)) = *((device float2x4 *)y);
il = (il + 2 < nl) ? il + 2 : il % 2;
x = (il < 2) ? x + (2+nl-1)/nl : x;
y += BLOCK_SIZE_K;
threadgroup_barrier(mem_flags::mem_threadgroup);
// load matrices from threadgroup memory and conduct outer products
threadgroup half * lsma = (sa + THREAD_MAT_M * SG_MAT_SIZE * (sgitg % 2));
threadgroup float * lsmb = (sb + THREAD_MAT_N * SG_MAT_SIZE * (sgitg / 2));
#pragma unroll(4)
for (int ik = 0; ik < BLOCK_SIZE_K / 8; ik++) {
#pragma unroll(4)
for (int i = 0; i < 4; i++) {
simdgroup_load(ma[i],lsma + SG_MAT_SIZE * i);
}
simdgroup_barrier(mem_flags::mem_none);
#pragma unroll(2)
for (int i = 0; i < 2; i++) {
simdgroup_load(mb[i],lsmb + SG_MAT_SIZE * i);
}
lsma += BLOCK_SIZE_M / SG_MAT_ROW * SG_MAT_SIZE;
lsmb += BLOCK_SIZE_N / SG_MAT_ROW * SG_MAT_SIZE;
#pragma unroll(8)
for (int i = 0; i < 8; i++){
simdgroup_multiply_accumulate(c_res[i], mb[i/4], ma[i%4], c_res[i]);
}
}
}
if ((r0 + 1) * BLOCK_SIZE_M <= ne0 && (r1 + 1) * BLOCK_SIZE_N <= ne1) {
device float * C = dst + (BLOCK_SIZE_M * r0 + 32 * (sgitg & 1)) \
+ (BLOCK_SIZE_N * r1 + 16 * (sgitg >> 1)) * ne0 + im*ne1*ne0;
for (int i = 0; i < 8; i++) {
simdgroup_store(c_res[i], C + 8 * (i%4) + 8 * ne0 * (i/4), ne0);
}
} else {
// block is smaller than 64x32, we should avoid writing data outside of the matrix
threadgroup_barrier(mem_flags::mem_threadgroup);
threadgroup float * temp_str = ((threadgroup float *)shared_memory) \
+ 32 * (sgitg&1) + (16 * (sgitg>>1)) * BLOCK_SIZE_M;
for (int i = 0; i < 8; i++) {
simdgroup_store(c_res[i], temp_str + 8 * (i%4) + 8 * BLOCK_SIZE_M * (i/4), BLOCK_SIZE_M);
}
threadgroup_barrier(mem_flags::mem_threadgroup);
device float * C = dst + (BLOCK_SIZE_M * r0) + (BLOCK_SIZE_N * r1) * ne0 + im*ne1*ne0;
if (sgitg == 0) {
for (int i = 0; i < n_rows; i++) {
for (int j = tiitg; j < n_cols; j += BLOCK_SIZE_N) {
*(C + i + j * ne0) = *(temp_str + i + j * BLOCK_SIZE_M);
}
}
}
}
}
// same as kernel_mul_mm_impl, but src1 and dst are accessed via indices stored in src1ids
template<typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread half4x4 &)>
void kernel_mul_mm_id_impl(
device const uchar * src0,
device const uchar * src1,
thread short * src1ids,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne02,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
int64_t ne1,
constant uint & r2,
constant uint & r3,
threadgroup uchar * shared_memory,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
threadgroup half * sa = (threadgroup half *)(shared_memory);
threadgroup float * sb = (threadgroup float *)(shared_memory + 4096);
const uint r0 = tgpig.y;
const uint r1 = tgpig.x;
const uint im = tgpig.z;
if (r1 * BLOCK_SIZE_N >= ne1) return;
// if this block is of 64x32 shape or smaller
short n_rows = (ne0 - r0 * BLOCK_SIZE_M < BLOCK_SIZE_M) ? (ne0 - r0 * BLOCK_SIZE_M) : BLOCK_SIZE_M;
short n_cols = (ne1 - r1 * BLOCK_SIZE_N < BLOCK_SIZE_N) ? (ne1 - r1 * BLOCK_SIZE_N) : BLOCK_SIZE_N;
// a thread shouldn't load data outside of the matrix
short thread_row = ((short)tiitg/THREAD_PER_ROW) < n_rows ? ((short)tiitg/THREAD_PER_ROW) : n_rows - 1;
short thread_col = ((short)tiitg/THREAD_PER_COL) < n_cols ? ((short)tiitg/THREAD_PER_COL) : n_cols - 1;
simdgroup_half8x8 ma[4];
simdgroup_float8x8 mb[2];
simdgroup_float8x8 c_res[8];
for (int i = 0; i < 8; i++){
c_res[i] = make_filled_simdgroup_matrix<float, 8>(0.f);
}
short il = (tiitg % THREAD_PER_ROW);
const uint i12 = im%ne12;
const uint i13 = im/ne12;
uint offset0 = (i12/r2)*nb02 + (i13/r3)*(nb02*ne02);
ushort offset1 = il/nl;
device const block_q * x = (device const block_q *)(src0 + (r0 * BLOCK_SIZE_M + thread_row) * nb01 + offset0) + offset1;
device const float * y = (device const float *)(src1
+ nb12 * im
+ nb11 * src1ids[r1 * BLOCK_SIZE_N + thread_col]
+ nb10 * (BLOCK_SIZE_K / THREAD_PER_COL * (tiitg % THREAD_PER_COL)));
for (int loop_k = 0; loop_k < ne00; loop_k += BLOCK_SIZE_K) {
// load data and store to threadgroup memory
half4x4 temp_a;
dequantize_func(x, il, temp_a);
threadgroup_barrier(mem_flags::mem_threadgroup);
for (int i = 0; i < 16; i++) {
*(sa + SG_MAT_SIZE * ((tiitg / THREAD_PER_ROW / 8) \
+ (tiitg % THREAD_PER_ROW) * 16 + (i / 8) * 8) \
+ (tiitg / THREAD_PER_ROW) % 8 + (i & 7) * 8) = temp_a[i/4][i%4];
}
*(threadgroup float2x4 *)(sb + (tiitg % THREAD_PER_COL) * 8 * 32 + 8 * (tiitg / THREAD_PER_COL)) = *((device float2x4 *)y);
il = (il + 2 < nl) ? il + 2 : il % 2;
x = (il < 2) ? x + (2+nl-1)/nl : x;
y += BLOCK_SIZE_K;
threadgroup_barrier(mem_flags::mem_threadgroup);
// load matrices from threadgroup memory and conduct outer products
threadgroup half * lsma = (sa + THREAD_MAT_M * SG_MAT_SIZE * (sgitg % 2));
threadgroup float * lsmb = (sb + THREAD_MAT_N * SG_MAT_SIZE * (sgitg / 2));
for (int ik = 0; ik < BLOCK_SIZE_K / 8; ik++) {
for (int i = 0; i < 4; i++) {
simdgroup_load(ma[i],lsma + SG_MAT_SIZE * i);
}
simdgroup_barrier(mem_flags::mem_none);
for (int i = 0; i < 2; i++) {
simdgroup_load(mb[i],lsmb + SG_MAT_SIZE * i);
}
lsma += BLOCK_SIZE_M / SG_MAT_ROW * SG_MAT_SIZE;
lsmb += BLOCK_SIZE_N / SG_MAT_ROW * SG_MAT_SIZE;
for (int i = 0; i < 8; i++){
simdgroup_multiply_accumulate(c_res[i], mb[i/4], ma[i%4], c_res[i]);
}
}
}
{
threadgroup_barrier(mem_flags::mem_threadgroup);
threadgroup float * temp_str = ((threadgroup float *)shared_memory) \
+ 32 * (sgitg&1) + (16 * (sgitg>>1)) * BLOCK_SIZE_M;
for (int i = 0; i < 8; i++) {
simdgroup_store(c_res[i], temp_str + 8 * (i%4) + 8 * BLOCK_SIZE_M * (i/4), BLOCK_SIZE_M);
}
threadgroup_barrier(mem_flags::mem_threadgroup);
device float * C = dst + (BLOCK_SIZE_M * r0) + im*ne1*ne0;
if (sgitg == 0) {
for (int i = 0; i < n_rows; i++) {
for (int j = tiitg; j < n_cols; j += BLOCK_SIZE_N) {
*(C + i + src1ids[j + r1*BLOCK_SIZE_N] * ne0) = *(temp_str + i + j * BLOCK_SIZE_M);
}
}
}
}
}
template<typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread half4x4 &)>
kernel void kernel_mul_mm(device const uchar * src0,
device const uchar * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne02,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
threadgroup uchar * shared_memory [[threadgroup(0)]],
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
kernel_mul_mm_impl<block_q, nl, dequantize_func>(
src0,
src1,
dst,
ne00,
ne02,
nb01,
nb02,
ne12,
nb10,
nb11,
nb12,
ne0,
ne1,
r2,
r3,
shared_memory,
tgpig,
tiitg,
sgitg);
}
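// kernel_mul_mm_id handles the indirect case: the expert index is taken from tgpig.z,
// the rows of src1 whose id (from `ids`) matches it are gathered into src1ids, and the
// selected matrix src0s[id] is multiplied with just those rows via kernel_mul_mm_id_impl.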
template<typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread half4x4 &)>
kernel void kernel_mul_mm_id(
device const uchar * ids,
device const uchar * src1,
device float * dst,
constant uint64_t & nbi1,
constant int64_t & ne00,
constant int64_t & ne02,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint64_t & nb1,
constant uint & r2,
constant uint & r3,
constant int & idx,
device const uchar * src00,
device const uchar * src01,
device const uchar * src02,
device const uchar * src03,
device const uchar * src04,
device const uchar * src05,
device const uchar * src06,
device const uchar * src07,
threadgroup uchar * shared_memory [[threadgroup(0)]],
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
device const uchar * src0s[8] = {src00, src01, src02, src03, src04, src05, src06, src07};
// expert id
const int32_t id = tgpig.z/(ne12*ne13);
tgpig.z = tgpig.z%(ne12*ne13);
// row indices of src1 for expert id
int64_t _ne1 = 0;
short src1ids[512];
for (int64_t i1 = 0; i1 < ne1; i1++) {
if (((device int32_t *) (ids + i1*nbi1))[idx] == id) {
src1ids[_ne1++] = i1;
}
}
kernel_mul_mm_id_impl<block_q, nl, dequantize_func>(
src0s[id],
src1,
src1ids,
dst,
ne00,
ne02,
nb01,
nb02,
ne12,
nb10,
nb11,
nb12,
ne0,
_ne1,
r2,
r3,
shared_memory,
tgpig,
tiitg,
sgitg);
}
#if QK_K == 256
#define QK_NL 16
#else
#define QK_NL 4
#endif
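// QK_NL is the number of 16-element tiles in one k-quant block (QK_K / 16); it is the
// `nl` template argument used for the k-quant instantiations below.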
//
// get rows
//
typedef void (get_rows_t)(
device const void * src0,
device const char * src1,
device float * dst,
constant int64_t & ne00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb1,
constant uint64_t & nb2,
uint3, uint, uint3);
//template [[host_name("kernel_get_rows_f32")]] kernel get_rows_t kernel_get_rows<float4x4, 1, dequantize_f32>;
//template [[host_name("kernel_get_rows_f16")]] kernel get_rows_t kernel_get_rows<half4x4, 1, dequantize_f16>;
template [[host_name("kernel_get_rows_q4_0")]] kernel get_rows_t kernel_get_rows<block_q4_0, 2, dequantize_q4_0>;
template [[host_name("kernel_get_rows_q4_1")]] kernel get_rows_t kernel_get_rows<block_q4_1, 2, dequantize_q4_1>;
template [[host_name("kernel_get_rows_q5_0")]] kernel get_rows_t kernel_get_rows<block_q5_0, 2, dequantize_q5_0>;
template [[host_name("kernel_get_rows_q5_1")]] kernel get_rows_t kernel_get_rows<block_q5_1, 2, dequantize_q5_1>;
template [[host_name("kernel_get_rows_q8_0")]] kernel get_rows_t kernel_get_rows<block_q8_0, 2, dequantize_q8_0>;
template [[host_name("kernel_get_rows_q2_K")]] kernel get_rows_t kernel_get_rows<block_q2_K, QK_NL, dequantize_q2_K>;
template [[host_name("kernel_get_rows_q3_K")]] kernel get_rows_t kernel_get_rows<block_q3_K, QK_NL, dequantize_q3_K>;
template [[host_name("kernel_get_rows_q4_K")]] kernel get_rows_t kernel_get_rows<block_q4_K, QK_NL, dequantize_q4_K>;
template [[host_name("kernel_get_rows_q5_K")]] kernel get_rows_t kernel_get_rows<block_q5_K, QK_NL, dequantize_q5_K>;
template [[host_name("kernel_get_rows_q6_K")]] kernel get_rows_t kernel_get_rows<block_q6_K, QK_NL, dequantize_q6_K>;
//
// matrix-matrix multiplication
//
typedef void (mat_mm_t)(
device const uchar * src0,
device const uchar * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne02,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint & r2,
constant uint & r3,
threadgroup uchar *,
uint3, uint, uint);
template [[host_name("kernel_mul_mm_f32_f32")]] kernel mat_mm_t kernel_mul_mm<float4x4, 1, dequantize_f32>;
template [[host_name("kernel_mul_mm_f16_f32")]] kernel mat_mm_t kernel_mul_mm<half4x4, 1, dequantize_f16>;
template [[host_name("kernel_mul_mm_q4_0_f32")]] kernel mat_mm_t kernel_mul_mm<block_q4_0, 2, dequantize_q4_0>;
template [[host_name("kernel_mul_mm_q4_1_f32")]] kernel mat_mm_t kernel_mul_mm<block_q4_1, 2, dequantize_q4_1>;
template [[host_name("kernel_mul_mm_q5_0_f32")]] kernel mat_mm_t kernel_mul_mm<block_q5_0, 2, dequantize_q5_0>;
template [[host_name("kernel_mul_mm_q5_1_f32")]] kernel mat_mm_t kernel_mul_mm<block_q5_1, 2, dequantize_q5_1>;
template [[host_name("kernel_mul_mm_q8_0_f32")]] kernel mat_mm_t kernel_mul_mm<block_q8_0, 2, dequantize_q8_0>;
template [[host_name("kernel_mul_mm_q2_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q2_K, QK_NL, dequantize_q2_K>;
template [[host_name("kernel_mul_mm_q3_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q3_K, QK_NL, dequantize_q3_K>;
template [[host_name("kernel_mul_mm_q4_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q4_K, QK_NL, dequantize_q4_K>;
template [[host_name("kernel_mul_mm_q5_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q5_K, QK_NL, dequantize_q5_K>;
template [[host_name("kernel_mul_mm_q6_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q6_K, QK_NL, dequantize_q6_K>;
//
// indirect matrix-matrix multiplication
//
typedef void (mat_mm_id_t)(
device const uchar * ids,
device const uchar * src1,
device float * dst,
constant uint64_t & nbi1,
constant int64_t & ne00,
constant int64_t & ne02,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint64_t & nb1,
constant uint & r2,
constant uint & r3,
constant int & idx,
device const uchar * src00,
device const uchar * src01,
device const uchar * src02,
device const uchar * src03,
device const uchar * src04,
device const uchar * src05,
device const uchar * src06,
device const uchar * src07,
threadgroup uchar *,
uint3, uint, uint);
template [[host_name("kernel_mul_mm_id_f32_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<float4x4, 1, dequantize_f32>;
template [[host_name("kernel_mul_mm_id_f16_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<half4x4, 1, dequantize_f16>;
template [[host_name("kernel_mul_mm_id_q4_0_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q4_0, 2, dequantize_q4_0>;
template [[host_name("kernel_mul_mm_id_q4_1_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q4_1, 2, dequantize_q4_1>;
template [[host_name("kernel_mul_mm_id_q5_0_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q5_0, 2, dequantize_q5_0>;
template [[host_name("kernel_mul_mm_id_q5_1_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q5_1, 2, dequantize_q5_1>;
template [[host_name("kernel_mul_mm_id_q8_0_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q8_0, 2, dequantize_q8_0>;
template [[host_name("kernel_mul_mm_id_q2_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q2_K, QK_NL, dequantize_q2_K>;
template [[host_name("kernel_mul_mm_id_q3_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q3_K, QK_NL, dequantize_q3_K>;
template [[host_name("kernel_mul_mm_id_q4_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q4_K, QK_NL, dequantize_q4_K>;
template [[host_name("kernel_mul_mm_id_q5_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q5_K, QK_NL, dequantize_q5_K>;
template [[host_name("kernel_mul_mm_id_q6_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q6_K, QK_NL, dequantize_q6_K>;
//
// matrix-vector multiplication
//
[[host_name("kernel_mul_mv_id_f32_f32")]]
kernel void kernel_mul_mv_id_f32_f32(
device const char * ids,
device const char * src1,
device float * dst,
constant uint64_t & nbi1,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint64_t & nb1,
constant uint & r2,
constant uint & r3,
constant int & idx,
device const char * src00,
device const char * src01,
device const char * src02,
device const char * src03,
device const char * src04,
device const char * src05,
device const char * src06,
device const char * src07,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07};
const int64_t bid = tgpig.z/(ne12*ne13);
tgpig.z = tgpig.z%(ne12*ne13);
const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx];
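// bid is the ids/src1 row handled by this threadgroup and id the expert selected for it;
// the dense mat-vec below runs against that expert's weights, with src1 and dst offset to
// that row. The same pattern is used by all the kernel_mul_mv_id_* kernels that follow.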
kernel_mul_mv_f32_f32_impl(
src0[id],
src1 + bid*nb11,
dst + bid*ne0,
ne00,
ne01,
ne02,
nb00,
nb01,
nb02,
ne10,
ne11,
ne12,
nb10,
nb11,
nb12,
ne0,
ne1,
r2,
r3,
tgpig,
tiisg);
}
[[host_name("kernel_mul_mv_id_f16_f32")]]
kernel void kernel_mul_mv_id_f16_f32(
device const char * ids,
device const char * src1,
device float * dst,
constant uint64_t & nbi1,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint64_t & nb1,
constant uint & r2,
constant uint & r3,
constant int & idx,
device const char * src00,
device const char * src01,
device const char * src02,
device const char * src03,
device const char * src04,
device const char * src05,
device const char * src06,
device const char * src07,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07};
const int64_t bid = tgpig.z/(ne12*ne13);
tgpig.z = tgpig.z%(ne12*ne13);
const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx];
kernel_mul_mv_f16_f32_impl(
src0[id],
src1 + bid*nb11,
dst + bid*ne0,
ne00,
ne01,
ne02,
nb00,
nb01,
nb02,
ne10,
ne11,
ne12,
nb10,
nb11,
nb12,
ne0,
ne1,
r2,
r3,
tgpig,
tiisg);
}
[[host_name("kernel_mul_mv_id_q8_0_f32")]]
kernel void kernel_mul_mv_id_q8_0_f32(
device const char * ids,
device const char * src1,
device float * dst,
constant uint64_t & nbi1,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint64_t & nb1,
constant uint & r2,
constant uint & r3,
constant int & idx,
device const char * src00,
device const char * src01,
device const char * src02,
device const char * src03,
device const char * src04,
device const char * src05,
device const char * src06,
device const char * src07,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07};
const int64_t bid = tgpig.z/(ne12*ne13);
tgpig.z = tgpig.z%(ne12*ne13);
const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx];
kernel_mul_mv_q8_0_f32_impl(
src0[id],
(device const float *) (src1 + bid*nb11),
dst + bid*ne0,
ne00,
ne01,
ne02,
ne10,
ne12,
ne0,
ne1,
r2,
r3,
tgpig,
tiisg,
sgitg);
}
[[host_name("kernel_mul_mv_id_q4_0_f32")]]
kernel void kernel_mul_mv_id_q4_0_f32(
device const char * ids,
device const char * src1,
device float * dst,
constant uint64_t & nbi1,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint64_t & nb1,
constant uint & r2,
constant uint & r3,
constant int & idx,
device const char * src00,
device const char * src01,
device const char * src02,
device const char * src03,
device const char * src04,
device const char * src05,
device const char * src06,
device const char * src07,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07};
const int64_t bid = tgpig.z/(ne12*ne13);
tgpig.z = tgpig.z%(ne12*ne13);
const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx];
mul_vec_q_n_f32_impl<block_q4_0, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(
src0[id],
(device const float *) (src1 + bid*nb11),
dst + bid*ne0,
ne00,
ne01,
ne02,
ne10,
ne12,
ne0,
ne1,
r2,
r3,
tgpig,
tiisg,
sgitg);
}
[[host_name("kernel_mul_mv_id_q4_1_f32")]]
kernel void kernel_mul_mv_id_q4_1_f32(
device const char * ids,
device const char * src1,
device float * dst,
constant uint64_t & nbi1,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint64_t & nb1,
constant uint & r2,
constant uint & r3,
constant int & idx,
device const char * src00,
device const char * src01,
device const char * src02,
device const char * src03,
device const char * src04,
device const char * src05,
device const char * src06,
device const char * src07,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07};
const int64_t bid = tgpig.z/(ne12*ne13);
tgpig.z = tgpig.z%(ne12*ne13);
const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx];
mul_vec_q_n_f32_impl<block_q4_1, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(
src0[id],
(device const float *) (src1 + bid*nb11),
dst + bid*ne0,
ne00,
ne01,
ne02,
ne10,
ne12,
ne0,
ne1,
r2,
r3,
tgpig,
tiisg,
sgitg);
}
[[host_name("kernel_mul_mv_id_q5_0_f32")]]
kernel void kernel_mul_mv_id_q5_0_f32(
device const char * ids,
device const char * src1,
device float * dst,
constant uint64_t & nbi1,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint64_t & nb1,
constant uint & r2,
constant uint & r3,
constant int & idx,
device const char * src00,
device const char * src01,
device const char * src02,
device const char * src03,
device const char * src04,
device const char * src05,
device const char * src06,
device const char * src07,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07};
const int64_t bid = tgpig.z/(ne12*ne13);
tgpig.z = tgpig.z%(ne12*ne13);
const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx];
mul_vec_q_n_f32_impl<block_q5_0, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(
src0[id],
(device const float *) (src1 + bid*nb11),
dst + bid*ne0,
ne00,
ne01,
ne02,
ne10,
ne12,
ne0,
ne1,
r2,
r3,
tgpig,
tiisg,
sgitg);
}
[[host_name("kernel_mul_mv_id_q5_1_f32")]]
kernel void kernel_mul_mv_id_q5_1_f32(
device const char * ids,
device const char * src1,
device float * dst,
constant uint64_t & nbi1,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint64_t & nb1,
constant uint & r2,
constant uint & r3,
constant int & idx,
device const char * src00,
device const char * src01,
device const char * src02,
device const char * src03,
device const char * src04,
device const char * src05,
device const char * src06,
device const char * src07,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07};
const int64_t bid = tgpig.z/(ne12*ne13);
tgpig.z = tgpig.z%(ne12*ne13);
const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx];
mul_vec_q_n_f32_impl<block_q5_1, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(
src0[id],
(device const float *) (src1 + bid*nb11),
dst + bid*ne0,
ne00,
ne01,
ne02,
ne10,
ne12,
ne0,
ne1,
r2,
r3,
tgpig,
tiisg,
sgitg);
}
[[host_name("kernel_mul_mv_id_q2_K_f32")]]
kernel void kernel_mul_mv_id_q2_K_f32(
device const char * ids,
device const char * src1,
device float * dst,
constant uint64_t & nbi1,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint64_t & nb1,
constant uint & r2,
constant uint & r3,
constant int & idx,
device const char * src00,
device const char * src01,
device const char * src02,
device const char * src03,
device const char * src04,
device const char * src05,
device const char * src06,
device const char * src07,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07};
const int64_t bid = tgpig.z/(ne12*ne13);
tgpig.z = tgpig.z%(ne12*ne13);
const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx];
kernel_mul_mv_q2_K_f32_impl(
src0[id],
(device const float *) (src1 + bid*nb11),
dst + bid*ne0,
ne00,
ne01,
ne02,
ne10,
ne12,
ne0,
ne1,
r2,
r3,
tgpig,
tiisg,
sgitg);
}
[[host_name("kernel_mul_mv_id_q3_K_f32")]]
kernel void kernel_mul_mv_id_q3_K_f32(
device const char * ids,
device const char * src1,
device float * dst,
constant uint64_t & nbi1,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint64_t & nb1,
constant uint & r2,
constant uint & r3,
constant int & idx,
device const char * src00,
device const char * src01,
device const char * src02,
device const char * src03,
device const char * src04,
device const char * src05,
device const char * src06,
device const char * src07,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07};
const int64_t bid = tgpig.z/(ne12*ne13);
tgpig.z = tgpig.z%(ne12*ne13);
const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx];
kernel_mul_mv_q3_K_f32_impl(
src0[id],
(device const float *) (src1 + bid*nb11),
dst + bid*ne0,
ne00,
ne01,
ne02,
ne10,
ne12,
ne0,
ne1,
r2,
r3,
tgpig,
tiisg,
sgitg);
}
[[host_name("kernel_mul_mv_id_q4_K_f32")]]
kernel void kernel_mul_mv_id_q4_K_f32(
device const char * ids,
device const char * src1,
device float * dst,
constant uint64_t & nbi1,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint64_t & nb1,
constant uint & r2,
constant uint & r3,
constant int & idx,
device const char * src00,
device const char * src01,
device const char * src02,
device const char * src03,
device const char * src04,
device const char * src05,
device const char * src06,
device const char * src07,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07};
const int64_t bid = tgpig.z/(ne12*ne13);
tgpig.z = tgpig.z%(ne12*ne13);
const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx];
kernel_mul_mv_q4_K_f32_impl(
src0[id],
(device const float *) (src1 + bid*nb11),
dst + bid*ne0,
ne00,
ne01,
ne02,
ne10,
ne12,
ne0,
ne1,
r2,
r3,
tgpig,
tiisg,
sgitg);
}
[[host_name("kernel_mul_mv_id_q5_K_f32")]]
kernel void kernel_mul_mv_id_q5_K_f32(
device const char * ids,
device const char * src1,
device float * dst,
constant uint64_t & nbi1,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint64_t & nb1,
constant uint & r2,
constant uint & r3,
constant int & idx,
device const char * src00,
device const char * src01,
device const char * src02,
device const char * src03,
device const char * src04,
device const char * src05,
device const char * src06,
device const char * src07,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07};
const int64_t bid = tgpig.z/(ne12*ne13);
tgpig.z = tgpig.z%(ne12*ne13);
const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx];
kernel_mul_mv_q5_K_f32_impl(
src0[id],
(device const float *) (src1 + bid*nb11),
dst + bid*ne0,
ne00,
ne01,
ne02,
ne10,
ne12,
ne0,
ne1,
r2,
r3,
tgpig,
tiisg,
sgitg);
}
[[host_name("kernel_mul_mv_id_q6_K_f32")]]
kernel void kernel_mul_mv_id_q6_K_f32(
device const char * ids,
device const char * src1,
device float * dst,
constant uint64_t & nbi1,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant int64_t & ne13,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
constant int64_t & ne0,
constant int64_t & ne1,
constant uint64_t & nb1,
constant uint & r2,
constant uint & r3,
constant int & idx,
device const char * src00,
device const char * src01,
device const char * src02,
device const char * src03,
device const char * src04,
device const char * src05,
device const char * src06,
device const char * src07,
uint3 tgpig[[threadgroup_position_in_grid]],
uint tiitg[[thread_index_in_threadgroup]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07};
const int64_t bid = tgpig.z/(ne12*ne13);
tgpig.z = tgpig.z%(ne12*ne13);
const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx];
kernel_mul_mv_q6_K_f32_impl(
src0[id],
(device const float *) (src1 + bid*nb11),
dst + bid*ne0,
ne00,
ne01,
ne02,
ne10,
ne12,
ne0,
ne1,
r2,
r3,
tgpig,
tiisg,
sgitg);
}
| candle/candle-metal-kernels/src/quantized.metal/0 | {
"file_path": "candle/candle-metal-kernels/src/quantized.metal",
"repo_id": "candle",
"token_count": 97268
} | 28 |
//! Convolution Layers.
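//!
//! A minimal usage sketch (zero-initialized weights purely for illustration; a real model
//! would load them from a checkpoint through its `VarBuilder`):
//!
//! ```no_run
//! use candle::{DType, Device, Tensor};
//! use candle_nn::{conv2d, Conv2dConfig, Module, VarBuilder};
//!
//! fn demo() -> candle::Result<()> {
//!     let dev = Device::Cpu;
//!     let vb = VarBuilder::zeros(DType::F32, &dev);
//!     // 3 input channels, 16 output channels, 3x3 kernel.
//!     let layer = conv2d(3, 16, 3, Conv2dConfig::default(), vb.pp("conv"))?;
//!     let image = Tensor::zeros((1, 3, 32, 32), DType::F32, &dev)?;
//!     let _features = layer.forward(&image)?; // (1, 16, 30, 30) with the default padding of 0
//!     Ok(())
//! }
//! ```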
use crate::BatchNorm;
use candle::{Result, Tensor};
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Conv1dConfig {
pub padding: usize,
pub stride: usize,
pub dilation: usize,
pub groups: usize,
}
impl Default for Conv1dConfig {
fn default() -> Self {
Self {
padding: 0,
stride: 1,
dilation: 1,
groups: 1,
}
}
}
#[derive(Clone, Debug)]
pub struct Conv1d {
weight: Tensor,
bias: Option<Tensor>,
config: Conv1dConfig,
}
impl Conv1d {
pub fn new(weight: Tensor, bias: Option<Tensor>, config: Conv1dConfig) -> Self {
Self {
weight,
bias,
config,
}
}
pub fn config(&self) -> &Conv1dConfig {
&self.config
}
pub fn weight(&self) -> &Tensor {
&self.weight
}
pub fn bias(&self) -> Option<&Tensor> {
self.bias.as_ref()
}
}
impl crate::Module for Conv1d {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = x.conv1d(
&self.weight,
self.config.padding,
self.config.stride,
self.config.dilation,
self.config.groups,
)?;
match &self.bias {
None => Ok(x),
Some(bias) => {
let b = bias.dims1()?;
let bias = bias.reshape((1, b, 1))?;
Ok(x.broadcast_add(&bias)?)
}
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ConvTranspose1dConfig {
pub padding: usize,
pub output_padding: usize,
pub stride: usize,
pub dilation: usize,
// TODO: support groups.
}
impl Default for ConvTranspose1dConfig {
fn default() -> Self {
Self {
padding: 0,
output_padding: 0,
stride: 1,
dilation: 1,
}
}
}
#[derive(Clone, Debug)]
pub struct ConvTranspose1d {
weight: Tensor,
bias: Option<Tensor>,
config: ConvTranspose1dConfig,
}
impl ConvTranspose1d {
pub fn new(weight: Tensor, bias: Option<Tensor>, config: ConvTranspose1dConfig) -> Self {
Self {
weight,
bias,
config,
}
}
pub fn config(&self) -> &ConvTranspose1dConfig {
&self.config
}
}
impl crate::Module for ConvTranspose1d {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = x.conv_transpose1d(
&self.weight,
self.config.padding,
self.config.output_padding,
self.config.stride,
self.config.dilation,
)?;
match &self.bias {
None => Ok(x),
Some(bias) => {
let b = bias.dims1()?;
let bias = bias.reshape((1, b, 1))?;
Ok(x.broadcast_add(&bias)?)
}
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Conv2dConfig {
pub padding: usize,
pub stride: usize,
pub dilation: usize,
pub groups: usize,
}
impl Default for Conv2dConfig {
fn default() -> Self {
Self {
padding: 0,
stride: 1,
dilation: 1,
groups: 1,
}
}
}
#[derive(Clone, Debug)]
pub struct Conv2d {
weight: Tensor,
bias: Option<Tensor>,
config: Conv2dConfig,
}
impl Conv2d {
pub fn new(weight: Tensor, bias: Option<Tensor>, config: Conv2dConfig) -> Self {
Self {
weight,
bias,
config,
}
}
pub fn config(&self) -> &Conv2dConfig {
&self.config
}
pub fn weight(&self) -> &Tensor {
&self.weight
}
pub fn bias(&self) -> Option<&Tensor> {
self.bias.as_ref()
}
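/// Folds the statistics and affine parameters of a [`BatchNorm`] layer into this
/// convolution so that the normalization step can be skipped at inference time.
/// With `s = gamma / sqrt(running_var + eps)`, the new weight is `w * s` (per output
/// channel) and the new bias is `beta + s * (b - running_mean)` (or `beta - s * running_mean`
/// when the convolution had no bias).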
pub fn absorb_bn(&self, bn: &BatchNorm) -> Result<Self> {
if let Some((w_bn, b_bn)) = bn.weight_and_bias() {
let std_ = w_bn.div(&((bn.running_var() + bn.eps())?.sqrt()?))?;
let weight = self
.weight()
.broadcast_mul(&(std_.reshape((self.weight().dims4()?.0, 1, 1, 1))?))?;
let bias = match &self.bias {
None => b_bn.sub(&(std_.mul(bn.running_mean())?))?,
Some(bias) => b_bn.add(&(std_.mul(&bias.sub(bn.running_mean())?)?))?,
};
Ok(Self {
weight,
bias: Some(bias),
config: self.config,
})
} else {
candle::bail!("batch norm does not have weight_and_bias")
}
}
}
impl crate::Module for Conv2d {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = x.conv2d(
&self.weight,
self.config.padding,
self.config.stride,
self.config.dilation,
self.config.groups,
)?;
match &self.bias {
None => Ok(x),
Some(bias) => {
let b = bias.dims1()?;
let bias = bias.reshape((1, b, 1, 1))?;
Ok(x.broadcast_add(&bias)?)
}
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ConvTranspose2dConfig {
pub padding: usize,
pub output_padding: usize,
pub stride: usize,
pub dilation: usize,
// TODO: support groups.
}
impl Default for ConvTranspose2dConfig {
fn default() -> Self {
Self {
padding: 0,
output_padding: 0,
stride: 1,
dilation: 1,
}
}
}
#[derive(Clone, Debug)]
pub struct ConvTranspose2d {
weight: Tensor,
bias: Option<Tensor>,
config: ConvTranspose2dConfig,
}
impl ConvTranspose2d {
pub fn new(weight: Tensor, bias: Option<Tensor>, config: ConvTranspose2dConfig) -> Self {
Self {
weight,
bias,
config,
}
}
pub fn config(&self) -> &ConvTranspose2dConfig {
&self.config
}
}
impl crate::Module for ConvTranspose2d {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = x.conv_transpose2d(
&self.weight,
self.config.padding,
self.config.output_padding,
self.config.stride,
self.config.dilation,
)?;
match &self.bias {
None => Ok(x),
Some(bias) => {
let b = bias.dims1()?;
let bias = bias.reshape((1, b, 1, 1))?;
Ok(x.broadcast_add(&bias)?)
}
}
}
}
pub fn conv1d(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
cfg: Conv1dConfig,
vb: crate::VarBuilder,
) -> Result<Conv1d> {
let init_ws = crate::init::DEFAULT_KAIMING_NORMAL;
let ws = vb.get_with_hints(
(out_channels, in_channels / cfg.groups, kernel_size),
"weight",
init_ws,
)?;
let bound = 1. / (in_channels as f64).sqrt();
let init_bs = crate::Init::Uniform {
lo: -bound,
up: bound,
};
let bs = vb.get_with_hints(out_channels, "bias", init_bs)?;
Ok(Conv1d::new(ws, Some(bs), cfg))
}
pub fn conv_transpose1d(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
cfg: ConvTranspose1dConfig,
vb: crate::VarBuilder,
) -> Result<ConvTranspose1d> {
let bound = 1. / (out_channels as f64 * kernel_size as f64).sqrt();
let init = crate::Init::Uniform {
lo: -bound,
up: bound,
};
let ws = vb.get_with_hints((in_channels, out_channels, kernel_size), "weight", init)?;
let bs = vb.get_with_hints(out_channels, "bias", init)?;
Ok(ConvTranspose1d::new(ws, Some(bs), cfg))
}
pub fn conv_transpose1d_no_bias(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
cfg: ConvTranspose1dConfig,
vb: crate::VarBuilder,
) -> Result<ConvTranspose1d> {
let bound = 1. / (out_channels as f64 * kernel_size as f64).sqrt();
let init = crate::Init::Uniform {
lo: -bound,
up: bound,
};
let ws = vb.get_with_hints((in_channels, out_channels, kernel_size), "weight", init)?;
Ok(ConvTranspose1d::new(ws, None, cfg))
}
pub fn conv2d(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
cfg: Conv2dConfig,
vb: crate::VarBuilder,
) -> Result<Conv2d> {
let init_ws = crate::init::DEFAULT_KAIMING_NORMAL;
let ws = vb.get_with_hints(
(
out_channels,
in_channels / cfg.groups,
kernel_size,
kernel_size,
),
"weight",
init_ws,
)?;
let bound = 1. / (in_channels as f64).sqrt();
let init_bs = crate::Init::Uniform {
lo: -bound,
up: bound,
};
let bs = vb.get_with_hints(out_channels, "bias", init_bs)?;
Ok(Conv2d::new(ws, Some(bs), cfg))
}
pub fn conv2d_no_bias(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
cfg: Conv2dConfig,
vb: crate::VarBuilder,
) -> Result<Conv2d> {
let init_ws = crate::init::DEFAULT_KAIMING_NORMAL;
let ws = vb.get_with_hints(
(
out_channels,
in_channels / cfg.groups,
kernel_size,
kernel_size,
),
"weight",
init_ws,
)?;
Ok(Conv2d::new(ws, None, cfg))
}
pub fn conv_transpose2d(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
cfg: ConvTranspose2dConfig,
vb: crate::VarBuilder,
) -> Result<ConvTranspose2d> {
let bound = 1. / (out_channels as f64).sqrt() / kernel_size as f64;
let init = crate::Init::Uniform {
lo: -bound,
up: bound,
};
let ws = vb.get_with_hints(
(in_channels, out_channels, kernel_size, kernel_size),
"weight",
init,
)?;
let bs = vb.get_with_hints(out_channels, "bias", init)?;
Ok(ConvTranspose2d::new(ws, Some(bs), cfg))
}
pub fn conv_transpose2d_no_bias(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
cfg: ConvTranspose2dConfig,
vb: crate::VarBuilder,
) -> Result<ConvTranspose2d> {
let bound = 1. / (out_channels as f64).sqrt() / kernel_size as f64;
let init = crate::Init::Uniform {
lo: -bound,
up: bound,
};
let ws = vb.get_with_hints(
(in_channels, out_channels, kernel_size, kernel_size),
"weight",
init,
)?;
Ok(ConvTranspose2d::new(ws, None, cfg))
}
| candle/candle-nn/src/conv.rs/0 | {
"file_path": "candle/candle-nn/src/conv.rs",
"repo_id": "candle",
"token_count": 5440
} | 29 |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::Result;
use candle::{test_utils, DType, Device, Tensor};
use candle_nn::BatchNorm;
/* The test below has been generated using the following PyTorch code:
import torch
torch.manual_seed(19551105)
m = torch.nn.BatchNorm2d(5, affine=False)
input = torch.randn(2, 5, 3, 4)
output = m(input)
print(input.flatten())
print(output.flatten())
print(m.running_mean)
print(m.running_var)
*/
#[test]
fn batch_norm() -> Result<()> {
let running_mean = Tensor::zeros(5, DType::F32, &Device::Cpu)?;
let running_var = Tensor::ones(5, DType::F32, &Device::Cpu)?;
let bn = BatchNorm::new_no_bias(5, running_mean.clone(), running_var.clone(), 1e-8)?;
let input: [f32; 120] = [
-0.7493, -1.0410, 1.6977, -0.6579, 1.7982, -0.0087, 0.2812, -0.1190, 0.2908, -0.5975,
-0.0278, -0.2138, -1.3130, -1.6048, -2.2028, 0.9452, 0.4002, 0.0831, 1.0004, 0.1860,
0.5004, 0.5539, 0.9991, -0.2540, -0.0703, -0.3752, -0.1096, -0.2374, 1.0258, -2.2208,
-0.0257, 0.6073, -1.1627, -0.0964, -1.9718, 1.6577, 0.1931, -0.3692, -0.8011, 0.9059,
0.4797, 0.6521, -0.0165, -0.6683, -0.4148, 2.0649, -0.8276, 1.7947, -0.2061, 0.5812,
-1.3598, 1.6192, 1.0466, -0.4423, 0.4202, 0.1749, 0.6969, 0.2616, -0.0369, -1.4951,
-0.0814, -0.1877, 0.0267, 0.6150, 0.2402, -1.1440, -2.0068, 0.6032, -2.6639, 0.8260,
0.1085, -0.1693, 1.2805, 0.7654, -0.4930, 0.3770, 1.1309, 0.2303, 0.2949, -0.2634, -0.5225,
0.4269, 0.6341, 1.5736, 0.9827, -1.2499, 0.3509, -1.6243, -0.8123, 0.7634, -0.3047, 0.0143,
-0.4032, 0.0537, 0.7022, 0.8405, -1.2221, -1.6847, -0.0714, -0.1608, 0.5579, -1.5858,
0.4617, -0.6480, 0.1332, 0.0419, -0.9784, 0.4173, 1.2313, -1.9046, -0.1656, 0.1259, 0.0763,
1.4252, -0.9115, -0.1093, -0.3100, -0.6734, -1.4357, 0.9205,
];
let input = Tensor::new(&input, &Device::Cpu)?.reshape((2, 5, 3, 4))?;
let output = bn.forward_train(&input)?;
assert_eq!(output.dims(), &[2, 5, 3, 4]);
let output = output.flatten_all()?;
assert_eq!(
test_utils::to_vec1_round(&output, 4)?,
&[
-0.6391, -0.9414, 1.8965, -0.5444, 2.0007, 0.1283, 0.4287, 0.014, 0.4387, -0.4818,
0.1085, -0.0842, -1.6809, -2.0057, -2.6714, 0.8328, 0.2262, -0.1268, 0.8943, -0.0123,
0.3377, 0.3973, 0.8928, -0.5021, 0.0861, -0.2324, 0.0451, -0.0884, 1.2311, -2.1603,
0.1327, 0.7939, -1.055, 0.0589, -1.9002, 1.8912, 0.2918, -0.3253, -0.7993, 1.0741,
0.6063, 0.7955, 0.0617, -0.6536, -0.3754, 2.3461, -0.8284, 2.0495, -0.201, 0.6476,
-1.4446, 1.7665, 1.1493, -0.4556, 0.4741, 0.2097, 0.7723, 0.3031, -0.0186, -1.5905,
0.053, -0.0572, 0.165, 0.7746, 0.3862, -1.0481, -1.9422, 0.7624, -2.6231, 0.9933,
0.2498, -0.0381, 1.2061, 0.6327, -0.7681, 0.2004, 1.0396, 0.037, 0.109, -0.5125,
-0.8009, 0.2559, 0.4865, 1.5324, 1.1861, -1.1461, 0.5261, -1.5372, -0.689, 0.957,
-0.1587, 0.1745, -0.2616, 0.2156, 0.8931, 1.0375, -1.2614, -1.7691, 0.0015, -0.0966,
0.6921, -1.6605, 0.5866, -0.6313, 0.226, 0.1258, -0.9939, 0.5378, 1.3484, -2.0319,
-0.1574, 0.1568, 0.1034, 1.5574, -0.9614, -0.0967, -0.313, -0.7047, -1.5264, 1.0134
]
);
let bn2 = BatchNorm::new(
5,
running_mean,
running_var,
Tensor::new(&[0.5f32], &Device::Cpu)?.broadcast_as(5)?,
Tensor::new(&[-1.5f32], &Device::Cpu)?.broadcast_as(5)?,
1e-8,
)?;
let output2 = bn2.forward_train(&input)?;
assert_eq!(output2.dims(), &[2, 5, 3, 4]);
let output2 = output2.flatten_all()?;
let diff2 = ((output2 - (output * 0.5)?)? + 1.5)?.sqr()?;
let sum_diff2 = diff2.sum_keepdim(0)?;
assert_eq!(test_utils::to_vec1_round(&sum_diff2, 4)?, &[0f32]);
assert_eq!(
test_utils::to_vec1_round(bn.running_mean(), 4)?,
&[-0.0133, 0.0197, -0.0153, -0.0073, -0.0020]
);
assert_eq!(
test_utils::to_vec1_round(bn.running_var(), 4)?,
&[0.9972, 0.9842, 0.9956, 0.9866, 0.9898]
);
Ok(())
}
| candle/candle-nn/tests/batch_norm.rs/0 | {
"file_path": "candle/candle-nn/tests/batch_norm.rs",
"repo_id": "candle",
"token_count": 2474
} | 30 |
[package]
name = "candle-pyo3"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true
readme = "README.md"
[lib]
name = "candle"
crate-type = ["cdylib"]
[dependencies]
accelerate-src = { workspace = true, optional = true }
candle = { workspace = true }
candle-nn = { workspace = true }
candle-onnx = { workspace = true, optional = true }
half = { workspace = true }
intel-mkl-src = { workspace = true, optional = true }
pyo3 = { version = "0.20.0", features = ["extension-module", "abi3-py38"] }
[build-dependencies]
pyo3-build-config = "0.20"
[features]
default = []
accelerate = ["dep:accelerate-src", "candle/accelerate"]
cuda = ["candle/cuda"]
mkl = ["dep:intel-mkl-src", "candle/mkl"]
onnx = ["dep:candle-onnx"]
| candle/candle-pyo3/Cargo.toml/0 | {
"file_path": "candle/candle-pyo3/Cargo.toml",
"repo_id": "candle",
"token_count": 315
} | 31 |
import candle
from candle import Tensor
from .module import Module
from typing import Union, List, Tuple, Optional, Any
_shape_t = Union[int, List[int]]
import numbers
class LayerNorm(Module):
r"""Applies Layer Normalization over a mini-batch of inputs as described in
the paper `Layer Normalization <https://arxiv.org/abs/1607.06450>`_.

.. math::
    y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
"""
__constants__ = ["normalized_shape", "eps"]
normalized_shape: Tuple[int, ...]
eps: float
def __init__(
self,
normalized_shape: _shape_t,
eps: float = 1e-5,
bias: bool = True,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = (normalized_shape,)
self.normalized_shape = tuple(normalized_shape)
self.eps = eps
self.weight = candle.ones(normalized_shape, **factory_kwargs)
if bias:
self.bias = candle.zeros(normalized_shape, **factory_kwargs)
else:
self.bias = None
def forward(self, input: Tensor) -> Tensor:
mean_x = input.sum_keepdim(2) / float(self.normalized_shape[-1])
x = input.broadcast_sub(mean_x)
norm_x = x.sqr().sum_keepdim(2) / float(self.normalized_shape[-1])
x_normed = x.broadcast_div((norm_x + self.eps).sqrt())
x = x_normed.broadcast_mul(self.weight)
if self.bias:
x = x.broadcast_add(self.bias)
return x
def extra_repr(self) -> str:
return f"{self.normalized_shape}, eps={self.eps}, bias={self.bias is not None}"
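# A minimal usage sketch (hypothetical shapes; `forward` normalizes over dim 2, so the
# layer expects a (batch, seq_len, hidden) shaped input):
#
#     ln = LayerNorm(512)
#     y = ln.forward(candle.randn((2, 16, 512)))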
| candle/candle-pyo3/py_src/candle/nn/normalization.py/0 | {
"file_path": "candle/candle-pyo3/py_src/candle/nn/normalization.py",
"repo_id": "candle",
"token_count": 803
} | 32 |
import candle
import torch
# convert from candle tensor to torch tensor
t = candle.randn((3, 512, 512))
torch_tensor = t.to_torch()
print(torch_tensor)
print(type(torch_tensor))
# convert from torch tensor to candle tensor
t = torch.randn((3, 512, 512))
candle_tensor = candle.Tensor(t)
print(candle_tensor)
print(type(candle_tensor))
| candle/candle-pyo3/test_pytorch.py/0 | {
"file_path": "candle/candle-pyo3/test_pytorch.py",
"repo_id": "candle",
"token_count": 126
} | 33 |
use candle::Result;
use candle_nn::{batch_norm, Conv2dConfig, Module, VarBuilder};
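/// 2D convolution with TensorFlow-style "same" padding: the input is zero-padded at run
/// time so that the output spatial size is `ceil(input_size / stride)`, independent of the
/// kernel size.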
#[allow(clippy::many_single_char_names)]
fn conv2d_same(
i: usize,
o: usize,
k: usize,
c: Conv2dConfig,
vb: VarBuilder,
) -> Result<impl Module> {
let conv2d = candle_nn::conv2d(i, o, k, c, vb)?;
let s = c.stride;
let module = candle_nn::func(move |xs| {
let ih = xs.dim(2)?;
let iw = xs.dim(3)?;
let oh = (ih + s - 1) / s;
let ow = (iw + s - 1) / s;
let pad_h = usize::max((oh - 1) * s + k - ih, 0);
let pad_w = usize::max((ow - 1) * s + k - iw, 0);
if pad_h > 0 || pad_w > 0 {
xs.pad_with_zeros(3, pad_w / 2, pad_w - pad_w / 2)?
.pad_with_zeros(2, pad_h / 2, pad_h - pad_h / 2)?
.apply(&conv2d)
} else {
xs.apply(&conv2d)
}
});
Ok(module)
}
fn block(dim: usize, kernel_size: usize, vb: VarBuilder) -> Result<impl Module> {
let conv2d_cfg = Conv2dConfig {
groups: dim,
..Default::default()
};
let vb_fn = vb.pp(0).pp("fn");
let conv1 = conv2d_same(dim, dim, kernel_size, conv2d_cfg, vb_fn.pp(0))?;
let bn1 = batch_norm(dim, 1e-5, vb_fn.pp(2))?;
let conv2 = candle_nn::conv2d(dim, dim, 1, Default::default(), vb.pp(1))?;
let bn2 = batch_norm(dim, 1e-5, vb.pp(3))?;
Ok(candle_nn::func(move |xs| {
let ys = xs.apply(&conv1)?.gelu_erf()?.apply_t(&bn1, false)?;
(xs + ys)?.apply(&conv2)?.gelu_erf()?.apply_t(&bn2, false)
}))
}
fn convmixer(
nclasses: usize,
dim: usize,
depth: usize,
kernel_size: usize,
patch_size: usize,
vb: VarBuilder,
) -> Result<candle_nn::Func<'static>> {
let conv2d_cfg = Conv2dConfig {
stride: patch_size,
..Default::default()
};
let conv1 = candle_nn::conv2d(3, dim, patch_size, conv2d_cfg, vb.pp(0))?;
let bn1 = batch_norm(dim, 1e-5, vb.pp(2))?;
let blocks: Vec<_> = (0..depth)
.map(|index| block(dim, kernel_size, vb.pp(3 + index)))
.collect::<Result<Vec<_>>>()?;
let fc = candle_nn::linear(dim, nclasses, vb.pp(25))?;
Ok(candle_nn::func(move |xs| {
let mut xs = xs.apply(&conv1)?.gelu_erf()?.apply_t(&bn1, false)?;
for block in blocks.iter() {
xs = xs.apply(block)?
}
// This performs the adaptive average pooling with a target size of (1, 1).
xs.mean(3)?.mean(2)?.apply(&fc)
}))
}
pub fn c1536_20(nclasses: usize, vb: VarBuilder) -> Result<candle_nn::Func<'static>> {
convmixer(nclasses, 1536, 20, 9, 7, vb)
}
pub fn c1024_20(nclasses: usize, vb: VarBuilder) -> Result<candle_nn::Func<'static>> {
convmixer(nclasses, 1024, 20, 9, 14, vb)
}
| candle/candle-transformers/src/models/convmixer.rs/0 | {
"file_path": "candle/candle-transformers/src/models/convmixer.rs",
"repo_id": "candle",
"token_count": 1413
} | 34 |
use candle::DType;
use serde::Deserialize;
pub const DTYPE: DType = DType::F32;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum PositionEmbeddingType {
Absolute,
Alibi,
}
// https://github.com/huggingface/transformers/blob/main/src/transformers/models/persimmon/configuration_persimmon.py
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: usize,
pub hidden_act: candle_nn::Activation,
pub max_position_embeddings: usize,
pub initializer_range: f64,
pub layer_norm_eps: f64,
pub rms_norm_eps: f64,
pub use_cache: bool,
pub tie_word_embeddings: bool,
pub rope_theta: f64,
pub qk_layernorm: bool,
pub partial_rotary_factor: f64,
}
impl Config {
pub fn base_8b() -> Self {
// https://huggingface.co/adept/persimmon-8b-base/blob/main/config.json
Self {
hidden_act: candle_nn::Activation::Relu,
hidden_size: 4096,
initializer_range: 0.02,
intermediate_size: 16384,
layer_norm_eps: 1e-05,
max_position_embeddings: 16384,
num_attention_heads: 64,
num_hidden_layers: 36,
num_key_value_heads: 64,
qk_layernorm: true,
rms_norm_eps: 1e-06,
rope_theta: 25000.0,
tie_word_embeddings: false,
use_cache: true,
vocab_size: 262144,
partial_rotary_factor: 0.5,
}
}
}
| candle/candle-transformers/src/models/persimmon.rs/0 | {
"file_path": "candle/candle-transformers/src/models/persimmon.rs",
"repo_id": "candle",
"token_count": 814
} | 35 |
use candle::{DType, IndexOp, Result, Tensor, D};
use candle_nn::VarBuilder;
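/// Positional encoding based on random spatial frequencies (random Fourier features):
/// coordinates normalized to `[0, 1]` are mapped to `[-1, 1]`, projected through a fixed
/// random Gaussian matrix, and turned into `sin`/`cos` features.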
#[derive(Debug)]
struct PositionEmbeddingRandom {
positional_encoding_gaussian_matrix: Tensor,
}
impl PositionEmbeddingRandom {
fn new(num_pos_feats: usize, vb: VarBuilder) -> Result<Self> {
let positional_encoding_gaussian_matrix =
vb.get((2, num_pos_feats), "positional_encoding_gaussian_matrix")?;
Ok(Self {
positional_encoding_gaussian_matrix,
})
}
fn pe_encoding(&self, coords: &Tensor) -> Result<Tensor> {
let coords = coords.affine(2., -1.)?;
let coords = coords.broadcast_matmul(&self.positional_encoding_gaussian_matrix)?;
let coords = (coords * (2. * std::f64::consts::PI))?;
Tensor::cat(&[coords.sin()?, coords.cos()?], D::Minus1)
}
fn forward(&self, h: usize, w: usize) -> Result<Tensor> {
let device = self.positional_encoding_gaussian_matrix.device();
let x_embed = (Tensor::arange(0u32, w as u32, device)?.to_dtype(DType::F32)? + 0.5)?;
let y_embed = (Tensor::arange(0u32, h as u32, device)?.to_dtype(DType::F32)? + 0.5)?;
let x_embed = (x_embed / w as f64)?
.reshape((1, ()))?
.broadcast_as((h, w))?;
let y_embed = (y_embed / h as f64)?
.reshape(((), 1))?
.broadcast_as((h, w))?;
let coords = Tensor::stack(&[&x_embed, &y_embed], D::Minus1)?;
self.pe_encoding(&coords)?.permute((2, 0, 1))
}
fn forward_with_coords(
&self,
coords_input: &Tensor,
image_size: (usize, usize),
) -> Result<Tensor> {
let coords0 = (coords_input.narrow(D::Minus1, 0, 1)? / image_size.1 as f64)?;
let coords1 = (coords_input.narrow(D::Minus1, 1, 1)? / image_size.0 as f64)?;
let c = coords_input.dim(D::Minus1)?;
let coords_rest = coords_input.narrow(D::Minus1, 2, c - 2)?;
let coords = Tensor::cat(&[&coords0, &coords1, &coords_rest], D::Minus1)?;
self.pe_encoding(&coords)
}
}
#[derive(Debug)]
pub struct PromptEncoder {
pe_layer: PositionEmbeddingRandom,
point_embeddings: Vec<candle_nn::Embedding>,
not_a_point_embed: candle_nn::Embedding,
mask_downscaling_conv1: candle_nn::Conv2d,
mask_downscaling_ln1: super::LayerNorm2d,
mask_downscaling_conv2: candle_nn::Conv2d,
mask_downscaling_ln2: super::LayerNorm2d,
mask_downscaling_conv3: candle_nn::Conv2d,
no_mask_embed: candle_nn::Embedding,
image_embedding_size: (usize, usize),
input_image_size: (usize, usize),
embed_dim: usize,
span: tracing::Span,
}
impl PromptEncoder {
pub fn new(
embed_dim: usize,
image_embedding_size: (usize, usize),
input_image_size: (usize, usize),
mask_in_chans: usize,
vb: VarBuilder,
) -> Result<Self> {
let num_points_embeddings = 4;
let pe_layer = PositionEmbeddingRandom::new(embed_dim / 2, vb.pp("pe_layer"))?;
let not_a_point_embed = candle_nn::embedding(1, embed_dim, vb.pp("not_a_point_embed"))?;
let no_mask_embed = candle_nn::embedding(1, embed_dim, vb.pp("no_mask_embed"))?;
let cfg = candle_nn::Conv2dConfig {
stride: 2,
..Default::default()
};
let mask_downscaling_conv1 =
candle_nn::conv2d(1, mask_in_chans / 4, 2, cfg, vb.pp("mask_downscaling.0"))?;
let mask_downscaling_conv2 = candle_nn::conv2d(
mask_in_chans / 4,
mask_in_chans,
2,
cfg,
vb.pp("mask_downscaling.3"),
)?;
let mask_downscaling_conv3 = candle_nn::conv2d(
mask_in_chans,
embed_dim,
1,
Default::default(),
vb.pp("mask_downscaling.6"),
)?;
let mask_downscaling_ln1 =
super::LayerNorm2d::new(mask_in_chans / 4, 1e-6, vb.pp("mask_downscaling.1"))?;
let mask_downscaling_ln2 =
super::LayerNorm2d::new(mask_in_chans, 1e-6, vb.pp("mask_downscaling.4"))?;
let mut point_embeddings = Vec::with_capacity(num_points_embeddings);
let vb_e = vb.pp("point_embeddings");
for i in 0..num_points_embeddings {
let emb = candle_nn::embedding(1, embed_dim, vb_e.pp(i))?;
point_embeddings.push(emb)
}
let span = tracing::span!(tracing::Level::TRACE, "prompt-encoder");
Ok(Self {
pe_layer,
point_embeddings,
not_a_point_embed,
mask_downscaling_conv1,
mask_downscaling_ln1,
mask_downscaling_conv2,
mask_downscaling_ln2,
mask_downscaling_conv3,
no_mask_embed,
image_embedding_size,
input_image_size,
embed_dim,
span,
})
}
pub fn get_dense_pe(&self) -> Result<Tensor> {
self.pe_layer
.forward(self.image_embedding_size.0, self.image_embedding_size.1)?
.unsqueeze(0)
}
fn embed_masks(&self, masks: &Tensor) -> Result<Tensor> {
masks
.apply(&self.mask_downscaling_conv1)?
.apply(&self.mask_downscaling_ln1)?
.gelu()?
.apply(&self.mask_downscaling_conv2)?
.apply(&self.mask_downscaling_ln2)?
.gelu()?
.apply(&self.mask_downscaling_conv3)
}
fn embed_points(&self, points: &Tensor, labels: &Tensor, pad: bool) -> Result<Tensor> {
let points = (points + 0.5)?;
let dev = points.device();
let (points, labels) = if pad {
let padding_point = Tensor::zeros((points.dim(0)?, 1, 2), DType::F32, dev)?;
let padding_label = (Tensor::ones((labels.dim(0)?, 1), DType::F32, dev)? * (-1f64))?;
let points = Tensor::cat(&[&points, &padding_point], 1)?;
let labels = Tensor::cat(&[labels, &padding_label], 1)?;
(points, labels)
} else {
(points, labels.clone())
};
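// Labels < 0 mark padding points and get the not-a-point embedding; label 0 adds the
// negative-point embedding and label 1 the positive-point embedding.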
let point_embedding = self
.pe_layer
.forward_with_coords(&points, self.input_image_size)?;
let labels = labels.unsqueeze(2)?.broadcast_as(point_embedding.shape())?;
let zeros = point_embedding.zeros_like()?;
let point_embedding = labels.lt(0f32)?.where_cond(
&self
.not_a_point_embed
.embeddings()
.broadcast_as(zeros.shape())?,
&point_embedding,
)?;
let labels0 = labels.eq(0f32)?.where_cond(
&self.point_embeddings[0]
.embeddings()
.broadcast_as(zeros.shape())?,
&zeros,
)?;
let point_embedding = (point_embedding + labels0)?;
let labels1 = labels.eq(1f32)?.where_cond(
&self.point_embeddings[1]
.embeddings()
.broadcast_as(zeros.shape())?,
&zeros,
)?;
let point_embedding = (point_embedding + labels1)?;
Ok(point_embedding)
}
fn embed_boxes(&self, boxes: &Tensor) -> Result<Tensor> {
let boxes = (boxes + 0.5)?;
let coords = boxes.reshape(((), 2, 2))?;
let corner_embedding = self
.pe_layer
.forward_with_coords(&coords, self.input_image_size)?;
let ce1 = corner_embedding.i((.., 0))?;
let ce2 = corner_embedding.i((.., 1))?;
let ce1 = (ce1 + self.point_embeddings[2].embeddings())?;
let ce2 = (ce2 + self.point_embeddings[3].embeddings())?;
Tensor::cat(&[&ce1, &ce2], 1)
}
pub fn forward(
&self,
points: Option<(&Tensor, &Tensor)>,
boxes: Option<&Tensor>,
masks: Option<&Tensor>,
) -> Result<(Tensor, Tensor)> {
let _enter = self.span.enter();
let se_points = match points {
Some((coords, labels)) => Some(self.embed_points(coords, labels, boxes.is_none())?),
None => None,
};
let se_boxes = match boxes {
Some(boxes) => Some(self.embed_boxes(boxes)?),
None => None,
};
let sparse_embeddings = match (se_points, se_boxes) {
(Some(se_points), Some(se_boxes)) => Tensor::cat(&[se_points, se_boxes], 1)?,
(Some(se_points), None) => se_points,
(None, Some(se_boxes)) => se_boxes,
(None, None) => {
Tensor::zeros((1, 0, self.embed_dim), DType::F32, &candle::Device::Cpu)?
}
};
let dense_embeddings = match masks {
None => {
let emb = self.no_mask_embed.embeddings();
emb.reshape((1, (), 1, 1))?.expand((
1,
emb.elem_count(),
self.image_embedding_size.0,
self.image_embedding_size.1,
))?
}
Some(masks) => self.embed_masks(masks)?,
};
Ok((sparse_embeddings, dense_embeddings))
}
}
| candle/candle-transformers/src/models/segment_anything/prompt_encoder.rs/0 | {
"file_path": "candle/candle-transformers/src/models/segment_anything/prompt_encoder.rs",
"repo_id": "candle",
"token_count": 4719
} | 36 |
#![allow(dead_code)]
//! # Variational Auto-Encoder (VAE) Models.
//!
//! Auto-encoder models compress their input to a usually smaller latent space
//! before expanding it back to its original shape. This results in the latent values
//! compressing the original information.
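//!
//! Typical round trip (sketch): `encode` an image batch into a
//! `DiagonalGaussianDistribution`, draw a latent with `sample`, and map the latent back to
//! image space with `decode`.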
use super::unet_2d_blocks::{
DownEncoderBlock2D, DownEncoderBlock2DConfig, UNetMidBlock2D, UNetMidBlock2DConfig,
UpDecoderBlock2D, UpDecoderBlock2DConfig,
};
use candle::{Result, Tensor};
use candle_nn as nn;
use candle_nn::Module;
#[derive(Debug, Clone)]
struct EncoderConfig {
// down_block_types: DownEncoderBlock2D
block_out_channels: Vec<usize>,
layers_per_block: usize,
norm_num_groups: usize,
double_z: bool,
}
impl Default for EncoderConfig {
fn default() -> Self {
Self {
block_out_channels: vec![64],
layers_per_block: 2,
norm_num_groups: 32,
double_z: true,
}
}
}
#[derive(Debug)]
struct Encoder {
conv_in: nn::Conv2d,
down_blocks: Vec<DownEncoderBlock2D>,
mid_block: UNetMidBlock2D,
conv_norm_out: nn::GroupNorm,
conv_out: nn::Conv2d,
#[allow(dead_code)]
config: EncoderConfig,
}
impl Encoder {
fn new(
vs: nn::VarBuilder,
in_channels: usize,
out_channels: usize,
config: EncoderConfig,
) -> Result<Self> {
let conv_cfg = nn::Conv2dConfig {
padding: 1,
..Default::default()
};
let conv_in = nn::conv2d(
in_channels,
config.block_out_channels[0],
3,
conv_cfg,
vs.pp("conv_in"),
)?;
let mut down_blocks = vec![];
let vs_down_blocks = vs.pp("down_blocks");
for index in 0..config.block_out_channels.len() {
let out_channels = config.block_out_channels[index];
let in_channels = if index > 0 {
config.block_out_channels[index - 1]
} else {
config.block_out_channels[0]
};
let is_final = index + 1 == config.block_out_channels.len();
let cfg = DownEncoderBlock2DConfig {
num_layers: config.layers_per_block,
resnet_eps: 1e-6,
resnet_groups: config.norm_num_groups,
add_downsample: !is_final,
downsample_padding: 0,
..Default::default()
};
let down_block = DownEncoderBlock2D::new(
vs_down_blocks.pp(&index.to_string()),
in_channels,
out_channels,
cfg,
)?;
down_blocks.push(down_block)
}
let last_block_out_channels = *config.block_out_channels.last().unwrap();
let mid_cfg = UNetMidBlock2DConfig {
resnet_eps: 1e-6,
output_scale_factor: 1.,
attn_num_head_channels: None,
resnet_groups: Some(config.norm_num_groups),
..Default::default()
};
let mid_block =
UNetMidBlock2D::new(vs.pp("mid_block"), last_block_out_channels, None, mid_cfg)?;
let conv_norm_out = nn::group_norm(
config.norm_num_groups,
last_block_out_channels,
1e-6,
vs.pp("conv_norm_out"),
)?;
let conv_out_channels = if config.double_z {
2 * out_channels
} else {
out_channels
};
let conv_cfg = nn::Conv2dConfig {
padding: 1,
..Default::default()
};
let conv_out = nn::conv2d(
last_block_out_channels,
conv_out_channels,
3,
conv_cfg,
vs.pp("conv_out"),
)?;
Ok(Self {
conv_in,
down_blocks,
mid_block,
conv_norm_out,
conv_out,
config,
})
}
}
impl Encoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = xs.apply(&self.conv_in)?;
for down_block in self.down_blocks.iter() {
xs = xs.apply(down_block)?
}
let xs = self
.mid_block
.forward(&xs, None)?
.apply(&self.conv_norm_out)?;
nn::ops::silu(&xs)?.apply(&self.conv_out)
}
}
#[derive(Debug, Clone)]
struct DecoderConfig {
// up_block_types: UpDecoderBlock2D
block_out_channels: Vec<usize>,
layers_per_block: usize,
norm_num_groups: usize,
}
impl Default for DecoderConfig {
fn default() -> Self {
Self {
block_out_channels: vec![64],
layers_per_block: 2,
norm_num_groups: 32,
}
}
}
#[derive(Debug)]
struct Decoder {
conv_in: nn::Conv2d,
up_blocks: Vec<UpDecoderBlock2D>,
mid_block: UNetMidBlock2D,
conv_norm_out: nn::GroupNorm,
conv_out: nn::Conv2d,
#[allow(dead_code)]
config: DecoderConfig,
}
impl Decoder {
fn new(
vs: nn::VarBuilder,
in_channels: usize,
out_channels: usize,
config: DecoderConfig,
) -> Result<Self> {
let n_block_out_channels = config.block_out_channels.len();
let last_block_out_channels = *config.block_out_channels.last().unwrap();
let conv_cfg = nn::Conv2dConfig {
padding: 1,
..Default::default()
};
let conv_in = nn::conv2d(
in_channels,
last_block_out_channels,
3,
conv_cfg,
vs.pp("conv_in"),
)?;
let mid_cfg = UNetMidBlock2DConfig {
resnet_eps: 1e-6,
output_scale_factor: 1.,
attn_num_head_channels: None,
resnet_groups: Some(config.norm_num_groups),
..Default::default()
};
let mid_block =
UNetMidBlock2D::new(vs.pp("mid_block"), last_block_out_channels, None, mid_cfg)?;
let mut up_blocks = vec![];
let vs_up_blocks = vs.pp("up_blocks");
let reversed_block_out_channels: Vec<_> =
config.block_out_channels.iter().copied().rev().collect();
for index in 0..n_block_out_channels {
let out_channels = reversed_block_out_channels[index];
let in_channels = if index > 0 {
reversed_block_out_channels[index - 1]
} else {
reversed_block_out_channels[0]
};
let is_final = index + 1 == n_block_out_channels;
let cfg = UpDecoderBlock2DConfig {
num_layers: config.layers_per_block + 1,
resnet_eps: 1e-6,
resnet_groups: config.norm_num_groups,
add_upsample: !is_final,
..Default::default()
};
let up_block = UpDecoderBlock2D::new(
vs_up_blocks.pp(&index.to_string()),
in_channels,
out_channels,
cfg,
)?;
up_blocks.push(up_block)
}
let conv_norm_out = nn::group_norm(
config.norm_num_groups,
config.block_out_channels[0],
1e-6,
vs.pp("conv_norm_out"),
)?;
let conv_cfg = nn::Conv2dConfig {
padding: 1,
..Default::default()
};
let conv_out = nn::conv2d(
config.block_out_channels[0],
out_channels,
3,
conv_cfg,
vs.pp("conv_out"),
)?;
Ok(Self {
conv_in,
up_blocks,
mid_block,
conv_norm_out,
conv_out,
config,
})
}
}
impl Decoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = self.mid_block.forward(&self.conv_in.forward(xs)?, None)?;
for up_block in self.up_blocks.iter() {
xs = up_block.forward(&xs)?
}
let xs = self.conv_norm_out.forward(&xs)?;
let xs = nn::ops::silu(&xs)?;
self.conv_out.forward(&xs)
}
}
#[derive(Debug, Clone)]
pub struct AutoEncoderKLConfig {
pub block_out_channels: Vec<usize>,
pub layers_per_block: usize,
pub latent_channels: usize,
pub norm_num_groups: usize,
}
impl Default for AutoEncoderKLConfig {
fn default() -> Self {
Self {
block_out_channels: vec![64],
layers_per_block: 1,
latent_channels: 4,
norm_num_groups: 32,
}
}
}
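/// Gaussian distribution with diagonal covariance, parameterized by per-element mean and
/// log-variance as produced by the encoder; `sample` draws latents with the
/// reparameterization trick.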
pub struct DiagonalGaussianDistribution {
mean: Tensor,
std: Tensor,
}
impl DiagonalGaussianDistribution {
pub fn new(parameters: &Tensor) -> Result<Self> {
let mut parameters = parameters.chunk(2, 1)?.into_iter();
let mean = parameters.next().unwrap();
let logvar = parameters.next().unwrap();
let std = (logvar * 0.5)?.exp()?;
Ok(DiagonalGaussianDistribution { mean, std })
}
pub fn sample(&self) -> Result<Tensor> {
let sample = self.mean.randn_like(0., 1.);
&self.mean + &self.std * sample
}
}
// https://github.com/huggingface/diffusers/blob/970e30606c2944e3286f56e8eb6d3dc6d1eb85f7/src/diffusers/models/vae.py#L485
// This implementation is specific to the config used in stable-diffusion-v1-5
// https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/vae/config.json
#[derive(Debug)]
pub struct AutoEncoderKL {
encoder: Encoder,
decoder: Decoder,
quant_conv: nn::Conv2d,
post_quant_conv: nn::Conv2d,
pub config: AutoEncoderKLConfig,
}
impl AutoEncoderKL {
pub fn new(
vs: nn::VarBuilder,
in_channels: usize,
out_channels: usize,
config: AutoEncoderKLConfig,
) -> Result<Self> {
let latent_channels = config.latent_channels;
let encoder_cfg = EncoderConfig {
block_out_channels: config.block_out_channels.clone(),
layers_per_block: config.layers_per_block,
norm_num_groups: config.norm_num_groups,
double_z: true,
};
let encoder = Encoder::new(vs.pp("encoder"), in_channels, latent_channels, encoder_cfg)?;
let decoder_cfg = DecoderConfig {
block_out_channels: config.block_out_channels.clone(),
layers_per_block: config.layers_per_block,
norm_num_groups: config.norm_num_groups,
};
let decoder = Decoder::new(vs.pp("decoder"), latent_channels, out_channels, decoder_cfg)?;
let conv_cfg = Default::default();
let quant_conv = nn::conv2d(
2 * latent_channels,
2 * latent_channels,
1,
conv_cfg,
vs.pp("quant_conv"),
)?;
let post_quant_conv = nn::conv2d(
latent_channels,
latent_channels,
1,
conv_cfg,
vs.pp("post_quant_conv"),
)?;
Ok(Self {
encoder,
decoder,
quant_conv,
post_quant_conv,
config,
})
}
/// Returns the distribution in the latent space.
pub fn encode(&self, xs: &Tensor) -> Result<DiagonalGaussianDistribution> {
let xs = self.encoder.forward(xs)?;
let parameters = self.quant_conv.forward(&xs)?;
DiagonalGaussianDistribution::new(¶meters)
}
/// Takes as input some sampled values.
pub fn decode(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.post_quant_conv.forward(xs)?;
self.decoder.forward(&xs)
}
}
| candle/candle-transformers/src/models/stable_diffusion/vae.rs/0 | {
"file_path": "candle/candle-transformers/src/models/stable_diffusion/vae.rs",
"repo_id": "candle",
"token_count": 6006
} | 37 |
use super::common::LayerNormNoWeights;
use candle::{Module, Result, Tensor};
use candle_nn::VarBuilder;
#[derive(Debug)]
pub struct MixingResidualBlock {
norm1: LayerNormNoWeights,
depthwise_conv: candle_nn::Conv2d,
norm2: LayerNormNoWeights,
channelwise_lin1: candle_nn::Linear,
channelwise_lin2: candle_nn::Linear,
gammas: Vec<f32>,
}
impl MixingResidualBlock {
pub fn new(inp: usize, embed_dim: usize, vb: VarBuilder) -> Result<Self> {
let norm1 = LayerNormNoWeights::new(inp)?;
let norm2 = LayerNormNoWeights::new(inp)?;
let cfg = candle_nn::Conv2dConfig {
groups: inp,
..Default::default()
};
let depthwise_conv = candle_nn::conv2d(inp, inp, 3, cfg, vb.pp("depthwise.1"))?;
let channelwise_lin1 = candle_nn::linear(inp, embed_dim, vb.pp("channelwise.0"))?;
let channelwise_lin2 = candle_nn::linear(embed_dim, inp, vb.pp("channelwise.2"))?;
let gammas = vb.get(6, "gammas")?.to_vec1::<f32>()?;
Ok(Self {
norm1,
depthwise_conv,
norm2,
channelwise_lin1,
channelwise_lin2,
gammas,
})
}
}
impl Module for MixingResidualBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mods = &self.gammas;
let x_temp = xs
.permute((0, 2, 3, 1))?
.apply(&self.norm1)?
.permute((0, 3, 1, 2))?
.affine(1. + mods[0] as f64, mods[1] as f64)?;
let x_temp = candle_nn::ops::replication_pad2d(&x_temp, 1)?;
let xs = (xs + x_temp.apply(&self.depthwise_conv)? * mods[2] as f64)?;
let x_temp = xs
.permute((0, 2, 3, 1))?
.apply(&self.norm2)?
.permute((0, 3, 1, 2))?
.affine(1. + mods[3] as f64, mods[4] as f64)?;
let x_temp = x_temp
.permute((0, 2, 3, 1))?
.contiguous()?
.apply(&self.channelwise_lin1)?
.gelu()?
.apply(&self.channelwise_lin2)?
.permute((0, 3, 1, 2))?;
xs + x_temp * mods[5] as f64
}
}
#[derive(Debug)]
pub struct PaellaVQ {
in_block_conv: candle_nn::Conv2d,
out_block_conv: candle_nn::Conv2d,
down_blocks: Vec<(Option<candle_nn::Conv2d>, MixingResidualBlock)>,
down_blocks_conv: candle_nn::Conv2d,
down_blocks_bn: candle_nn::BatchNorm,
up_blocks_conv: candle_nn::Conv2d,
up_blocks: Vec<(Vec<MixingResidualBlock>, Option<candle_nn::ConvTranspose2d>)>,
}
impl PaellaVQ {
pub fn new(vb: VarBuilder) -> Result<Self> {
const IN_CHANNELS: usize = 3;
const OUT_CHANNELS: usize = 3;
const LATENT_CHANNELS: usize = 4;
const EMBED_DIM: usize = 384;
const BOTTLENECK_BLOCKS: usize = 12;
const C_LEVELS: [usize; 2] = [EMBED_DIM / 2, EMBED_DIM];
let in_block_conv = candle_nn::conv2d(
IN_CHANNELS * 4,
C_LEVELS[0],
1,
Default::default(),
vb.pp("in_block.1"),
)?;
let out_block_conv = candle_nn::conv2d(
C_LEVELS[0],
OUT_CHANNELS * 4,
1,
Default::default(),
vb.pp("out_block.0"),
)?;
let mut down_blocks = Vec::new();
let vb_d = vb.pp("down_blocks");
let mut d_idx = 0;
for (i, &c_level) in C_LEVELS.iter().enumerate() {
let conv_block = if i > 0 {
let cfg = candle_nn::Conv2dConfig {
padding: 1,
stride: 2,
..Default::default()
};
let block = candle_nn::conv2d(C_LEVELS[i - 1], c_level, 4, cfg, vb_d.pp(d_idx))?;
d_idx += 1;
Some(block)
} else {
None
};
let res_block = MixingResidualBlock::new(c_level, c_level * 4, vb_d.pp(d_idx))?;
d_idx += 1;
down_blocks.push((conv_block, res_block))
}
let vb_d = vb_d.pp(d_idx);
let down_blocks_conv = candle_nn::conv2d_no_bias(
C_LEVELS[1],
LATENT_CHANNELS,
1,
Default::default(),
vb_d.pp(0),
)?;
let down_blocks_bn = candle_nn::batch_norm(LATENT_CHANNELS, 1e-5, vb_d.pp(1))?;
let mut up_blocks = Vec::new();
let vb_u = vb.pp("up_blocks");
let mut u_idx = 0;
let up_blocks_conv = candle_nn::conv2d(
LATENT_CHANNELS,
C_LEVELS[1],
1,
Default::default(),
vb_u.pp(u_idx).pp(0),
)?;
u_idx += 1;
for (i, &c_level) in C_LEVELS.iter().rev().enumerate() {
let mut res_blocks = Vec::new();
let n_bottleneck_blocks = if i == 0 { BOTTLENECK_BLOCKS } else { 1 };
for _j in 0..n_bottleneck_blocks {
let res_block = MixingResidualBlock::new(c_level, c_level * 4, vb_u.pp(u_idx))?;
u_idx += 1;
res_blocks.push(res_block)
}
let conv_block = if i < C_LEVELS.len() - 1 {
let cfg = candle_nn::ConvTranspose2dConfig {
padding: 1,
stride: 2,
..Default::default()
};
let block = candle_nn::conv_transpose2d(
c_level,
C_LEVELS[C_LEVELS.len() - i - 2],
4,
cfg,
vb_u.pp(u_idx),
)?;
u_idx += 1;
Some(block)
} else {
None
};
up_blocks.push((res_blocks, conv_block))
}
Ok(Self {
in_block_conv,
down_blocks,
down_blocks_conv,
down_blocks_bn,
up_blocks,
up_blocks_conv,
out_block_conv,
})
}
pub fn encode(&self, xs: &Tensor) -> Result<Tensor> {
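        // Pixel-unshuffle folds each 2x2 spatial patch into the channel dimension (3 -> 12
        // channels) before the 1x1 input conv.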
let mut xs = candle_nn::ops::pixel_unshuffle(xs, 2)?.apply(&self.in_block_conv)?;
for down_block in self.down_blocks.iter() {
if let Some(conv) = &down_block.0 {
xs = xs.apply(conv)?
}
xs = xs.apply(&down_block.1)?
}
xs.apply(&self.down_blocks_conv)?
.apply_t(&self.down_blocks_bn, false)
}
pub fn decode(&self, xs: &Tensor) -> Result<Tensor> {
// TODO: quantizer if we want to support `force_not_quantize=False`.
let mut xs = xs.apply(&self.up_blocks_conv)?;
for up_block in self.up_blocks.iter() {
for b in up_block.0.iter() {
xs = xs.apply(b)?;
}
if let Some(conv) = &up_block.1 {
xs = xs.apply(conv)?
}
}
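        // `out_block_conv` projects back to 12 channels, then pixel-shuffle inverts the
        // unshuffle from `encode` to recover a 3-channel image.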
xs.apply(&self.out_block_conv)?
.apply(&|xs: &_| candle_nn::ops::pixel_shuffle(xs, 2))
}
}
impl Module for PaellaVQ {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
self.decode(&self.encode(xs)?)
}
}
| candle/candle-transformers/src/models/wuerstchen/paella_vq.rs/0 | {
"file_path": "candle/candle-transformers/src/models/wuerstchen/paella_vq.rs",
"repo_id": "candle",
"token_count": 4078
} | 38 |
use candle_transformers::models::bert;
use wasm_bindgen::prelude::*;
pub use bert::{BertModel, Config, DTYPE};
pub use tokenizers::{PaddingParams, Tokenizer};
#[wasm_bindgen]
extern "C" {
// Use `js_namespace` here to bind `console.log(..)` instead of just
// `log(..)`
#[wasm_bindgen(js_namespace = console)]
pub fn log(s: &str);
}
#[macro_export]
macro_rules! console_log {
// Note that this is using the `log` function imported above during
// `bare_bones`
($($t:tt)*) => ($crate::log(&format_args!($($t)*).to_string()))
}
| candle/candle-wasm-examples/bert/src/lib.rs/0 | {
"file_path": "candle/candle-wasm-examples/bert/src/lib.rs",
"repo_id": "candle",
"token_count": 226
} | 39 |
use crate::console_log;
use crate::worker::{ModelData, Worker, WorkerInput, WorkerOutput};
use std::str::FromStr;
use wasm_bindgen::prelude::*;
use wasm_bindgen_futures::JsFuture;
use yew::{html, Component, Context, Html};
use yew_agent::{Bridge, Bridged};
async fn fetch_url(url: &str) -> Result<Vec<u8>, JsValue> {
use web_sys::{Request, RequestCache, RequestInit, RequestMode, Response};
let window = web_sys::window().ok_or("window")?;
let mut opts = RequestInit::new();
let opts = opts
.method("GET")
.mode(RequestMode::Cors)
.cache(RequestCache::NoCache);
let request = Request::new_with_str_and_init(url, opts)?;
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
// `resp_value` is a `Response` object.
assert!(resp_value.is_instance_of::<Response>());
let resp: Response = resp_value.dyn_into()?;
let data = JsFuture::from(resp.blob()?).await?;
let blob = web_sys::Blob::from(data);
let array_buffer = JsFuture::from(blob.array_buffer()).await?;
let data = js_sys::Uint8Array::new(&array_buffer).to_vec();
Ok(data)
}
pub enum Msg {
Refresh,
Run,
UpdateStatus(String),
SetModel(ModelData),
WorkerIn(WorkerInput),
WorkerOut(Result<WorkerOutput, String>),
}
pub struct CurrentDecode {
start_time: Option<f64>,
}
pub struct App {
status: String,
loaded: bool,
temperature: std::rc::Rc<std::cell::RefCell<f64>>,
top_p: std::rc::Rc<std::cell::RefCell<f64>>,
prompt: std::rc::Rc<std::cell::RefCell<String>>,
generated: String,
n_tokens: usize,
current_decode: Option<CurrentDecode>,
worker: Box<dyn Bridge<Worker>>,
}
async fn model_data_load() -> Result<ModelData, JsValue> {
let tokenizer = fetch_url("tokenizer.json").await?;
let model = fetch_url("model.bin").await?;
console_log!("{}", model.len());
Ok(ModelData { tokenizer, model })
}
fn performance_now() -> Option<f64> {
let window = web_sys::window()?;
let performance = window.performance()?;
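    // `performance.now()` reports milliseconds; convert to seconds.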
Some(performance.now() / 1000.)
}
impl Component for App {
type Message = Msg;
type Properties = ();
fn create(ctx: &Context<Self>) -> Self {
let status = "loading weights".to_string();
let cb = {
let link = ctx.link().clone();
move |e| link.send_message(Self::Message::WorkerOut(e))
};
let worker = Worker::bridge(std::rc::Rc::new(cb));
Self {
status,
n_tokens: 0,
temperature: std::rc::Rc::new(std::cell::RefCell::new(0.)),
top_p: std::rc::Rc::new(std::cell::RefCell::new(1.0)),
prompt: std::rc::Rc::new(std::cell::RefCell::new("".to_string())),
generated: String::new(),
current_decode: None,
worker,
loaded: false,
}
}
fn rendered(&mut self, ctx: &Context<Self>, first_render: bool) {
if first_render {
ctx.link().send_future(async {
match model_data_load().await {
Err(err) => {
let status = format!("{err:?}");
Msg::UpdateStatus(status)
}
Ok(model_data) => Msg::SetModel(model_data),
}
});
}
}
fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool {
match msg {
Msg::SetModel(md) => {
self.status = "weights loaded successfully!".to_string();
self.loaded = true;
console_log!("loaded weights");
self.worker.send(WorkerInput::ModelData(md));
true
}
Msg::Run => {
if self.current_decode.is_some() {
self.status = "already generating some sample at the moment".to_string()
} else {
let start_time = performance_now();
self.current_decode = Some(CurrentDecode { start_time });
self.status = "generating...".to_string();
self.n_tokens = 0;
self.generated.clear();
let temp = *self.temperature.borrow();
let top_p = *self.top_p.borrow();
let prompt = self.prompt.borrow().clone();
console_log!("temp: {}, top_p: {}, prompt: {}", temp, top_p, prompt);
ctx.link()
.send_message(Msg::WorkerIn(WorkerInput::Run(temp, top_p, prompt)))
}
true
}
Msg::WorkerOut(output) => {
match output {
Ok(WorkerOutput::WeightsLoaded) => self.status = "weights loaded!".to_string(),
Ok(WorkerOutput::GenerationDone(Err(err))) => {
self.status = format!("error in worker process: {err}");
self.current_decode = None
}
Ok(WorkerOutput::GenerationDone(Ok(()))) => {
let dt = self.current_decode.as_ref().and_then(|current_decode| {
current_decode.start_time.and_then(|start_time| {
performance_now().map(|stop_time| stop_time - start_time)
})
});
self.status = match dt {
None => "generation succeeded!".to_string(),
Some(dt) => format!(
"generation succeeded in {:.2}s ({:.1} ms/token)",
dt,
dt * 1000.0 / (self.n_tokens as f64)
),
};
self.current_decode = None
}
Ok(WorkerOutput::Generated(token)) => {
self.n_tokens += 1;
self.generated.push_str(&token)
}
Err(err) => {
self.status = format!("error in worker {err:?}");
}
}
true
}
Msg::WorkerIn(inp) => {
self.worker.send(inp);
true
}
Msg::UpdateStatus(status) => {
self.status = status;
true
}
Msg::Refresh => true,
}
}
fn view(&self, ctx: &Context<Self>) -> Html {
use yew::TargetCast;
let temperature = self.temperature.clone();
let oninput_temperature = ctx.link().callback(move |e: yew::InputEvent| {
let input: web_sys::HtmlInputElement = e.target_unchecked_into();
if let Ok(temp) = f64::from_str(&input.value()) {
*temperature.borrow_mut() = temp
}
Msg::Refresh
});
let top_p = self.top_p.clone();
let oninput_top_p = ctx.link().callback(move |e: yew::InputEvent| {
let input: web_sys::HtmlInputElement = e.target_unchecked_into();
if let Ok(top_p_input) = f64::from_str(&input.value()) {
*top_p.borrow_mut() = top_p_input
}
Msg::Refresh
});
let prompt = self.prompt.clone();
let oninput_prompt = ctx.link().callback(move |e: yew::InputEvent| {
let input: web_sys::HtmlInputElement = e.target_unchecked_into();
*prompt.borrow_mut() = input.value();
Msg::Refresh
});
html! {
<div style="margin: 2%;">
<div><p>{"Running "}
<a href="https://github.com/karpathy/llama2.c" target="_blank">{"llama2.c"}</a>
{" in the browser using rust/wasm with "}
<a href="https://github.com/huggingface/candle" target="_blank">{"candle!"}</a>
</p>
<p>{"Once the weights have loaded, click on the run button to start generating content."}
</p>
</div>
{"temperature \u{00a0} "}
<input type="range" min="0." max="1.2" step="0.1" value={self.temperature.borrow().to_string()} oninput={oninput_temperature} id="temp"/>
{format!(" \u{00a0} {}", self.temperature.borrow())}
<br/ >
{"top_p \u{00a0} "}
<input type="range" min="0." max="1.0" step="0.05" value={self.top_p.borrow().to_string()} oninput={oninput_top_p} id="top_p"/>
{format!(" \u{00a0} {}", self.top_p.borrow())}
<br/ >
{"prompt: "}<input type="text" value={self.prompt.borrow().to_string()} oninput={oninput_prompt} id="prompt"/>
<br/ >
{
if self.loaded{
html!(<button class="button" onclick={ctx.link().callback(move |_| Msg::Run)}> { "run" }</button>)
}else{
html! { <progress id="progress-bar" aria-label="Loading weights..."></progress> }
}
}
<br/ >
<h3>
{&self.status}
</h3>
{
if self.current_decode.is_some() {
html! { <progress id="progress-bar" aria-label="generating…"></progress> }
} else {
html! {}
}
}
<blockquote>
<p> { self.generated.chars().map(|c|
if c == '\r' || c == '\n' {
html! { <br/> }
} else {
html! { {c} }
}).collect::<Html>()
} </p>
</blockquote>
</div>
}
}
}
| candle/candle-wasm-examples/llama2-c/src/app.rs/0 | {
"file_path": "candle/candle-wasm-examples/llama2-c/src/app.rs",
"repo_id": "candle",
"token_count": 5458
} | 40 |
## Running Whisper Examples
Here, we provide two examples of how to run Whisper using a Candle-compiled WASM binary and runtimes.
### Pure Rust UI
To build and test the UI made in Rust you will need [Trunk](https://trunkrs.dev/#install)
From the `candle-wasm-examples/whisper` directory run:
Download assets:
```bash
# mel filters
wget -c https://huggingface.co/spaces/lmz/candle-whisper/resolve/main/mel_filters.safetensors
# Model and tokenizer tiny.en
wget -c https://huggingface.co/openai/whisper-tiny.en/resolve/main/model.safetensors -P whisper-tiny.en
wget -c https://huggingface.co/openai/whisper-tiny.en/raw/main/tokenizer.json -P whisper-tiny.en
wget -c https://huggingface.co/openai/whisper-tiny.en/raw/main/config.json -P whisper-tiny.en
# Model and tokenizer tiny multilingual
wget -c https://huggingface.co/openai/whisper-tiny/resolve/main/model.safetensors -P whisper-tiny
wget -c https://huggingface.co/openai/whisper-tiny/raw/main/tokenizer.json -P whisper-tiny
wget -c https://huggingface.co/openai/whisper-tiny/raw/main/config.json -P whisper-tiny
#quantized
wget -c https://huggingface.co/lmz/candle-whisper/resolve/main/model-tiny-en-q80.gguf -P quantized
wget -c https://huggingface.co/lmz/candle-whisper/raw/main/tokenizer-tiny-en.json -P quantized
wget -c https://huggingface.co/lmz/candle-whisper/raw/main/config-tiny-en.json -P quantized
# Audio samples
wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_gb0.wav -P audios
wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_a13.wav -P audios
wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_gb1.wav -P audios
wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_hp0.wav -P audios
wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_jfk.wav -P audios
wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_mm0.wav -P audios
```
Run hot reload server:
```bash
trunk serve --release --public-url / --port 8080
```
### Vanilla JS and WebWorkers
To build and test the UI made in Vanilla JS and WebWorkers, first we need to build the WASM library:
```bash
sh build-lib.sh
```
This will bundle the library under `./build` and we can import it inside our WebWorker like a normal JS module:
```js
import init, { Decoder } from "./build/m.js";
```
The full example can be found under `./lib-example.html`. All needed assets are fetched from the web, so no need to download anything.
Finally, you can preview the example by running a local HTTP server. For example:
```bash
python -m http.server
```
Then open `http://localhost:8000/lib-example.html` in your browser.
| candle/candle-wasm-examples/whisper/README.md/0 | {
"file_path": "candle/candle-wasm-examples/whisper/README.md",
"repo_id": "candle",
"token_count": 1023
} | 41 |
{
"moz:firefoxOptions": {
"prefs": {
"media.navigator.streams.fake": true,
"media.navigator.permission.disabled": true
},
"args": []
},
"goog:chromeOptions": {
"args": [
"--use-fake-device-for-media-stream",
"--use-fake-ui-for-media-stream"
]
}
}
| candle/candle-wasm-tests/webdriver.json/0 | {
"file_path": "candle/candle-wasm-tests/webdriver.json",
"repo_id": "candle",
"token_count": 143
} | 42 |
ENV_LOCAL_PATH=/app/.env.local
if test -z "${DOTENV_LOCAL}" ; then
if ! test -f "${ENV_LOCAL_PATH}" ; then
echo "DOTENV_LOCAL was not found in the ENV variables and .env.local is not set using a bind volume. We are using the default .env config."
fi;
else
echo "DOTENV_LOCAL was found in the ENV variables. Creating .env.local file."
cat <<< "$DOTENV_LOCAL" > ${ENV_LOCAL_PATH}
fi;
if [ "$INCLUDE_DB" = "true" ] ; then
echo "INCLUDE_DB is set to true."
MONGODB_CONFIG="MONGODB_URL=mongodb://localhost:27017"
if ! grep -q "^${MONGODB_CONFIG}$" ${ENV_LOCAL_PATH}; then
echo "Appending MONGODB_URL"
touch /app/.env.local
echo -e "\n${MONGODB_CONFIG}" >> ${ENV_LOCAL_PATH}
fi
mkdir -p /data/db
mongod &
echo "Starting local MongoDB instance"
fi;
npm run build
npm run preview -- --host 0.0.0.0 --port 3000 | chat-ui/entrypoint.sh/0 | {
"file_path": "chat-ui/entrypoint.sh",
"repo_id": "chat-ui",
"token_count": 385
} | 43 |
<script lang="ts">
import CarbonContinue from "~icons/carbon/continue";
export let classNames = "";
</script>
<button
type="button"
on:click
class="btn flex h-8 rounded-lg border bg-white px-3 py-1 text-gray-500 shadow-sm transition-all hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-700 dark:text-gray-300 dark:hover:bg-gray-600 {classNames}"
>
<CarbonContinue class="mr-2 text-xs " /> Continue
</button>
| chat-ui/src/lib/components/ContinueBtn.svelte/0 | {
"file_path": "chat-ui/src/lib/components/ContinueBtn.svelte",
"repo_id": "chat-ui",
"token_count": 149
} | 44 |
<script lang="ts">
import { fade } from "svelte/transition";
import IconDazzled from "$lib/components/icons/IconDazzled.svelte";
export let message = "";
</script>
<div
transition:fade|global={{ duration: 300 }}
class="pointer-events-none fixed right-0 top-12 z-20 bg-gradient-to-bl from-red-500/20 via-red-500/0 to-red-500/0 pb-36 pl-36 pr-2 pt-2 md:top-0 md:pr-8 md:pt-5"
>
<div
class="pointer-events-auto flex items-center rounded-full bg-white/90 px-3 py-1 shadow-sm dark:bg-gray-900/80"
>
<IconDazzled classNames="text-2xl mr-2" />
<h2 class="font-semibold">{message}</h2>
</div>
</div>
| chat-ui/src/lib/components/Toast.svelte/0 | {
"file_path": "chat-ui/src/lib/components/Toast.svelte",
"repo_id": "chat-ui",
"token_count": 259
} | 45 |
<script lang="ts">
export let classNames = "";
</script>
<svg
xmlns="http://www.w3.org/2000/svg"
class={classNames}
width="1em"
height="1em"
fill="none"
viewBox="0 0 32 32"
><path
fill="currentColor"
fill-rule="evenodd"
d="M3.143 20.286h4.286v2.142H3.143A2.143 2.143 0 0 1 1 20.287V3.143A2.143 2.143 0 0 1 3.143 1h17.143a2.143 2.143 0 0 1 2.142 2.143v4.286h-2.142V3.143H3.143v17.143Zm9.643-12.857v3.214H16v2.143h-3.214V16h-2.143v-3.214H7.429v-2.143h3.214V7.429h2.143Zm14.185 2.639 3.533 3.532a1.7 1.7 0 0 1 0 2.4L15.5 31H9.57v-5.928l15-15.004a1.7 1.7 0 0 1 2.4 0Zm-15.257 18.79h2.897l10.116-10.116-2.899-2.897L11.714 25.96v2.897ZM23.346 14.33l2.897 2.897 2.429-2.43-2.897-2.896-2.43 2.429Z"
clip-rule="evenodd"
/></svg
>
| chat-ui/src/lib/components/icons/IconNew.svelte/0 | {
"file_path": "chat-ui/src/lib/components/icons/IconNew.svelte",
"repo_id": "chat-ui",
"token_count": 426
} | 46 |
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import type OpenAI from "openai";
import type { Stream } from "openai/streaming";
/**
* Transform a stream of OpenAI.Chat.ChatCompletion into a stream of TextGenerationStreamOutput
*/
export async function* openAIChatToTextGenerationStream(
completionStream: Stream<OpenAI.Chat.Completions.ChatCompletionChunk>
) {
let generatedText = "";
let tokenId = 0;
for await (const completion of completionStream) {
const { choices } = completion;
const content = choices[0]?.delta?.content ?? "";
const last = choices[0]?.finish_reason === "stop";
if (content) {
generatedText = generatedText + content;
}
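		// Only the chunk with finish_reason === "stop" carries the accumulated generated_text;
		// intermediate chunks leave it null.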
const output: TextGenerationStreamOutput = {
token: {
id: tokenId++,
text: content ?? "",
logprob: 0,
special: false,
},
generated_text: last ? generatedText : null,
details: null,
};
yield output;
}
}
| chat-ui/src/lib/server/endpoints/openai/openAIChatToTextGenerationStream.ts/0 | {
"file_path": "chat-ui/src/lib/server/endpoints/openai/openAIChatToTextGenerationStream.ts",
"repo_id": "chat-ui",
"token_count": 320
} | 47 |
import { writable } from "svelte/store";
export const isAborted = writable<boolean>(false);
| chat-ui/src/lib/stores/isAborted.ts/0 | {
"file_path": "chat-ui/src/lib/stores/isAborted.ts",
"repo_id": "chat-ui",
"token_count": 30
} | 48 |
import { defaultModel } from "$lib/server/models";
import type { Assistant } from "./Assistant";
import type { Timestamps } from "./Timestamps";
import type { User } from "./User";
export interface Settings extends Timestamps {
userId?: User["_id"];
sessionId?: string;
/**
	 * Note: Only conversations with this setting explicitly set to true should be shared.
*
* This setting is explicitly set to true when users accept the ethics modal.
* */
shareConversationsWithModelAuthors: boolean;
ethicsModalAcceptedAt: Date | null;
activeModel: string;
hideEmojiOnSidebar?: boolean;
// model name and system prompts
customPrompts?: Record<string, string>;
assistants?: Assistant["_id"][];
}
// TODO: move this to a constant file along with other constants
export const DEFAULT_SETTINGS = {
shareConversationsWithModelAuthors: true,
activeModel: defaultModel.id,
hideEmojiOnSidebar: false,
customPrompts: {},
assistants: [],
};
| chat-ui/src/lib/types/Settings.ts/0 | {
"file_path": "chat-ui/src/lib/types/Settings.ts",
"repo_id": "chat-ui",
"token_count": 289
} | 49 |
import * as fs from "fs";
import { setGlobalDispatcher, Agent } from "undici";
/**
* Load client certificates for mutual TLS authentication. This function must be called before any HTTP requests are made.
* This is a global setting that affects all HTTP requests made by the application using the native fetch API.
*
* @param clientCertPath Path to client certificate
* @param clientKeyPath Path to client key
* @param caCertPath Path to CA certificate [optional]
* @param clientKeyPassword Password for client key [optional]
* @param rejectUnauthorized Reject unauthorized certificates.
* Only use for testing/development, not recommended in production environments [optional]
*
* @returns void
*
* @example
* ```typescript
* loadClientCertificates("cert.pem", "key.pem", "ca.pem", "password", false);
* ```
*
* @see
* [Undici Agent](https://undici.nodejs.org/#/docs/api/Agent)
* @see
* [Undici Dispatcher](https://undici.nodejs.org/#/docs/api/Dispatcher)
* @see
* [NodeJS Native Fetch API](https://nodejs.org/docs/latest-v19.x/api/globals.html#fetch)
*/
export function loadClientCertificates(
clientCertPath: string,
clientKeyPath: string,
caCertPath?: string,
clientKeyPassword?: string,
rejectUnauthorized?: boolean
): void {
const clientCert = fs.readFileSync(clientCertPath);
const clientKey = fs.readFileSync(clientKeyPath);
const caCert = caCertPath ? fs.readFileSync(caCertPath) : undefined;
const agent = new Agent({
connect: {
cert: clientCert,
key: clientKey,
ca: caCert,
passphrase: clientKeyPassword,
rejectUnauthorized,
},
});
setGlobalDispatcher(agent);
}
| chat-ui/src/lib/utils/loadClientCerts.ts/0 | {
"file_path": "chat-ui/src/lib/utils/loadClientCerts.ts",
"repo_id": "chat-ui",
"token_count": 551
} | 50 |
import { models } from "$lib/server/models";
export async function GET() {
const res = models.map((model) => ({
id: model.id,
name: model.name,
websiteUrl: model.websiteUrl,
modelUrl: model.modelUrl,
datasetName: model.datasetName,
datasetUrl: model.datasetUrl,
displayName: model.displayName,
description: model.description,
promptExamples: model.promptExamples,
preprompt: model.preprompt,
multimodal: model.multimodal,
unlisted: model.unlisted,
}));
return Response.json(res);
}
| chat-ui/src/routes/api/models/+server.ts/0 | {
"file_path": "chat-ui/src/routes/api/models/+server.ts",
"repo_id": "chat-ui",
"token_count": 193
} | 51 |
import { authCondition } from "$lib/server/auth";
import { collections } from "$lib/server/database";
import { error } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
/**
* Ideally, we'd be able to detect the client-side abort, see https://github.com/huggingface/chat-ui/pull/88#issuecomment-1523173850
*/
export async function POST({ params, locals }) {
const conversationId = new ObjectId(params.id);
const conversation = await collections.conversations.findOne({
_id: conversationId,
...authCondition(locals),
});
if (!conversation) {
throw error(404, "Conversation not found");
}
await collections.abortedGenerations.updateOne(
{ conversationId },
{ $set: { updatedAt: new Date() }, $setOnInsert: { createdAt: new Date() } },
{ upsert: true }
);
return new Response();
}
| chat-ui/src/routes/conversation/[id]/stop-generating/+server.ts/0 | {
"file_path": "chat-ui/src/routes/conversation/[id]/stop-generating/+server.ts",
"repo_id": "chat-ui",
"token_count": 261
} | 52 |
<script lang="ts">
import { enhance } from "$app/forms";
import { base } from "$app/paths";
import { page } from "$app/stores";
import { PUBLIC_ORIGIN, PUBLIC_SHARE_PREFIX } from "$env/static/public";
import { useSettingsStore } from "$lib/stores/settings";
import type { PageData } from "./$types";
import CarbonPen from "~icons/carbon/pen";
import CarbonTrash from "~icons/carbon/trash-can";
import CarbonCopy from "~icons/carbon/copy-file";
import CarbonFlag from "~icons/carbon/flag";
import CarbonLink from "~icons/carbon/link";
import CopyToClipBoardBtn from "$lib/components/CopyToClipBoardBtn.svelte";
export let data: PageData;
$: assistant = data.assistants.find((el) => el._id.toString() === $page.params.assistantId);
const settings = useSettingsStore();
$: isActive = $settings.activeModel === $page.params.assistantId;
const prefix = PUBLIC_SHARE_PREFIX || `${PUBLIC_ORIGIN || $page.url.origin}${base}`;
$: shareUrl = `${prefix}/assistant/${assistant?._id}`;
</script>
<div class="flex h-full flex-col gap-2">
<div class="flex gap-6">
{#if assistant?.avatar}
<!-- crop image if not square -->
<img
src={`${base}/settings/assistants/${assistant?._id}/avatar.jpg?hash=${assistant?.avatar}`}
alt="Avatar"
class="size-16 flex-none rounded-full object-cover sm:size-24"
/>
{:else}
<div
class="flex size-16 flex-none items-center justify-center rounded-full bg-gray-300 text-4xl font-semibold uppercase text-gray-500 sm:size-24"
>
{assistant?.name[0]}
</div>
{/if}
<div class="flex-1">
<div class="mb-1.5">
<h1 class="mr-2 inline text-xl font-semibold">
{assistant?.name}
</h1>
<span class="rounded-full border px-2 py-0.5 text-sm leading-none text-gray-500"
>public</span
>
</div>
{#if assistant?.description}
<p class="mb-2 line-clamp-2 text-sm text-gray-500">
{assistant.description}
</p>
{/if}
<p class="text-sm text-gray-500">
Model: <span class="font-semibold"> {assistant?.modelId} </span>
<span class="text-gray-300">•</span> Created by
<a class="underline" target="_blank" href={"https://hf.co/" + assistant?.createdByName}>
{assistant?.createdByName}
</a>
</p>
<div
class="flex items-center gap-4 whitespace-nowrap text-sm text-gray-500 hover:*:text-gray-800"
>
<button
class="{isActive
? 'bg-gray-100 text-gray-800'
: 'bg-black !text-white'} my-2 flex w-fit items-center rounded-full px-3 py-1 text-base"
disabled={isActive}
name="Activate model"
on:click|stopPropagation={() => {
$settings.activeModel = $page.params.assistantId;
}}
>
{isActive ? "Active" : "Activate"}
</button>
{#if assistant?.createdByMe}
<a href="{base}/settings/assistants/{assistant?._id}/edit" class="underline"
><CarbonPen class="mr-1.5 inline text-xs" />Edit
</a>
<form method="POST" action="?/delete" use:enhance>
<button type="submit" class="flex items-center underline">
<CarbonTrash class="mr-1.5 inline text-xs" />Delete</button
>
</form>
{:else}
<form method="POST" action="?/unsubscribe" use:enhance>
<button type="submit" class="underline">
<CarbonTrash class="mr-1.5 inline text-xs" />Remove</button
>
</form>
<form method="POST" action="?/edit" use:enhance class="hidden">
<button type="submit" class="underline">
<CarbonCopy class="mr-1.5 inline text-xs" />Duplicate</button
>
</form>
{#if !assistant?.reported}
<form method="POST" action="?/report" use:enhance>
<button type="submit" class="underline">
<CarbonFlag class="mr-1.5 inline text-xs" />Report</button
>
</form>
{:else}
<button type="button" disabled class="text-gray-700">
<CarbonFlag class="mr-1.5 inline text-xs" />Reported</button
>
{/if}
{/if}
</div>
</div>
</div>
<div>
<h2 class="text-lg font-semibold">Direct URL</h2>
<p class="pb-2 text-sm text-gray-500">Share this link for people to use your assistant.</p>
<div
class="flex flex-row gap-2 rounded-lg border-2 border-gray-200 bg-gray-100 py-2 pl-3 pr-1.5"
>
<input disabled class="flex-1 truncate bg-inherit" value={shareUrl} />
<CopyToClipBoardBtn
value={shareUrl}
classNames="!border-none !shadow-none !py-0 !px-1 !rounded-md"
>
<div class="flex items-center gap-1.5 text-gray-500 hover:underline">
<CarbonLink />Copy
</div>
</CopyToClipBoardBtn>
</div>
</div>
<h2 class="mt-4 text-lg font-semibold">System Instructions</h2>
<textarea
disabled
class="min-h-[8lh] w-full flex-1 rounded-lg border-2 border-gray-200 bg-gray-100 p-2 disabled:cursor-not-allowed 2xl:min-h-[12lh]"
>{assistant?.preprompt}</textarea
>
</div>
| chat-ui/src/routes/settings/assistants/[assistantId]/+page.svelte/0 | {
"file_path": "chat-ui/src/routes/settings/assistants/[assistantId]/+page.svelte",
"repo_id": "chat-ui",
"token_count": 2100
} | 53 |
<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="none">
<path
fill="#2063EC"
d="M4 15.55C4 9.72 8.72 5 14.55 5h4.11a9.34 9.34 0 1 1 0 18.68H7.58l-2.89 2.8a.41.41 0 0 1-.69-.3V15.55Z"
/>
</svg>
| chat-ui/static/chatui/logo.svg/0 | {
"file_path": "chat-ui/static/chatui/logo.svg",
"repo_id": "chat-ui",
"token_count": 125
} | 54 |
# Add patterns of files dvc should ignore, which could improve
# the performance. Learn more at
# https://dvc.org/doc/user-guide/dvcignore
| datasets/.dvcignore/0 | {
"file_path": "datasets/.dvcignore",
"repo_id": "datasets",
"token_count": 40
} | 55 |
<p align="center">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/datasets-logo-dark.svg">
<source media="(prefers-color-scheme: light)" srcset="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/datasets-logo-light.svg">
<img alt="Hugging Face Datasets Library" src="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/datasets-logo-light.svg" width="352" height="59" style="max-width: 100%;">
</picture>
<br/>
<br/>
</p>
<p align="center">
<a href="https://github.com/huggingface/datasets/actions/workflows/ci.yml?query=branch%3Amain">
<img alt="Build" src="https://github.com/huggingface/datasets/actions/workflows/ci.yml/badge.svg?branch=main">
</a>
<a href="https://github.com/huggingface/datasets/blob/main/LICENSE">
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/datasets.svg?color=blue">
</a>
<a href="https://huggingface.co/docs/datasets/index.html">
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/datasets/index.html.svg?down_color=red&down_message=offline&up_message=online">
</a>
<a href="https://github.com/huggingface/datasets/releases">
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/datasets.svg">
</a>
<a href="https://huggingface.co/datasets/">
<img alt="Number of datasets" src="https://img.shields.io/endpoint?url=https://huggingface.co/api/shields/datasets&color=brightgreen">
</a>
<a href="CODE_OF_CONDUCT.md">
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-2.0-4baaaa.svg">
</a>
<a href="https://zenodo.org/badge/latestdoi/250213286"><img src="https://zenodo.org/badge/250213286.svg" alt="DOI"></a>
</p>
🤗 Datasets is a lightweight library providing **two** main features:
- **one-line dataloaders for many public datasets**: one-liners to download and pre-process any of the  major public datasets (image datasets, audio datasets, text datasets in 467 languages and dialects, etc.) provided on the [HuggingFace Datasets Hub](https://huggingface.co/datasets). With a simple command like `squad_dataset = load_dataset("squad")`, get any of these datasets ready to use in a dataloader for training/evaluating a ML model (Numpy/Pandas/PyTorch/TensorFlow/JAX),
- **efficient data pre-processing**: simple, fast and reproducible data pre-processing for the public datasets as well as your own local datasets in CSV, JSON, text, PNG, JPEG, WAV, MP3, Parquet, etc. With simple commands like `processed_dataset = dataset.map(process_example)`, efficiently prepare the dataset for inspection and ML model evaluation and training.
[🎓 **Documentation**](https://huggingface.co/docs/datasets/) [🔎 **Find a dataset in the Hub**](https://huggingface.co/datasets) [🌟 **Share a dataset on the Hub**](https://huggingface.co/docs/datasets/share)
<h3 align="center">
<a href="https://hf.co/course"><img src="https://raw.githubusercontent.com/huggingface/datasets/main/docs/source/imgs/course_banner.png"></a>
</h3>
🤗 Datasets is designed to let the community easily add and share new datasets.
🤗 Datasets has many additional interesting features:
- Thrive on large datasets: 🤗 Datasets naturally frees the user from RAM limitations; all datasets are memory-mapped using an efficient zero-serialization-cost backend (Apache Arrow).
- Smart caching: never wait for your data to process several times.
- Lightweight and fast with a transparent and pythonic API (multi-processing/caching/memory-mapping).
- Built-in interoperability with NumPy, pandas, PyTorch, TensorFlow 2 and JAX.
- Native support for audio and image data.
- Enable streaming mode to save disk space and start iterating over the dataset immediately.
🤗 Datasets originated from a fork of the awesome [TensorFlow Datasets](https://github.com/tensorflow/datasets) and the HuggingFace team want to deeply thank the TensorFlow Datasets team for building this amazing library. More details on the differences between 🤗 Datasets and `tfds` can be found in the section [Main differences between 🤗 Datasets and `tfds`](#main-differences-between--datasets-and-tfds).
# Installation
## With pip
🤗 Datasets can be installed from PyPi and has to be installed in a virtual environment (venv or conda for instance)
```bash
pip install datasets
```
## With conda
🤗 Datasets can be installed using conda as follows:
```bash
conda install -c huggingface -c conda-forge datasets
```
Follow the installation pages of TensorFlow and PyTorch to see how to install them with conda.
For more details on installation, check the installation page in the documentation: https://huggingface.co/docs/datasets/installation
## Installation to use with PyTorch/TensorFlow/pandas
If you plan to use 🤗 Datasets with PyTorch (1.0+), TensorFlow (2.2+) or pandas, you should also install PyTorch, TensorFlow or pandas.
For more details on using the library with NumPy, pandas, PyTorch or TensorFlow, check the quick start page in the documentation: https://huggingface.co/docs/datasets/quickstart
# Usage
🤗 Datasets is made to be very simple to use - the API is centered around a single function, `datasets.load_dataset(dataset_name, **kwargs)`, that instantiates a dataset.
This library can be used for text/image/audio/etc. datasets. Here is a quick example to load a text dataset:
```python
from datasets import load_dataset
# Print all the available datasets
from huggingface_hub import list_datasets
print([dataset.id for dataset in list_datasets()])
# Load a dataset and print the first example in the training set
squad_dataset = load_dataset('squad')
print(squad_dataset['train'][0])
# Process the dataset - add a column with the length of the context texts
dataset_with_length = squad_dataset.map(lambda x: {"length": len(x["context"])})
# Process the dataset - tokenize the context texts (using a tokenizer from the 🤗 Transformers library)
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
tokenized_dataset = squad_dataset.map(lambda x: tokenizer(x['context']), batched=True)
```
If your dataset is bigger than your disk or if you don't want to wait to download the data, you can use streaming:
```python
# If you want to use the dataset immediately and efficiently stream the data as you iterate over the dataset
image_dataset = load_dataset('cifar100', streaming=True)
for example in image_dataset["train"]:
break
```
For more details on using the library, check the quick start page in the documentation: https://huggingface.co/docs/datasets/quickstart and the specific pages on:
- Loading a dataset: https://huggingface.co/docs/datasets/loading
- What's in a Dataset: https://huggingface.co/docs/datasets/access
- Processing data with 🤗 Datasets: https://huggingface.co/docs/datasets/process
- Processing audio data: https://huggingface.co/docs/datasets/audio_process
- Processing image data: https://huggingface.co/docs/datasets/image_process
- Processing text data: https://huggingface.co/docs/datasets/nlp_process
- Streaming a dataset: https://huggingface.co/docs/datasets/stream
- Writing your own dataset loading script: https://huggingface.co/docs/datasets/dataset_script
- etc.
# Add a new dataset to the Hub
We have a very detailed step-by-step guide for adding a new dataset to the collection of datasets already provided on the [HuggingFace Datasets Hub](https://huggingface.co/datasets).
You can find:
- [how to upload a dataset to the Hub using your web browser or Python](https://huggingface.co/docs/datasets/upload_dataset) and also
- [how to upload it using Git](https://huggingface.co/docs/datasets/share).
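If you go the Python route from the first link above, a minimal sketch looks like this (assuming you are already authenticated, for example via `huggingface-cli login`; the repository id below is a placeholder, not a real dataset):

```python
from datasets import Dataset

# Build a tiny dataset from an in-memory dict and push it to the Hub.
# "your-username/my-dataset" is a placeholder repository id.
ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})
ds.push_to_hub("your-username/my-dataset")
```

See the linked guides for dataset cards, private repositories, and larger uploads.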
# Main differences between 🤗 Datasets and `tfds`
If you are familiar with the great TensorFlow Datasets, here are the main differences between 🤗 Datasets and `tfds`:
- the scripts in 🤗 Datasets are not provided within the library but are queried, downloaded/cached and dynamically loaded upon request
- the backend serialization of 🤗 Datasets is based on [Apache Arrow](https://arrow.apache.org/) instead of TF Records and leverages Python dataclasses for info and features, with some diverging features (we mostly don't do encoding and store the raw data as much as possible in the backend serialization cache).
- the user-facing dataset object of 🤗 Datasets is not a `tf.data.Dataset` but a built-in framework-agnostic dataset class with methods inspired by what we like in `tf.data` (like a `map()` method). It basically wraps a memory-mapped Arrow table cache.
# Disclaimers
🤗 Datasets may run Python code defined by the dataset authors to parse certain data formats or structures. For security reasons, we ask users to:
- check the dataset scripts they're going to run beforehand and
- pin the `revision` of the repositories they use.
If you're a dataset owner and wish to update any part of it (description, citation, license, etc.), or do not want your dataset to be included in the Hugging Face Hub, please get in touch by opening a discussion or a pull request in the Community tab of the dataset page. Thanks for your contribution to the ML community!
## BibTeX
If you want to cite our 🤗 Datasets library, you can use our [paper](https://arxiv.org/abs/2109.02846):
```bibtex
@inproceedings{lhoest-etal-2021-datasets,
title = "Datasets: A Community Library for Natural Language Processing",
author = "Lhoest, Quentin and
Villanova del Moral, Albert and
Jernite, Yacine and
Thakur, Abhishek and
von Platen, Patrick and
Patil, Suraj and
Chaumond, Julien and
Drame, Mariama and
Plu, Julien and
Tunstall, Lewis and
Davison, Joe and
{\v{S}}a{\v{s}}ko, Mario and
Chhablani, Gunjan and
Malik, Bhavitvya and
Brandeis, Simon and
Le Scao, Teven and
Sanh, Victor and
Xu, Canwen and
Patry, Nicolas and
McMillan-Major, Angelina and
Schmid, Philipp and
Gugger, Sylvain and
Delangue, Cl{\'e}ment and
Matussi{\`e}re, Th{\'e}o and
Debut, Lysandre and
Bekman, Stas and
Cistac, Pierric and
Goehringer, Thibault and
Mustar, Victor and
Lagunas, Fran{\c{c}}ois and
Rush, Alexander and
Wolf, Thomas",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.emnlp-demo.21",
pages = "175--184",
abstract = "The scale, variety, and quantity of publicly-available NLP datasets has grown rapidly as researchers propose new tasks, larger models, and novel benchmarks. Datasets is a community library for contemporary NLP designed to support this ecosystem. Datasets aims to standardize end-user interfaces, versioning, and documentation, while providing a lightweight front-end that behaves similarly for small datasets as for internet-scale corpora. The design of the library incorporates a distributed, community-driven approach to adding datasets and documenting usage. After a year of development, the library now includes more than 650 unique datasets, has more than 250 contributors, and has helped support a variety of novel cross-dataset research projects and shared tasks. The library is available at https://github.com/huggingface/datasets.",
eprint={2109.02846},
archivePrefix={arXiv},
primaryClass={cs.CL},
}
```
If you need to cite a specific version of our 🤗 Datasets library for reproducibility, you can use the corresponding version Zenodo DOI from this [list](https://zenodo.org/search?q=conceptrecid:%224817768%22&sort=-version&all_versions=True).
| datasets/README.md/0 | {
"file_path": "datasets/README.md",
"repo_id": "datasets",
"token_count": 4002
} | 56 |
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# Generating the documentation
To generate the documentation, you first have to build it. Several packages are necessary to build the doc;
you can install them with the following command, at the root of the code repository:
```bash
pip install -e ".[docs]"
```
Then you need to install our special tool that builds the documentation:
```bash
pip install git+https://github.com/huggingface/doc-builder
```
---
**NOTE**
You only need to generate the documentation to inspect it locally (if you're planning changes and want to
check how they look before committing for instance). You don't have to `git commit` the built documentation.
---
## Building the documentation
Once you have setup the `doc-builder` and additional packages, you can generate the documentation by typing
the following command:
```bash
doc-builder build datasets docs/source/ --build_dir ~/tmp/test-build
```
You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate
the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite
Markdown editor.
## Previewing the documentation
To preview the docs, first install the `watchdog` module with:
```bash
pip install watchdog
```
Then run the following command:
```bash
doc-builder preview datasets docs/source/
```
The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment to a link where the documentation with your changes lives.
---
**NOTE**
The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again).
## Adding a new element to the navigation bar
Accepted files are Markdown (.md or .mdx).
Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting
the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/datasets/blob/main/docs/source/_toctree.yml) file.
## Renaming section headers and moving sections
It helps to keep the old links working when renaming a section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and social media, and it makes for a much better user experience if users reading those months later can still easily navigate to the originally intended information.
Therefore we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.
So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file:
```
Sections that were moved:
[ <a href="#section-b">Section A</a><a id="section-a"></a> ]
```
and of course if you moved it to another file, then:
```
Sections that were moved:
[ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ]
```
Use the relative style to link to the new file so that the versioned docs continue to work.
For an example of a rich moved sections set please see the very end of [the transformers Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.md).
## Writing Documentation - Specification
The `huggingface/datasets` documentation follows the
[Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style for docstrings,
although we can write them directly in Markdown.
### Adding a new tutorial
Adding a new tutorial or section is done in two steps:
- Add a new file under `./source`. This file can either be ReStructuredText (.rst) or Markdown (.md).
- Link that file in `./source/_toctree.yml` on the correct toc-tree.
Make sure to put your new file under the proper section. If you have a doubt, feel free to ask in a Github Issue or PR.
### Writing source documentation
Values that should be put in `code` should be surrounded by backticks: \`like so\`. Note that argument names
and objects like True, None or any strings should usually be put in `code`.
When mentioning a class, function or method, it is recommended to use our syntax for internal links so that our tool
adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or
function to be in the main package.
If you want to create a link to some internal class or function, you need to
provide its path. For instance: \[\`table.InMemoryTable\`\]. This will be converted into a link with
`table.InMemoryTable` in the description. To get rid of the path and only keep the name of the object you are
linking to in the description, add a ~: \[\`~table.InMemoryTable\`\] will generate a link with `InMemoryTable` in the description.
The same works for methods so you can either use \[\`XXXClass.method\`\] or \[~\`XXXClass.method\`\].
#### Defining arguments in a method
Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and
an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon and its
description:
```
Args:
n_layers (`int`): The number of layers of the model.
```
If the description is too long to fit in one line, another indentation is necessary before writing the description
after the argument.
Here's an example showcasing everything so far:
```
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AlbertTokenizer`]. See [`~PreTrainedTokenizer.encode`] and
[`~PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
```
For optional arguments or arguments with defaults we follow the following syntax: imagine we have a function with the
following signature:
```
def my_function(x: str = None, a: float = 1):
```
then its documentation should look like this:
```
Args:
x (`str`, *optional*):
This argument controls ...
a (`float`, *optional*, defaults to 1):
This argument is used to ...
```
Note that we always omit the "defaults to \`None\`" when None is the default for any argument. Also note that even
if the first line describing your argument type and its default gets long, you can't break it into several lines. You can
however write as many lines as you want in the indented description (see the example above with `input_ids`).
#### Writing a multi-line code block
Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown:
````
```
# first line of code
# second line
# etc
```
````
#### Writing a return block
The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation.
The first line should be the type of the return, followed by a line return. No need to indent further for the elements
building the return.
Here's an example of a single value return:
```
Returns:
`List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token.
```
Here's an example of tuple return, comprising several objects:
```
Returns:
`tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs:
    - **loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` --
Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
- **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) --
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
```
#### Adding an image
Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos and other non-text files. We prefer to leverage a hf.co hosted `dataset` like
the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference
them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images).
If an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images
to this dataset.
## Writing documentation examples
The syntax for Example docstrings can look as follows:
```
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> def add_prefix(example):
... example["text"] = "Review: " + example["text"]
... return example
>>> ds = ds.map(add_prefix)
>>> ds[0:3]["text"]
['Review: compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .',
'Review: the soundtrack alone is worth the price of admission .',
'Review: rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .']
# process a batch of examples
>>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True)
# set number of processors
>>> ds = ds.map(add_prefix, num_proc=4)
```
```
The docstring should give a minimal, clear example of how the respective class or function is to be used in practice and also include the expected (ideally sensible) output.
Often, readers will try out the example before even going through the function
or class definitions. Therefore, it is of utmost importance that the example
works as expected.
| datasets/docs/README.md/0 | {
"file_path": "datasets/docs/README.md",
"repo_id": "datasets",
"token_count": 3059
} | 57 |
# Cache management
When you download a dataset, the processing scripts and data are stored locally on your computer. The cache allows 🤗 Datasets to avoid re-downloading or processing the entire dataset every time you use it.
This guide will show you how to:
- Change the cache directory.
- Control how a dataset is loaded from the cache.
- Clean up cache files in the directory.
- Enable or disable caching.
## Cache directory
The default cache directory is `~/.cache/huggingface/datasets`. Change the cache location by setting the shell environment variable, `HF_DATASETS_CACHE` to another directory:
```
$ export HF_DATASETS_CACHE="/path/to/another/directory"
```
When you load a dataset, you also have the option to change where the data is cached. Change the `cache_dir` parameter to the path you want:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('LOADING_SCRIPT', cache_dir="PATH/TO/MY/CACHE/DIR")
```
Similarly, you can change where a metric is cached with the `cache_dir` parameter:
```py
>>> from datasets import load_metric
>>> metric = load_metric('glue', 'mrpc', cache_dir="MY/CACHE/DIRECTORY")
```
## Download mode
After you download a dataset, control how it is loaded by [`load_dataset`] with the `download_mode` parameter. By default, 🤗 Datasets will reuse a dataset if it exists. But if you need the original dataset without any processing functions applied, re-download the files as shown below:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset('squad', download_mode='force_redownload')
```
Refer to [`DownloadMode`] for a full list of download modes.
## Cache files
Clean up the cache files in the directory with [`Dataset.cleanup_cache_files`]:
```py
# Returns the number of removed cache files
>>> dataset.cleanup_cache_files()
2
```
## Enable or disable caching
If you're using a cached file locally, it will automatically reload the dataset with any previous transforms you applied to the dataset. Disable this behavior by setting the argument `load_from_cache_file=False` in [`Dataset.map`]:
```py
>>> updated_dataset = small_dataset.map(add_prefix, load_from_cache_file=False)
```
In the example above, 🤗 Datasets will execute the function `add_prefix` over the entire dataset again instead of loading the dataset from its previous state.
Disable caching on a global scale with [`disable_caching`]:
```py
>>> from datasets import disable_caching
>>> disable_caching()
```
When you disable caching, 🤗 Datasets will no longer reload cached files when applying transforms to datasets. Any transform you apply to your dataset will need to be reapplied.
<Tip>
If you want to reuse a dataset from scratch, try setting the `download_mode` parameter in [`load_dataset`] instead.
</Tip>
You can also avoid caching your metric entirely, and keep it in CPU memory instead:
```py
>>> from datasets import load_metric
>>> metric = load_metric('glue', 'mrpc', keep_in_memory=True)
```
<Tip warning={true}>
Keeping the predictions in-memory is not possible in a distributed setting since the CPU memory spaces of the various processes are not shared.
</Tip>
<a id='load_dataset_enhancing_performance'></a>
## Improve performance
Disabling the cache and copying the dataset in-memory will speed up dataset operations. There are two options for copying the dataset in-memory:
1. Set `datasets.config.IN_MEMORY_MAX_SIZE` to a nonzero value (in bytes) that fits in your RAM.
2. Set the environment variable `HF_DATASETS_IN_MEMORY_MAX_SIZE` to a nonzero value. Note that the first method takes higher precedence.
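For example, to allow datasets of up to roughly 1 GB to be copied into memory, you could set either of the following (the exact value here is illustrative):
```py
>>> import datasets
>>> datasets.config.IN_MEMORY_MAX_SIZE = 1_000_000_000  # in bytes
```
```bash
$ export HF_DATASETS_IN_MEMORY_MAX_SIZE=1000000000
```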
| datasets/docs/source/cache.mdx/0 | {
"file_path": "datasets/docs/source/cache.mdx",
"repo_id": "datasets",
"token_count": 1027
} | 58 |
# Installation
Before you start, you'll need to set up your environment and install the appropriate packages. 🤗 Datasets is tested on **Python 3.7+**.
<Tip>
If you want to use 🤗 Datasets with TensorFlow or PyTorch, you'll need to install them separately. Refer to the [TensorFlow installation page](https://www.tensorflow.org/install/pip#tensorflow-2-packages-are-available) or the [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) for the specific install command for your framework.
</Tip>
## Virtual environment
You should install 🤗 Datasets in a [virtual environment](https://docs.python.org/3/library/venv.html) to keep things tidy and avoid dependency conflicts.
1. Create and navigate to your project directory:
```bash
mkdir ~/my-project
cd ~/my-project
```
2. Start a virtual environment inside your directory:
```bash
python -m venv .env
```
3. Activate and deactivate the virtual environment with the following commands:
```bash
# Activate the virtual environment
source .env/bin/activate
# Deactivate the virtual environment
deactivate
```
Once you've created your virtual environment, you can install 🤗 Datasets in it.
## pip
The most straightforward way to install 🤗 Datasets is with pip:
```bash
pip install datasets
```
Run the following command to check if 🤗 Datasets has been properly installed:
```bash
python -c "from datasets import load_dataset; print(load_dataset('squad', split='train')[0])"
```
This command downloads version 1 of the [Stanford Question Answering Dataset (SQuAD)](https://rajpurkar.github.io/SQuAD-explorer/), loads the training split, and prints the first training example. You should see:
```python
{'answers': {'answer_start': [515], 'text': ['Saint Bernadette Soubirous']}, 'context': 'Architecturally, the school has a Catholic character. Atop the Main Building\'s gold dome is a golden statue of the Virgin Mary. Immediately in front of the Main Building and facing it, is a copper statue of Christ with arms upraised with the legend "Venite Ad Me Omnes". Next to the Main Building is the Basilica of the Sacred Heart. Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection. It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly appeared to Saint Bernadette Soubirous in 1858. At the end of the main drive (and in a direct line that connects through 3 statues and the Gold Dome), is a simple, modern stone statue of Mary.', 'id': '5733be284776f41900661182', 'question': 'To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?', 'title': 'University_of_Notre_Dame'}
```
## Audio
To work with audio datasets, you need to install the [`Audio`] feature as an extra dependency:
```bash
pip install datasets[audio]
```
<Tip warning={true}>
To decode mp3 files, you need to have at least version 1.1.0 of the `libsndfile` system library. Usually, it's bundled with the python [`soundfile`](https://github.com/bastibe/python-soundfile) package, which is installed as an extra audio dependency for 🤗 Datasets.
For Linux, the required version of `libsndfile` is bundled with `soundfile` starting from version 0.12.0. You can run the following command to determine which version of `libsndfile` is being used by `soundfile`:
```bash
python -c "import soundfile; print(soundfile.__libsndfile_version__)"
```
</Tip>
## Vision
To work with image datasets, you need to install the [`Image`] feature as an extra dependency:
```bash
pip install datasets[vision]
```
## source
Building 🤗 Datasets from source lets you make changes to the code base. To install from source, clone the repository and install it with the following commands:
```bash
git clone https://github.com/huggingface/datasets.git
cd datasets
pip install -e .
```
Again, you can check if 🤗 Datasets was properly installed with the following command:
```bash
python -c "from datasets import load_dataset; print(load_dataset('squad', split='train')[0])"
```
## conda
🤗 Datasets can also be installed from conda, a package management system:
```bash
conda install -c huggingface -c conda-forge datasets
```
| datasets/docs/source/installation.md/0 | {
"file_path": "datasets/docs/source/installation.md",
"repo_id": "datasets",
"token_count": 1236
} | 59 |
# Semantic segmentation
Semantic segmentation datasets are used to train a model to classify every pixel in an image. There are
a wide variety of applications enabled by these datasets such as background removal from images, stylizing
images, or scene understanding for autonomous driving. This guide will show you how to apply transformations
to an image segmentation dataset.
Before you start, make sure you have up-to-date versions of `albumentations` and `opencv-python` (imported as `cv2`) installed:
```bash
pip install -U albumentations opencv-python
```
[Albumentations](https://albumentations.ai/) is a Python library for performing data augmentation
for computer vision. It supports various computer vision tasks such as image classification, object
detection, segmentation, and keypoint estimation.
This guide uses the [Scene Parsing](https://huggingface.co/datasets/scene_parse_150) dataset for segmenting
and parsing an image into different image regions associated with semantic categories, such as sky, road, person, and bed.
Load the `train` split of the dataset and take a look at an example:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("scene_parse_150", split="train")
>>> index = 10
>>> dataset[index]
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=683x512 at 0x7FB37B0EC810>,
'annotation': <PIL.PngImagePlugin.PngImageFile image mode=L size=683x512 at 0x7FB37B0EC9D0>,
'scene_category': 927}
```
The dataset has three fields:
* `image`: a PIL image object.
* `annotation`: segmentation mask of the image.
* `scene_category`: the label or scene category of the image (like “kitchen” or “office”).
Next, check out an image with:
```py
>>> dataset[index]["image"]
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/image_seg.png">
</div>
Similarly, you can check out the respective segmentation mask:
```py
>>> dataset[index]["annotation"]
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/seg_mask.png">
</div>
We can also apply a [color palette](https://github.com/tensorflow/models/blob/3f1ca33afe3c1631b733ea7e40c294273b9e406d/research/deeplab/utils/get_dataset_colormap.py#L51) to the
segmentation mask and overlay it on top of the original image to visualize the dataset:
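The `create_ade20k_label_colormap` helper used below is expected to come from the file linked above. As a lighter stand-in while experimenting, you could generate an arbitrary palette under the same name (a minimal sketch; the real colormap returns the fixed ADE20K colors rather than random ones):
```py
>>> import numpy as np
>>> def create_ade20k_label_colormap():
...     # Stand-in palette: one pseudo-random RGB color per label id.
...     # Copy the full colormap from the linked DeepLab utility for faithful colors.
...     rng = np.random.default_rng(0)
...     return rng.integers(0, 256, size=(151, 3), dtype=np.uint8)
```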
After defining the color palette, you should be ready to visualize some overlays.
```py
>>> import matplotlib.pyplot as plt
>>> def visualize_seg_mask(image: np.ndarray, mask: np.ndarray):
... color_seg = np.zeros((mask.shape[0], mask.shape[1], 3), dtype=np.uint8)
... palette = np.array(create_ade20k_label_colormap())
... for label, color in enumerate(palette):
... color_seg[mask == label, :] = color
... color_seg = color_seg[..., ::-1] # convert to BGR
... img = np.array(image) * 0.5 + color_seg * 0.5  # blend the original image with the segmentation map
... img = img.astype(np.uint8)
... plt.figure(figsize=(15, 10))
... plt.imshow(img)
... plt.axis("off")
... plt.show()
>>> visualize_seg_mask(
... np.array(dataset[index]["image"]),
... np.array(dataset[index]["annotation"])
... )
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/seg_overlay.png">
</div>
Now apply some augmentations with `albumentations`. You’ll first resize the image and randomly adjust its brightness and contrast.
```py
>>> import albumentations
>>> transform = albumentations.Compose(
... [
... albumentations.Resize(256, 256),
... albumentations.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, p=0.5),
... ]
... )
```
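As a quick sanity check (not part of the original walkthrough), you can run the transform on a single example before wiring it into the dataset; with the resize to 256×256, the shapes should come out as follows:
```py
>>> transformed = transform(
...     image=np.array(dataset[index]["image"]),
...     mask=np.array(dataset[index]["annotation"]),
... )
>>> transformed["image"].shape, transformed["mask"].shape
((256, 256, 3), (256, 256))
```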
Create a function to apply the transformation to the images:
```py
>>> def transforms(examples):
... transformed_images, transformed_masks = [], []
...
... for image, seg_mask in zip(examples["image"], examples["annotation"]):
... image, seg_mask = np.array(image), np.array(seg_mask)
... transformed = transform(image=image, mask=seg_mask)
... transformed_images.append(transformed["image"])
... transformed_masks.append(transformed["mask"])
...
... examples["pixel_values"] = transformed_images
... examples["label"] = transformed_masks
... return examples
```
Use the [`~Dataset.set_transform`] function to apply the transformation on-the-fly to batches of the dataset to consume less disk space:
```py
>>> dataset.set_transform(transforms)
```
You can verify the transformation worked by indexing into the `pixel_values` and `label` of an example:
```py
>>> image = np.array(dataset[index]["pixel_values"])
>>> mask = np.array(dataset[index]["label"])
>>> visualize_seg_mask(image, mask)
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/albumentations_seg.png">
</div>
In this guide, you have used `albumentations` for augmenting the dataset. It's also possible to use `torchvision` to apply some similar transforms.
```py
>>> from torchvision.transforms import Resize, ColorJitter, Compose
>>> transformation_chain = Compose([
... Resize((256, 256)),
... ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1)
... ])
>>> resize = Resize((256, 256))
>>> def train_transforms(example_batch):
... example_batch["pixel_values"] = [transformation_chain(x) for x in example_batch["image"]]
... example_batch["label"] = [resize(x) for x in example_batch["annotation"]]
... return example_batch
>>> dataset.set_transform(train_transforms)
>>> image = np.array(dataset[index]["pixel_values"])
>>> mask = np.array(dataset[index]["label"])
>>> visualize_seg_mask(image, mask)
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/torchvision_seg.png">
</div>
<Tip>
Now that you know how to process a dataset for semantic segmentation, learn
[how to train a semantic segmentation model](https://huggingface.co/docs/transformers/tasks/semantic_segmentation)
and use it for inference.
</Tip> | datasets/docs/source/semantic_segmentation.mdx/0 | {
"file_path": "datasets/docs/source/semantic_segmentation.mdx",
"repo_id": "datasets",
"token_count": 2142
} | 60 |
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERTScore metric. """
import functools
from contextlib import contextmanager
import bert_score
from packaging import version
import datasets
@contextmanager
def filter_logging_context():
def filter_log(record):
return False if "This IS expected if you are initializing" in record.msg else True
logger = datasets.utils.logging.get_logger("transformers.modeling_utils")
logger.addFilter(filter_log)
try:
yield
finally:
logger.removeFilter(filter_log)
_CITATION = """\
@inproceedings{bert-score,
title={BERTScore: Evaluating Text Generation with BERT},
author={Tianyi Zhang* and Varsha Kishore* and Felix Wu* and Kilian Q. Weinberger and Yoav Artzi},
booktitle={International Conference on Learning Representations},
year={2020},
url={https://openreview.net/forum?id=SkeHuCVFDr}
}
"""
_DESCRIPTION = """\
BERTScore leverages the pre-trained contextual embeddings from BERT and matches words in candidate and reference
sentences by cosine similarity.
It has been shown to correlate with human judgment on sentence-level and system-level evaluation.
Moreover, BERTScore computes precision, recall, and F1 measure, which can be useful for evaluating different language
generation tasks.
See the project's README at https://github.com/Tiiiger/bert_score#readme for more information.
"""
_KWARGS_DESCRIPTION = """
Compute BERTScore, along with the model hashcode, for candidate sentences against one or more references.
Args:
predictions (list of str): Prediction/candidate sentences.
references (list of str or list of list of str): Reference sentences.
lang (str): Language of the sentences; required (e.g. 'en').
    model_type (str): BERT model specification; defaults to the suggested
        model for the target language. At least one of `model_type`
        or `lang` must be specified.
    num_layers (int): The layer of representation to use; defaults to the
        number of layers tuned on WMT16 correlation data.
verbose (bool): Turn on intermediate status update.
idf (bool or dict): Use idf weighting; can also be a precomputed idf_dict.
    device (str): The device on which the contextual embedding model will be allocated.
If this argument is None, the model lives on cuda:0 if cuda is available.
nthreads (int): Number of threads.
    batch_size (int): Bert score processing batch size.
    rescale_with_baseline (bool): Rescale bertscore with pre-computed baseline; `lang` needs to be
        specified when `rescale_with_baseline` is True.
baseline_path (str): Customized baseline file.
use_fast_tokenizer (bool): `use_fast` parameter passed to HF tokenizer. New in version 0.3.10.
Returns:
precision: Precision.
recall: Recall.
f1: F1 score.
hashcode: Hashcode of the library.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bertscore = datasets.load_metric("bertscore")
>>> results = bertscore.compute(predictions=predictions, references=references, lang="en")
>>> print([round(v, 2) for v in results["f1"]])
[1.0, 1.0]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BERTScore(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
homepage="https://github.com/Tiiiger/bert_score",
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
}
),
codebase_urls=["https://github.com/Tiiiger/bert_score"],
reference_urls=[
"https://github.com/Tiiiger/bert_score",
"https://arxiv.org/abs/1904.09675",
],
)
def _compute(
self,
predictions,
references,
lang=None,
model_type=None,
num_layers=None,
verbose=False,
idf=False,
device=None,
batch_size=64,
nthreads=4,
all_layers=False,
rescale_with_baseline=False,
baseline_path=None,
use_fast_tokenizer=False,
):
get_hash = bert_score.utils.get_hash
scorer = bert_score.BERTScorer
if version.parse(bert_score.__version__) >= version.parse("0.3.10"):
get_hash = functools.partial(get_hash, use_fast_tokenizer=use_fast_tokenizer)
scorer = functools.partial(scorer, use_fast_tokenizer=use_fast_tokenizer)
elif use_fast_tokenizer:
raise ImportWarning(
"To use a fast tokenizer, the module `bert-score>=0.3.10` is required, and the current version of `bert-score` doesn't match this condition.\n"
'You can install it with `pip install "bert-score>=0.3.10"`.'
)
if model_type is None:
assert lang is not None, "either lang or model_type should be specified"
model_type = bert_score.utils.lang2model[lang.lower()]
if num_layers is None:
num_layers = bert_score.utils.model2layers[model_type]
hashcode = get_hash(
model=model_type,
num_layers=num_layers,
idf=idf,
rescale_with_baseline=rescale_with_baseline,
use_custom_baseline=baseline_path is not None,
)
with filter_logging_context():
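            # Re-instantiate the scorer only if the configuration hash changed; otherwise reuse the cached model.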
if not hasattr(self, "cached_bertscorer") or self.cached_bertscorer.hash != hashcode:
self.cached_bertscorer = scorer(
model_type=model_type,
num_layers=num_layers,
batch_size=batch_size,
nthreads=nthreads,
all_layers=all_layers,
idf=idf,
device=device,
lang=lang,
rescale_with_baseline=rescale_with_baseline,
baseline_path=baseline_path,
)
(P, R, F) = self.cached_bertscorer.score(
cands=predictions,
refs=references,
verbose=verbose,
batch_size=batch_size,
)
output_dict = {
"precision": P.tolist(),
"recall": R.tolist(),
"f1": F.tolist(),
"hashcode": hashcode,
}
return output_dict
def add_batch(self, predictions=None, references=None, **kwargs):
"""Add a batch of predictions and references for the metric's stack."""
# References can be strings or lists of strings
# Let's change strings to lists of strings with one element
if references is not None:
references = [[ref] if isinstance(ref, str) else ref for ref in references]
super().add_batch(predictions=predictions, references=references, **kwargs)
def add(self, prediction=None, reference=None, **kwargs):
"""Add one prediction and reference for the metric's stack."""
# References can be strings or lists of strings
# Let's change strings to lists of strings with one element
if isinstance(reference, str):
reference = [reference]
super().add(prediction=prediction, reference=reference, **kwargs)
| datasets/metrics/bertscore/bertscore.py/0 | {
"file_path": "datasets/metrics/bertscore/bertscore.py",
"repo_id": "datasets",
"token_count": 3284
} | 61 |
# Metric Card for COVAL
## Metric description
CoVal is a coreference evaluation tool for the [CoNLL](https://huggingface.co/datasets/conll2003) and [ARRAU](https://catalog.ldc.upenn.edu/LDC2013T22) datasets which implements the common evaluation metrics including MUC [Vilain et al, 1995](https://aclanthology.org/M95-1005.pdf), B-cubed [Bagga and Baldwin, 1998](https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.34.2578&rep=rep1&type=pdf), CEAFe [Luo et al., 2005](https://aclanthology.org/H05-1004.pdf), LEA [Moosavi and Strube, 2016](https://aclanthology.org/P16-1060.pdf) and the averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe).
CoVal code was written by [`@ns-moosavi`](https://github.com/ns-moosavi), with some parts borrowed from [Deep Coref](https://github.com/clarkkev/deep-coref/blob/master/evaluation.py). The test suite is taken from the [official CoNLL code](https://github.com/conll/reference-coreference-scorers/), with additions by [`@andreasvc`](https://github.com/andreasvc) and file parsing developed by Leo Born.
## How to use
The metric takes two lists of sentences as input, one representing `predictions` and one representing `references`, with the sentences consisting of words in the CoNLL format (see the [Limitations and bias](#Limitations-and-bias) section below for more details on the CoNLL format).
```python
from datasets import load_metric
coval = load_metric('coval')
words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
references = [words]
predictions = [words]
results = coval.compute(predictions=predictions, references=references)
```
It also has several optional arguments:
`keep_singletons`: After extracting all mentions from the key or system files, mentions whose corresponding coreference chain is of size one are considered singletons. The default evaluation mode includes singletons in the evaluation if they are present in the key or the system files. By setting `keep_singletons=False`, all singletons in the key and system files will be excluded from the evaluation.
`NP_only`: Most of the recent coreference resolvers only resolve NP mentions and leave out the resolution of VPs. By setting the `NP_only` option, the scorer will only evaluate the resolution of NPs.
`min_span`: By setting `min_span`, the scorer reports the results based on automatically detected minimum spans. Minimum spans are determined using the [MINA algorithm](https://arxiv.org/pdf/1906.06703.pdf).
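The optional arguments can also be combined in a single call; for instance, continuing the example above (a sketch with all three options enabled):
```python
results = coval.compute(
    predictions=predictions,
    references=references,
    keep_singletons=False,
    NP_only=True,
    min_span=True,
)
```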
## Output values
The metric outputs a dictionary with the following key-value pairs:
`mentions`: mention identification scores (recall, precision and F1), each ranging from 0 to 1.
`muc`: MUC metric, which expresses performance in terms of recall and precision, ranging from 0-1.
`bcub`: B-cubed metric, which is the averaged precision of all items in the distribution, ranging from 0-1.
`ceafe`: CEAFe (Constrained Entity Alignment F-Measure) is computed by aligning reference and system entities with the constraint that a reference entity is aligned with at most one system entity. It ranges from 0 to 1.
`lea`: LEA is a Link-Based Entity-Aware metric which, for each entity, considers how important the entity is and how well it is resolved. It ranges from 0-1.
`conll_score`: averaged CoNLL score (the average of the F1 values of `muc`, `bcub` and `ceafe`), ranging from 0 to 100.
### Values from popular papers
Given that many of the metrics returned by COVAL come from different sources, it is hard to cite reference values for all of them.
The CoNLL score is used to track progress on different datasets such as the [ARRAU corpus](https://paperswithcode.com/sota/coreference-resolution-on-the-arrau-corpus) and [CoNLL 2012](https://paperswithcode.com/sota/coreference-resolution-on-conll-2012).
## Examples
Maximal values
```python
from datasets import load_metric
coval = load_metric('coval')
words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
references = [words]
predictions = [words]
results = coval.compute(predictions=predictions, references=references)
print(results)
{'mentions/recall': 1.0, 'mentions/precision': 1.0, 'mentions/f1': 1.0, 'muc/recall': 1.0, 'muc/precision': 1.0, 'muc/f1': 1.0, 'bcub/recall': 1.0, 'bcub/precision': 1.0, 'bcub/f1': 1.0, 'ceafe/recall': 1.0, 'ceafe/precision': 1.0, 'ceafe/f1': 1.0, 'lea/recall': 1.0, 'lea/precision': 1.0, 'lea/f1': 1.0, 'conll_score': 100.0}
```
## Limitations and bias
This wrapper of CoVal currently only works with [CoNLL line format](https://huggingface.co/datasets/conll2003), which has one word per line with all the annotation for this word in column separated by spaces:
| Column | Type | Description |
|:-------|:----------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| 1 | Document ID | This is a variation on the document filename |
| 2 | Part number | Some files are divided into multiple parts numbered as 000, 001, 002, ... etc. |
| 3 | Word number | |
| 4 | Word | This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release. |
| 5 | Part-of-Speech | |
| 6 | Parse bit | This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column. |
| 7 | Predicate lemma | The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-". |
| 8 | Predicate Frameset ID | This is the PropBank frameset ID of the predicate in Column 7. |
| 9 | Word sense | This is the word sense of the word in Column 3. |
| 10 | Speaker/Author | This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data. |
| 11     | Named Entities        | This column identifies the spans representing various named entities.                                                                                                                                                                                                                                   |
| 12:N | Predicate Arguments | There is one column each of predicate argument structure information for the predicate mentioned in Column 7. |
| N | Coreference | Coreference chain information encoded in a parenthesis structure. |
## Citations
```bibtex
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
```
```bibtex
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
```
```bibtex
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
```
```bibtex
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
```
```bibtex
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
```
## Further References
- [CoNLL 2012 Task Description](http://www.conll.cemantix.org/2012/data.html): for information on the format (section "*_conll File Format")
- [CoNLL Evaluation details](https://github.com/ns-moosavi/coval/blob/master/conll/README.md)
- [Hugging Face - Neural Coreference Resolution (Neuralcoref)](https://huggingface.co/coref/)
| datasets/metrics/coval/README.md/0 | {
"file_path": "datasets/metrics/coval/README.md",
"repo_id": "datasets",
"token_count": 8053
} | 62 |
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" IndicGLUE benchmark metric. """
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
_DESCRIPTION = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
_KWARGS_DESCRIPTION = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"precision": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy(preds, labels):
return float((preds == labels).mean())
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = float(f1_score(y_true=labels, y_pred=preds))
return {
"accuracy": acc,
"f1": f1,
}
def precision_at_10(en_sentvecs, in_sentvecs):
en_sentvecs = np.array(en_sentvecs)
in_sentvecs = np.array(in_sentvecs)
n = en_sentvecs.shape[0]
# mean centering
en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
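    # precision@10: a query counts as correct when its true counterpart is among
    # the 10 nearest neighbors under cosine distance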
sim = cdist(en_sentvecs, in_sentvecs, "cosine")
actual = np.array(range(n))
preds = sim.argsort(axis=1)[:, :10]
matches = np.any(preds == actual[:, None], axis=1)
return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]'
)
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("int64")
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32")),
"references": datasets.Value("int64")
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32")),
}
),
codebase_urls=[],
reference_urls=[],
format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
)
def _compute(self, predictions, references):
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_10(predictions, references)}
elif self.config_name in ["wiki-ner"]:
return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"You should supply a configuration name selected in "
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]'
)
| datasets/metrics/indic_glue/indic_glue.py/0 | {
"file_path": "datasets/metrics/indic_glue/indic_glue.py",
"repo_id": "datasets",
"token_count": 2862
} | 63 |
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pearson correlation coefficient metric."""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_CITATION = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("float"),
"references": datasets.Value("float"),
}
),
reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
)
def _compute(self, predictions, references, return_pvalue=False):
if return_pvalue:
results = pearsonr(references, predictions)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(references, predictions)[0])}
| datasets/metrics/pearsonr/pearsonr.py/0 | {
"file_path": "datasets/metrics/pearsonr/pearsonr.py",
"repo_id": "datasets",
"token_count": 1794
} | 64 |
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" seqeval metric. """
import importlib
from typing import List, Optional, Union
from seqeval.metrics import accuracy_score, classification_report
import datasets
_CITATION = """\
@inproceedings{ramshaw-marcus-1995-text,
title = "Text Chunking using Transformation-Based Learning",
author = "Ramshaw, Lance and
Marcus, Mitch",
booktitle = "Third Workshop on Very Large Corpora",
year = "1995",
url = "https://www.aclweb.org/anthology/W95-0107",
}
@misc{seqeval,
title={{seqeval}: A Python framework for sequence labeling evaluation},
url={https://github.com/chakki-works/seqeval},
note={Software available from https://github.com/chakki-works/seqeval},
author={Hiroki Nakayama},
year={2018},
}
"""
_DESCRIPTION = """\
seqeval is a Python framework for sequence labeling evaluation.
seqeval can evaluate the performance of chunking tasks such as named-entity recognition, part-of-speech tagging, semantic role labeling and so on.
This is well-tested by using the Perl script conlleval, which can be used for
measuring the performance of a system that has processed the CoNLL-2000 shared task data.
seqeval supports following formats:
IOB1
IOB2
IOE1
IOE2
IOBES
See the [README.md] file at https://github.com/chakki-works/seqeval for more information.
"""
_KWARGS_DESCRIPTION = """
Produces labelling scores along with their sufficient statistics
from a source against one or more references.
Args:
predictions: List of List of predicted labels (Estimated targets as returned by a tagger)
references: List of List of reference labels (Ground truth (correct) target values)
suffix: True if the IOB prefix is after type, False otherwise. default: False
scheme: Specify target tagging scheme. Should be one of ["IOB1", "IOB2", "IOE1", "IOE2", "IOBES", "BILOU"].
default: None
mode: Whether to count correct entity labels with incorrect I/B tags as true positives or not.
If you want to only count exact matches, pass mode="strict". default: None.
sample_weight: Array-like of shape (n_samples,), weights for individual samples. default: None
    zero_division: Which value to substitute as a metric value when encountering zero division. Should be one of 0, 1,
"warn". "warn" acts as 0, but the warning is raised.
Returns:
'scores': dict. Summary of the scores for overall and per type
Overall:
'accuracy': accuracy,
'precision': precision,
'recall': recall,
'f1': F1 score, also known as balanced F-score or F-measure,
Per type:
'precision': precision,
'recall': recall,
'f1': F1 score, also known as balanced F-score or F-measure
Examples:
>>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> seqeval = datasets.load_metric("seqeval")
>>> results = seqeval.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['MISC', 'PER', 'overall_precision', 'overall_recall', 'overall_f1', 'overall_accuracy']
>>> print(results["overall_f1"])
0.5
>>> print(results["PER"]["f1"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Seqeval(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
homepage="https://github.com/chakki-works/seqeval",
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="label"), id="sequence"),
"references": datasets.Sequence(datasets.Value("string", id="label"), id="sequence"),
}
),
codebase_urls=["https://github.com/chakki-works/seqeval"],
reference_urls=["https://github.com/chakki-works/seqeval"],
)
def _compute(
self,
predictions,
references,
suffix: bool = False,
scheme: Optional[str] = None,
mode: Optional[str] = None,
sample_weight: Optional[List[int]] = None,
zero_division: Union[str, int] = "warn",
):
if scheme is not None:
try:
scheme_module = importlib.import_module("seqeval.scheme")
scheme = getattr(scheme_module, scheme)
except AttributeError:
raise ValueError(f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {scheme}")
report = classification_report(
y_true=references,
y_pred=predictions,
suffix=suffix,
output_dict=True,
scheme=scheme,
mode=mode,
sample_weight=sample_weight,
zero_division=zero_division,
)
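        # Drop the aggregate macro/weighted averages from the per-type report and
        # use the micro average as the overall precision/recall/F1.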
report.pop("macro avg")
report.pop("weighted avg")
overall_score = report.pop("micro avg")
scores = {
type_name: {
"precision": score["precision"],
"recall": score["recall"],
"f1": score["f1-score"],
"number": score["support"],
}
for type_name, score in report.items()
}
scores["overall_precision"] = overall_score["precision"]
scores["overall_recall"] = overall_score["recall"]
scores["overall_f1"] = overall_score["f1-score"]
scores["overall_accuracy"] = accuracy_score(y_true=references, y_pred=predictions)
return scores
| datasets/metrics/seqeval/seqeval.py/0 | {
"file_path": "datasets/metrics/seqeval/seqeval.py",
"repo_id": "datasets",
"token_count": 2504
} | 65 |
# Metric Card for WikiSplit
## Metric description
WikiSplit is the combination of three metrics: [SARI](https://huggingface.co/metrics/sari), [exact match](https://huggingface.co/metrics/exact_match) and [SacreBLEU](https://huggingface.co/metrics/sacrebleu).
It can be used to evaluate the quality of sentence splitting approaches, which require rewriting a long sentence into two or more coherent short sentences, e.g. based on the [WikiSplit dataset](https://huggingface.co/datasets/wiki_split).
## How to use
The WikiSplit metric takes three inputs:
`sources`: a list of source sentences, where each sentence should be a string.
`predictions`: a list of predicted sentences, where each sentence should be a string.
`references`: a list of lists of reference sentences, where each sentence should be a string.
```python
>>> from datasets import load_metric
>>> wiki_split = load_metric("wiki_split")
>>> sources = ["About 95 species are currently accepted ."]
>>> predictions = ["About 95 you now get in ."]
>>> references= [["About 95 species are currently known ."]]
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
```
## Output values
This metric outputs a dictionary containing three scores:
`sari`: the [SARI](https://huggingface.co/metrics/sari) score, whose range is between `0.0` and `100.0` -- the higher the value, the better the performance of the model being evaluated, with a SARI of 100 being a perfect score.
`sacrebleu`: the [SacreBLEU](https://huggingface.co/metrics/sacrebleu) score, which can take any value between `0.0` and `100.0`, inclusive.
`exact`: the [exact match](https://huggingface.co/metrics/exact_match) score, which represents the sum of all of the individual exact match scores in the set, divided by the total number of predictions in the set. It ranges from `0.0` to `100`, inclusive. Here, `0.0` means no prediction/reference pairs were matches, while `100.0` means they all were.
```python
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
```
### Values from popular papers
This metric was initially used by [Rothe et al.(2020)](https://arxiv.org/pdf/1907.12461.pdf) to evaluate the performance of different split-and-rephrase approaches on the [WikiSplit dataset](https://huggingface.co/datasets/wiki_split). They reported a SARI score of 63.5, a SacreBLEU score of 77.2, and an EXACT_MATCH score of 16.3.
## Examples
Perfect match between prediction and reference:
```python
>>> from datasets import load_metric
>>> wiki_split = load_metric("wiki_split")
>>> sources = ["About 95 species are currently accepted ."]
>>> predictions = ["About 95 species are currently accepted ."]
>>> references= [["About 95 species are currently accepted ."]]
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 100.0, 'sacrebleu': 100.00000000000004, 'exact': 100.0}
```
Partial match between prediction and reference:
```python
>>> from datasets import load_metric
>>> wiki_split = load_metric("wiki_split")
>>> sources = ["About 95 species are currently accepted ."]
>>> predictions = ["About 95 you now get in ."]
>>> references= [["About 95 species are currently known ."]]
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
```
No match between prediction and reference:
```python
>>> from datasets import load_metric
>>> wiki_split = load_metric("wiki_split")
>>> sources = ["About 95 species are currently accepted ."]
>>> predictions = ["Hello world ."]
>>> references= [["About 95 species are currently known ."]]
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 14.047619047619046, 'sacrebleu': 0.0, 'exact': 0.0}
```
## Limitations and bias
This metric is not the official metric to evaluate models on the [WikiSplit dataset](https://huggingface.co/datasets/wiki_split). It was initially proposed by [Rothe et al.(2020)](https://arxiv.org/pdf/1907.12461.pdf), whereas the [original paper introducing the WikiSplit dataset (2018)](https://aclanthology.org/D18-1080.pdf) uses different metrics to evaluate performance, such as corpus-level [BLEU](https://huggingface.co/metrics/bleu) and sentence-level BLEU.
## Citation
```bibtex
@article{rothe2020leveraging,
title={Leveraging pre-trained checkpoints for sequence generation tasks},
author={Rothe, Sascha and Narayan, Shashi and Severyn, Aliaksei},
journal={Transactions of the Association for Computational Linguistics},
volume={8},
pages={264--280},
year={2020},
publisher={MIT Press}
}
```
## Further References
- [WikiSplit dataset](https://huggingface.co/datasets/wiki_split)
- [WikiSplit paper (Botha et al., 2018)](https://aclanthology.org/D18-1080.pdf)
| datasets/metrics/wiki_split/README.md/0 | {
"file_path": "datasets/metrics/wiki_split/README.md",
"repo_id": "datasets",
"token_count": 1504
} | 66 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseDatasetsCLICommand(ABC):
@staticmethod
@abstractmethod
def register_subcommand(parser: ArgumentParser):
raise NotImplementedError()
@abstractmethod
def run(self):
raise NotImplementedError()
| datasets/src/datasets/commands/__init__.py/0 | {
"file_path": "datasets/src/datasets/commands/__init__.py",
"repo_id": "datasets",
"token_count": 107
} | 67 |
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from typing import Any, Dict, List, Optional, Union
from huggingface_hub import HfFileSystem
from . import config
from .table import CastError
from .utils.track import TrackedIterable, tracked_list, tracked_str
class DatasetsError(Exception):
"""Base class for exceptions in this library."""
class DefunctDatasetError(DatasetsError):
"""The dataset has been defunct."""
class FileNotFoundDatasetsError(DatasetsError, FileNotFoundError):
"""FileNotFoundError raised by this library."""
class DataFilesNotFoundError(FileNotFoundDatasetsError):
"""No (supported) data files found."""
class DatasetNotFoundError(FileNotFoundDatasetsError):
"""Dataset not found.
Raised when trying to access:
- a missing dataset, or
- a private/gated dataset and the user is not authenticated.
"""
class DatasetBuildError(DatasetsError):
pass
class ManualDownloadError(DatasetBuildError):
pass
class FileFormatError(DatasetBuildError):
pass
class DatasetGenerationError(DatasetBuildError):
pass
class DatasetGenerationCastError(DatasetGenerationError):
@classmethod
def from_cast_error(
cls,
cast_error: CastError,
builder_name: str,
gen_kwargs: Dict[str, Any],
token: Optional[Union[bool, str]],
) -> "DatasetGenerationCastError":
explanation_message = (
f"\n\nAll the data files must have the same columns, but at some point {cast_error.details()}"
)
formatted_tracked_gen_kwargs: List[str] = []
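        # Walk the tracked gen_kwargs to recover the origin (file path, plus revision for hf:// paths)
        # of the data that was being generated when the cast failed.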
for gen_kwarg in gen_kwargs.values():
if not isinstance(gen_kwarg, (tracked_str, tracked_list, TrackedIterable)):
continue
while isinstance(gen_kwarg, (tracked_list, TrackedIterable)) and gen_kwarg.last_item is not None:
gen_kwarg = gen_kwarg.last_item
if isinstance(gen_kwarg, tracked_str):
gen_kwarg = gen_kwarg.get_origin()
if isinstance(gen_kwarg, str) and gen_kwarg.startswith("hf://"):
resolved_path = HfFileSystem(endpoint=config.HF_ENDPOINT, token=token).resolve_path(gen_kwarg)
gen_kwarg = "hf://" + resolved_path.unresolve()
if "@" + resolved_path.revision in gen_kwarg:
gen_kwarg = (
gen_kwarg.replace("@" + resolved_path.revision, "", 1)
+ f" (at revision {resolved_path.revision})"
)
formatted_tracked_gen_kwargs.append(str(gen_kwarg))
if formatted_tracked_gen_kwargs:
explanation_message += f"\n\nThis happened while the {builder_name} dataset builder was generating data using\n\n{', '.join(formatted_tracked_gen_kwargs)}"
help_message = "\n\nPlease either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)"
return cls("An error occurred while generating the dataset" + explanation_message + help_message)
| datasets/src/datasets/exceptions.py/0 | {
"file_path": "datasets/src/datasets/exceptions.py",
"repo_id": "datasets",
"token_count": 1260
} | 68 |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" DatasetInfo and MetricInfo record information we know about a dataset and a metric.
This includes things that we know about the dataset statically, i.e.:
- description
- canonical location
- does it have validation and tests splits
- size
- etc.
This also includes the things that can and should be computed once we've
processed the dataset as well:
- number of examples (in each split)
- etc.
"""
import copy
import dataclasses
import json
import os
import posixpath
import warnings
from dataclasses import dataclass
from pathlib import Path
from typing import ClassVar, Dict, List, Optional, Union
import fsspec
from huggingface_hub import DatasetCard, DatasetCardData
from . import config
from .features import Features, Value
from .splits import SplitDict
from .tasks import TaskTemplate, task_template_from_dict
from .utils import Version
from .utils.logging import get_logger
from .utils.py_utils import asdict, unique_values
logger = get_logger(__name__)
@dataclass
class SupervisedKeysData:
input: str = ""
output: str = ""
@dataclass
class DownloadChecksumsEntryData:
key: str = ""
value: str = ""
class MissingCachedSizesConfigError(Exception):
"""The expected cached sizes of the download file are missing."""
class NonMatchingCachedSizesError(Exception):
"""The prepared split doesn't have expected sizes."""
@dataclass
class PostProcessedInfo:
features: Optional[Features] = None
resources_checksums: Optional[dict] = None
def __post_init__(self):
# Convert back to the correct classes when we reload from dict
if self.features is not None and not isinstance(self.features, Features):
self.features = Features.from_dict(self.features)
@classmethod
def from_dict(cls, post_processed_info_dict: dict) -> "PostProcessedInfo":
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in post_processed_info_dict.items() if k in field_names})
@dataclass
class DatasetInfo:
"""Information about a dataset.
    `DatasetInfo` documents a dataset, including its name, version, and features.
See the constructor arguments and properties for a full list.
Not all fields are known on construction and may be updated later.
Attributes:
description (`str`):
A description of the dataset.
citation (`str`):
A BibTeX citation of the dataset.
homepage (`str`):
A URL to the official homepage for the dataset.
license (`str`):
The dataset's license. It can be the name of the license or a paragraph containing the terms of the license.
features ([`Features`], *optional*):
The features used to specify the dataset's column types.
post_processed (`PostProcessedInfo`, *optional*):
Information regarding the resources of a possible post-processing of a dataset. For example, it can contain the information of an index.
supervised_keys (`SupervisedKeysData`, *optional*):
Specifies the input feature and the label for supervised learning if applicable for the dataset (legacy from TFDS).
builder_name (`str`, *optional*):
The name of the `GeneratorBasedBuilder` subclass used to create the dataset. Usually matched to the corresponding script name. It is also the snake_case version of the dataset builder class name.
config_name (`str`, *optional*):
The name of the configuration derived from [`BuilderConfig`].
version (`str` or [`Version`], *optional*):
The version of the dataset.
splits (`dict`, *optional*):
The mapping between split name and metadata.
download_checksums (`dict`, *optional*):
The mapping between the URL to download the dataset's checksums and corresponding metadata.
download_size (`int`, *optional*):
The size of the files to download to generate the dataset, in bytes.
post_processing_size (`int`, *optional*):
Size of the dataset in bytes after post-processing, if any.
dataset_size (`int`, *optional*):
The combined size in bytes of the Arrow tables for all splits.
size_in_bytes (`int`, *optional*):
The combined size in bytes of all files associated with the dataset (downloaded files + Arrow files).
task_templates (`List[TaskTemplate]`, *optional*):
The task templates to prepare the dataset for during training and evaluation. Each template casts the dataset's [`Features`] to standardized column names and types as detailed in `datasets.tasks`.
**config_kwargs (additional keyword arguments):
Keyword arguments to be passed to the [`BuilderConfig`] and used in the [`DatasetBuilder`].
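    Example (an illustrative construction; the field values here are placeholders, not from a real dataset):

    ```py
    >>> from datasets import DatasetInfo, Features, Value
    >>> info = DatasetInfo(
    ...     description="A toy dataset.",
    ...     features=Features({"text": Value("string"), "label": Value("int64")}),
    ... )
    ```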
"""
# Set in the dataset scripts
description: str = dataclasses.field(default_factory=str)
citation: str = dataclasses.field(default_factory=str)
homepage: str = dataclasses.field(default_factory=str)
license: str = dataclasses.field(default_factory=str)
features: Optional[Features] = None
post_processed: Optional[PostProcessedInfo] = None
supervised_keys: Optional[SupervisedKeysData] = None
task_templates: Optional[List[TaskTemplate]] = None
# Set later by the builder
builder_name: Optional[str] = None
dataset_name: Optional[str] = None # for packaged builders, to be different from builder_name
config_name: Optional[str] = None
version: Optional[Union[str, Version]] = None
# Set later by `download_and_prepare`
splits: Optional[dict] = None
download_checksums: Optional[dict] = None
download_size: Optional[int] = None
post_processing_size: Optional[int] = None
dataset_size: Optional[int] = None
size_in_bytes: Optional[int] = None
_INCLUDED_INFO_IN_YAML: ClassVar[List[str]] = [
"config_name",
"download_size",
"dataset_size",
"features",
"splits",
]
def __post_init__(self):
# Convert back to the correct classes when we reload from dict
if self.features is not None and not isinstance(self.features, Features):
self.features = Features.from_dict(self.features)
if self.post_processed is not None and not isinstance(self.post_processed, PostProcessedInfo):
self.post_processed = PostProcessedInfo.from_dict(self.post_processed)
if self.version is not None and not isinstance(self.version, Version):
if isinstance(self.version, str):
self.version = Version(self.version)
else:
self.version = Version.from_dict(self.version)
if self.splits is not None and not isinstance(self.splits, SplitDict):
self.splits = SplitDict.from_split_dict(self.splits)
if self.supervised_keys is not None and not isinstance(self.supervised_keys, SupervisedKeysData):
if isinstance(self.supervised_keys, (tuple, list)):
self.supervised_keys = SupervisedKeysData(*self.supervised_keys)
else:
self.supervised_keys = SupervisedKeysData(**self.supervised_keys)
# Parse and make a list of templates
if self.task_templates is not None:
if isinstance(self.task_templates, (list, tuple)):
templates = [
template if isinstance(template, TaskTemplate) else task_template_from_dict(template)
for template in self.task_templates
]
self.task_templates = [template for template in templates if template is not None]
elif isinstance(self.task_templates, TaskTemplate):
self.task_templates = [self.task_templates]
else:
template = task_template_from_dict(self.task_templates)
self.task_templates = [template] if template is not None else []
# Align task templates with features
if self.task_templates is not None:
self.task_templates = list(self.task_templates)
if self.features is not None:
self.task_templates = [
template.align_with_features(self.features) for template in (self.task_templates)
]
def write_to_directory(
self, dataset_info_dir, pretty_print=False, fs="deprecated", storage_options: Optional[dict] = None
):
"""Write `DatasetInfo` and license (if present) as JSON files to `dataset_info_dir`.
Args:
dataset_info_dir (`str`):
Destination directory.
pretty_print (`bool`, defaults to `False`):
If `True`, the JSON will be pretty-printed with the indent level of 4.
fs (`fsspec.spec.AbstractFileSystem`, *optional*):
Instance of the remote filesystem used to download the files from.
<Deprecated version="2.9.0">
`fs` was deprecated in version 2.9.0 and will be removed in 3.0.0.
Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
</Deprecated>
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.9.0"/>
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.info.write_to_directory("/path/to/directory/")
```
"""
if fs != "deprecated":
warnings.warn(
"'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n"
"You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
FutureWarning,
)
storage_options = fs.storage_options
fs: fsspec.AbstractFileSystem
fs, _, _ = fsspec.get_fs_token_paths(dataset_info_dir, storage_options=storage_options)
with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "wb") as f:
self._dump_info(f, pretty_print=pretty_print)
if self.license:
with fs.open(posixpath.join(dataset_info_dir, config.LICENSE_FILENAME), "wb") as f:
self._dump_license(f)
def _dump_info(self, file, pretty_print=False):
"""Dump info in `file` file-like object open in bytes mode (to support remote files)"""
file.write(json.dumps(asdict(self), indent=4 if pretty_print else None).encode("utf-8"))
def _dump_license(self, file):
"""Dump license in `file` file-like object open in bytes mode (to support remote files)"""
file.write(self.license.encode("utf-8"))
@classmethod
def from_merge(cls, dataset_infos: List["DatasetInfo"]):
dataset_infos = [dset_info.copy() for dset_info in dataset_infos if dset_info is not None]
if len(dataset_infos) > 0 and all(dataset_infos[0] == dset_info for dset_info in dataset_infos):
# if all dataset_infos are equal we don't need to merge. Just return the first.
return dataset_infos[0]
description = "\n\n".join(unique_values(info.description for info in dataset_infos)).strip()
citation = "\n\n".join(unique_values(info.citation for info in dataset_infos)).strip()
homepage = "\n\n".join(unique_values(info.homepage for info in dataset_infos)).strip()
license = "\n\n".join(unique_values(info.license for info in dataset_infos)).strip()
features = None
supervised_keys = None
task_templates = None
# Find common task templates across all dataset infos
all_task_templates = [info.task_templates for info in dataset_infos if info.task_templates is not None]
if len(all_task_templates) > 1:
task_templates = list(set(all_task_templates[0]).intersection(*all_task_templates[1:]))
elif len(all_task_templates):
task_templates = list(set(all_task_templates[0]))
# If no common task templates found, replace empty list with None
task_templates = task_templates if task_templates else None
return cls(
description=description,
citation=citation,
homepage=homepage,
license=license,
features=features,
supervised_keys=supervised_keys,
task_templates=task_templates,
)
@classmethod
def from_directory(
cls, dataset_info_dir: str, fs="deprecated", storage_options: Optional[dict] = None
) -> "DatasetInfo":
"""Create [`DatasetInfo`] from the JSON file in `dataset_info_dir`.
This function updates all the dynamically generated fields (num_examples,
hash, time of creation,...) of the [`DatasetInfo`].
This will overwrite all previous metadata.
Args:
dataset_info_dir (`str`):
The directory containing the metadata file. This
should be the root directory of a specific dataset version.
fs (`fsspec.spec.AbstractFileSystem`, *optional*):
Instance of the remote filesystem used to download the files from.
<Deprecated version="2.9.0">
`fs` was deprecated in version 2.9.0 and will be removed in 3.0.0.
Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
</Deprecated>
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.9.0"/>
Example:
```py
>>> from datasets import DatasetInfo
>>> ds_info = DatasetInfo.from_directory("/path/to/directory/")
```
"""
if fs != "deprecated":
warnings.warn(
"'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n"
"You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
FutureWarning,
)
storage_options = fs.storage_options
fs: fsspec.AbstractFileSystem
fs, _, _ = fsspec.get_fs_token_paths(dataset_info_dir, storage_options=storage_options)
logger.info(f"Loading Dataset info from {dataset_info_dir}")
if not dataset_info_dir:
raise ValueError("Calling DatasetInfo.from_directory() with undefined dataset_info_dir.")
with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "r", encoding="utf-8") as f:
dataset_info_dict = json.load(f)
return cls.from_dict(dataset_info_dict)
@classmethod
def from_dict(cls, dataset_info_dict: dict) -> "DatasetInfo":
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dataset_info_dict.items() if k in field_names})
def update(self, other_dataset_info: "DatasetInfo", ignore_none=True):
self_dict = self.__dict__
self_dict.update(
**{
k: copy.deepcopy(v)
for k, v in other_dataset_info.__dict__.items()
if (v is not None or not ignore_none)
}
)
def copy(self) -> "DatasetInfo":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
def _to_yaml_dict(self) -> dict:
yaml_dict = {}
dataset_info_dict = asdict(self)
for key in dataset_info_dict:
if key in self._INCLUDED_INFO_IN_YAML:
value = getattr(self, key)
if hasattr(value, "_to_yaml_list"): # Features, SplitDict
yaml_dict[key] = value._to_yaml_list()
elif hasattr(value, "_to_yaml_string"): # Version
yaml_dict[key] = value._to_yaml_string()
else:
yaml_dict[key] = value
return yaml_dict
@classmethod
def _from_yaml_dict(cls, yaml_data: dict) -> "DatasetInfo":
yaml_data = copy.deepcopy(yaml_data)
if yaml_data.get("features") is not None:
yaml_data["features"] = Features._from_yaml_list(yaml_data["features"])
if yaml_data.get("splits") is not None:
yaml_data["splits"] = SplitDict._from_yaml_list(yaml_data["splits"])
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in yaml_data.items() if k in field_names})
class DatasetInfosDict(Dict[str, DatasetInfo]):
def write_to_directory(self, dataset_infos_dir, overwrite=False, pretty_print=False) -> None:
total_dataset_infos = {}
dataset_infos_path = os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)
dataset_readme_path = os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)
if not overwrite:
total_dataset_infos = self.from_directory(dataset_infos_dir)
total_dataset_infos.update(self)
if os.path.exists(dataset_infos_path):
# for backward compatibility, let's update the JSON file if it exists
with open(dataset_infos_path, "w", encoding="utf-8") as f:
dataset_infos_dict = {
config_name: asdict(dset_info) for config_name, dset_info in total_dataset_infos.items()
}
json.dump(dataset_infos_dict, f, indent=4 if pretty_print else None)
# Dump the infos in the YAML part of the README.md file
if os.path.exists(dataset_readme_path):
dataset_card = DatasetCard.load(dataset_readme_path)
dataset_card_data = dataset_card.data
else:
dataset_card = None
dataset_card_data = DatasetCardData()
if total_dataset_infos:
total_dataset_infos.to_dataset_card_data(dataset_card_data)
dataset_card = (
DatasetCard("---\n" + str(dataset_card_data) + "\n---\n") if dataset_card is None else dataset_card
)
dataset_card.save(Path(dataset_readme_path))
@classmethod
def from_directory(cls, dataset_infos_dir) -> "DatasetInfosDict":
logger.info(f"Loading Dataset Infos from {dataset_infos_dir}")
# Load the info from the YAML part of README.md
if os.path.exists(os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)):
dataset_card_data = DatasetCard.load(Path(dataset_infos_dir) / config.REPOCARD_FILENAME).data
if "dataset_info" in dataset_card_data:
return cls.from_dataset_card_data(dataset_card_data)
if os.path.exists(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)):
# this is just to have backward compatibility with dataset_infos.json files
with open(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f:
return cls(
{
config_name: DatasetInfo.from_dict(dataset_info_dict)
for config_name, dataset_info_dict in json.load(f).items()
}
)
else:
return cls()
@classmethod
def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "DatasetInfosDict":
if isinstance(dataset_card_data.get("dataset_info"), (list, dict)):
if isinstance(dataset_card_data["dataset_info"], list):
return cls(
{
dataset_info_yaml_dict.get("config_name", "default"): DatasetInfo._from_yaml_dict(
dataset_info_yaml_dict
)
for dataset_info_yaml_dict in dataset_card_data["dataset_info"]
}
)
else:
dataset_info = DatasetInfo._from_yaml_dict(dataset_card_data["dataset_info"])
dataset_info.config_name = dataset_card_data["dataset_info"].get("config_name", "default")
return cls({dataset_info.config_name: dataset_info})
else:
return cls()
def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None:
if self:
# first get existing metadata info
if "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], dict):
dataset_metadata_infos = {
dataset_card_data["dataset_info"].get("config_name", "default"): dataset_card_data["dataset_info"]
}
elif "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], list):
dataset_metadata_infos = {
config_metadata["config_name"]: config_metadata
for config_metadata in dataset_card_data["dataset_info"]
}
else:
dataset_metadata_infos = {}
# update/rewrite existing metadata info with the one to dump
total_dataset_infos = {
**dataset_metadata_infos,
**{config_name: dset_info._to_yaml_dict() for config_name, dset_info in self.items()},
}
# the config_name from the dataset_infos_dict takes over the config_name of the DatasetInfo
for config_name, dset_info_yaml_dict in total_dataset_infos.items():
dset_info_yaml_dict["config_name"] = config_name
if len(total_dataset_infos) == 1:
# use a struct instead of a list of configurations, since there's only one
dataset_card_data["dataset_info"] = next(iter(total_dataset_infos.values()))
config_name = dataset_card_data["dataset_info"].pop("config_name", None)
if config_name != "default":
# if config_name is not "default" preserve it and put at the first position
dataset_card_data["dataset_info"] = {
"config_name": config_name,
**dataset_card_data["dataset_info"],
}
else:
dataset_card_data["dataset_info"] = []
for config_name, dataset_info_yaml_dict in sorted(total_dataset_infos.items()):
# add the config_name field in first position
dataset_info_yaml_dict.pop("config_name", None)
dataset_info_yaml_dict = {"config_name": config_name, **dataset_info_yaml_dict}
dataset_card_data["dataset_info"].append(dataset_info_yaml_dict)
@dataclass
class MetricInfo:
"""Information about a metric.
`MetricInfo` documents a metric, including its name, version, and features.
See the constructor arguments and properties for a full list.
Note: Not all fields are known on construction and may be updated later.
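    Example (an illustrative construction as a metric script's `_info()` might return it; the values are placeholders):

    ```py
    >>> from datasets import Features, Value
    >>> info = MetricInfo(
    ...     description="Accuracy metric.",
    ...     citation="",
    ...     features=Features({"predictions": Value("int64"), "references": Value("int64")}),
    ... )
    ```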
"""
# Set in the dataset scripts
description: str
citation: str
features: Features
inputs_description: str = dataclasses.field(default_factory=str)
homepage: str = dataclasses.field(default_factory=str)
license: str = dataclasses.field(default_factory=str)
codebase_urls: List[str] = dataclasses.field(default_factory=list)
reference_urls: List[str] = dataclasses.field(default_factory=list)
streamable: bool = False
format: Optional[str] = None
# Set later by the builder
metric_name: Optional[str] = None
config_name: Optional[str] = None
experiment_id: Optional[str] = None
def __post_init__(self):
if self.format is not None:
for key, value in self.features.items():
if not isinstance(value, Value):
raise ValueError(
f"When using 'numpy' format, all features should be a `datasets.Value` feature. "
f"Here {key} is an instance of {value.__class__.__name__}"
)
def write_to_directory(self, metric_info_dir, pretty_print=False):
"""Write `MetricInfo` as JSON to `metric_info_dir`.
        Also save the license separately in LICENSE.
If `pretty_print` is True, the JSON will be pretty-printed with the indent level of 4.
Example:
```py
>>> from datasets import load_metric
>>> metric = load_metric("accuracy")
>>> metric.info.write_to_directory("/path/to/directory/")
```
"""
with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), "w", encoding="utf-8") as f:
json.dump(asdict(self), f, indent=4 if pretty_print else None)
if self.license:
with open(os.path.join(metric_info_dir, config.LICENSE_FILENAME), "w", encoding="utf-8") as f:
f.write(self.license)
@classmethod
def from_directory(cls, metric_info_dir) -> "MetricInfo":
"""Create MetricInfo from the JSON file in `metric_info_dir`.
Args:
metric_info_dir: `str` The directory containing the metadata file. This
                should be the root directory of a specific metric version.
Example:
```py
>>> from datasets import MetricInfo
>>> metric_info = MetricInfo.from_directory("/path/to/directory/")
```
"""
logger.info(f"Loading Metric info from {metric_info_dir}")
if not metric_info_dir:
raise ValueError("Calling MetricInfo.from_directory() with undefined metric_info_dir.")
with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), encoding="utf-8") as f:
metric_info_dict = json.load(f)
return cls.from_dict(metric_info_dict)
@classmethod
def from_dict(cls, metric_info_dict: dict) -> "MetricInfo":
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in metric_info_dict.items() if k in field_names})
| datasets/src/datasets/info.py/0 | {
"file_path": "datasets/src/datasets/info.py",
"repo_id": "datasets",
"token_count": 11409
} | 69 |
import inspect
import re
from typing import Dict, List, Tuple
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache # noqa F401
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
from .webdataset import webdataset
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".geoparquet": ("parquet", {}),
".gpq": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
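# Example (illustrative): _EXTENSION_TO_MODULE[".tsv"] maps to ("csv", {"sep": "\t"}), so ".tsv" files
# are read by the csv module with a tab separator, and _MODULE_TO_EXTENSIONS["csv"] ends up as
# [".csv", ".tsv", ".zip"].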
| datasets/src/datasets/packaged_modules/__init__.py/0 | {
"file_path": "datasets/src/datasets/packaged_modules/__init__.py",
"repo_id": "datasets",
"token_count": 1108
} | 70 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
"""BuilderConfig for JSON."""
features: Optional[datasets.Features] = None
encoding: str = "utf-8"
encoding_errors: Optional[str] = None
field: Optional[str] = None
use_threads: bool = True # deprecated
block_size: Optional[int] = None # deprecated
chunksize: int = 10 << 20 # 10MB
newlines_in_values: Optional[bool] = None
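# Example (illustrative): these options are usually not set directly but passed through `load_dataset`,
# e.g. `load_dataset("json", data_files="data.json", field="data")` reads the records found under the
# top-level "data" key of a single JSON document ("data.json" is a placeholder path).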
class Json(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = JsonConfig
def _info(self):
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
)
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features) - set(pa_table.column_names):
type = self.config.features.arrow_schema.field(column_name).type
pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.config.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
dataset = json.load(f)
# We keep only the field we are interested in
dataset = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
if isinstance(dataset, (list, tuple)):
keys = set().union(*[row.keys() for row in dataset])
mapping = {col: [row.get(col) for row in dataset] for col in keys}
else:
mapping = dataset
pa_table = pa.Table.from_pydict(mapping)
yield file_idx, self._cast_table(pa_table)
# If the file has one json object per line
else:
with open(file, "rb") as f:
batch_idx = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
block_size = max(self.config.chunksize // 32, 16 << 10)
encoding_errors = (
self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
)
while True:
batch = f.read(self.config.chunksize)
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(f)
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
try:
while True:
try:
pa_table = paj.read_json(
io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
)
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(e, pa.ArrowInvalid)
and "straddling" not in str(e)
or block_size > len(batch)
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
)
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
file, encoding=self.config.encoding, errors=self.config.encoding_errors
) as f:
dataset = json.load(f)
except json.JSONDecodeError:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(dataset, list): # list is the only sequence type supported in JSON
try:
keys = set().union(*[row.keys() for row in dataset])
mapping = {col: [row.get(col) for row in dataset] for col in keys}
pa_table = pa.Table.from_pydict(mapping)
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
yield file_idx, self._cast_table(pa_table)
break
else:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise ValueError(
f"Not able to read records in the JSON file at {file}. "
f"You should probably indicate the field of the JSON file containing your records. "
f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table)
batch_idx += 1
| datasets/src/datasets/packaged_modules/json/json.py/0 | {
"file_path": "datasets/src/datasets/packaged_modules/json/json.py",
"repo_id": "datasets",
"token_count": 4907
} | 71 |
import importlib.util
import os
import tempfile
from pathlib import PurePath
from typing import TYPE_CHECKING, Dict, List, NamedTuple, Optional, Union
import fsspec
import numpy as np
from .utils import logging
from .utils import tqdm as hf_tqdm
if TYPE_CHECKING:
from .arrow_dataset import Dataset # noqa: F401
try:
from elasticsearch import Elasticsearch # noqa: F401
except ImportError:
pass
try:
import faiss # noqa: F401
except ImportError:
pass
_has_elasticsearch = importlib.util.find_spec("elasticsearch") is not None
_has_faiss = importlib.util.find_spec("faiss") is not None
logger = logging.get_logger(__name__)
class MissingIndex(Exception):
pass
class SearchResults(NamedTuple):
scores: List[float]
indices: List[int]
class BatchedSearchResults(NamedTuple):
total_scores: List[List[float]]
total_indices: List[List[int]]
class NearestExamplesResults(NamedTuple):
scores: List[float]
examples: dict
class BatchedNearestExamplesResults(NamedTuple):
total_scores: List[List[float]]
total_examples: List[dict]
class BaseIndex:
"""Base class for indexing"""
def search(self, query, k: int = 10, **kwargs) -> SearchResults:
"""
To implement.
This method has to return the scores and the indices of the retrieved examples given a certain query.
"""
raise NotImplementedError
def search_batch(self, queries, k: int = 10, **kwargs) -> BatchedSearchResults:
"""Find the nearest examples indices to the query.
Args:
queries (`Union[List[str], np.ndarray]`): The queries as a list of strings if `column` is a text index or as a numpy array if `column` is a vector index.
k (`int`): The number of examples to retrieve per query.
        Output:
            total_scores (`List[List[float]]`): The retrieval scores of the retrieved examples per query.
            total_indices (`List[List[int]]`): The indices of the retrieved examples per query.
"""
total_scores, total_indices = [], []
for query in queries:
scores, indices = self.search(query, k)
total_scores.append(scores)
total_indices.append(indices)
return BatchedSearchResults(total_scores, total_indices)
def save(self, file: Union[str, PurePath]):
"""Serialize the index on disk"""
raise NotImplementedError
@classmethod
def load(cls, file: Union[str, PurePath]) -> "BaseIndex":
"""Deserialize the index from disk"""
raise NotImplementedError
class ElasticSearchIndex(BaseIndex):
"""
Sparse index using Elasticsearch. It is used to index text and run queries based on BM25 similarity.
An Elasticsearch server needs to be accessible, and a python client is declared with
```
es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
```
for example.
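    A minimal indexing/search sketch (illustrative; assumes a reachable Elasticsearch server on localhost:9200):

    ```
    index = ElasticSearchIndex(host="localhost", port=9200)
    index.add_documents(["the first document", "the second document"])
    scores, indices = index.search("first", k=1)
    ```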
"""
def __init__(
self,
host: Optional[str] = None,
port: Optional[int] = None,
es_client: Optional["Elasticsearch"] = None,
es_index_name: Optional[str] = None,
es_index_config: Optional[dict] = None,
):
if not _has_elasticsearch:
raise ImportError(
"You must install ElasticSearch to use ElasticSearchIndex. To do so you can run `pip install elasticsearch==7.7.1 for example`"
)
if es_client is not None and (host is not None or port is not None):
raise ValueError("Please specify either `es_client` or `(host, port)`, but not both.")
host = host or "localhost"
port = port or 9200
import elasticsearch.helpers # noqa: F401 - need this to properly load all the es features
from elasticsearch import Elasticsearch # noqa: F811
self.es_client = es_client if es_client is not None else Elasticsearch([{"host": host, "port": str(port)}])
self.es_index_name = (
es_index_name
if es_index_name is not None
else "huggingface_datasets_" + os.path.basename(tempfile.NamedTemporaryFile().name)
)
self.es_index_config = (
es_index_config
if es_index_config is not None
else {
"settings": {
"number_of_shards": 1,
"analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
},
"mappings": {"properties": {"text": {"type": "text", "analyzer": "standard", "similarity": "BM25"}}},
}
)
def add_documents(self, documents: Union[List[str], "Dataset"], column: Optional[str] = None):
"""
Add documents to the index.
If the documents are inside a certain column, you can specify it using the `column` argument.
"""
index_name = self.es_index_name
index_config = self.es_index_config
self.es_client.indices.create(index=index_name, body=index_config)
number_of_docs = len(documents)
progress = hf_tqdm(unit="docs", total=number_of_docs)
successes = 0
def passage_generator():
if column is not None:
for i, example in enumerate(documents):
yield {"text": example[column], "_id": i}
else:
for i, example in enumerate(documents):
yield {"text": example, "_id": i}
# create the ES index
import elasticsearch as es
for ok, action in es.helpers.streaming_bulk(
client=self.es_client,
index=index_name,
actions=passage_generator(),
):
progress.update(1)
successes += ok
if successes != len(documents):
logger.warning(
f"Some documents failed to be added to ElasticSearch. Failures: {len(documents)-successes}/{len(documents)}"
)
logger.info(f"Indexed {successes:d} documents")
def search(self, query: str, k=10, **kwargs) -> SearchResults:
"""Find the nearest examples indices to the query.
Args:
query (`str`): The query as a string.
k (`int`): The number of examples to retrieve.
        Output:
            scores (`List[float]`): The retrieval scores of the retrieved examples.
            indices (`List[int]`): The indices of the retrieved examples.
"""
response = self.es_client.search(
index=self.es_index_name,
body={"query": {"multi_match": {"query": query, "fields": ["text"], "type": "cross_fields"}}, "size": k},
**kwargs,
)
hits = response["hits"]["hits"]
return SearchResults([hit["_score"] for hit in hits], [int(hit["_id"]) for hit in hits])
def search_batch(self, queries, k: int = 10, max_workers=10, **kwargs) -> BatchedSearchResults:
import concurrent.futures
total_scores, total_indices = [None] * len(queries), [None] * len(queries)
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
future_to_index = {executor.submit(self.search, query, k, **kwargs): i for i, query in enumerate(queries)}
for future in concurrent.futures.as_completed(future_to_index):
index = future_to_index[future]
results: SearchResults = future.result()
total_scores[index] = results.scores
total_indices[index] = results.indices
return BatchedSearchResults(total_indices=total_indices, total_scores=total_scores)
class FaissIndex(BaseIndex):
"""
Dense index using Faiss. It is used to index vectors.
Faiss is a library for efficient similarity search and clustering of dense vectors.
It contains algorithms that search in sets of vectors of any size, up to ones that possibly do not fit in RAM.
You can find more information about Faiss here:
- For index types and the string factory: https://github.com/facebookresearch/faiss/wiki/The-index-factory
- For GPU settings: https://github.com/facebookresearch/faiss/wiki/Faiss-on-the-GPU
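    A minimal usage sketch (illustrative; assumes small 2-dimensional float32 vectors):

    ```py
    >>> import numpy as np
    >>> index = FaissIndex(string_factory="Flat")
    >>> index.add_vectors(np.array([[0.0, 1.0], [1.0, 0.0]], dtype=np.float32))
    >>> scores, indices = index.search(np.array([0.0, 1.0], dtype=np.float32), k=1)
    ```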
"""
def __init__(
self,
device: Optional[Union[int, List[int]]] = None,
string_factory: Optional[str] = None,
metric_type: Optional[int] = None,
custom_index: Optional["faiss.Index"] = None,
):
"""
Create a Dense index using Faiss. You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
You can find more information about Faiss here:
- For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
"""
if string_factory is not None and custom_index is not None:
raise ValueError("Please specify either `string_factory` or `custom_index` but not both.")
if device is not None and custom_index is not None:
raise ValueError(
"Cannot pass both 'custom_index' and 'device'. "
"Pass 'custom_index' already transferred to the target device instead."
)
self.device = device
self.string_factory = string_factory
self.metric_type = metric_type
self.faiss_index = custom_index
if not _has_faiss:
raise ImportError(
"You must install Faiss to use FaissIndex. To do so you can run `conda install -c pytorch faiss-cpu` or `conda install -c pytorch faiss-gpu`. "
"A community supported package is also available on pypi: `pip install faiss-cpu` or `pip install faiss-gpu`. "
"Note that pip may not have the latest version of FAISS, and thus, some of the latest features and bug fixes may not be available."
)
def add_vectors(
self,
vectors: Union[np.array, "Dataset"],
column: Optional[str] = None,
batch_size: int = 1000,
train_size: Optional[int] = None,
faiss_verbose: Optional[bool] = None,
):
"""
Add vectors to the index.
If the arrays are inside a certain column, you can specify it using the `column` argument.
"""
import faiss # noqa: F811
# Create index
if self.faiss_index is None:
size = len(vectors[0]) if column is None else len(vectors[0][column])
if self.string_factory is not None:
if self.metric_type is None:
index = faiss.index_factory(size, self.string_factory)
else:
index = faiss.index_factory(size, self.string_factory, self.metric_type)
else:
if self.metric_type is None:
index = faiss.IndexFlat(size)
else:
index = faiss.IndexFlat(size, self.metric_type)
self.faiss_index = self._faiss_index_to_device(index, self.device)
logger.info(f"Created faiss index of type {type(self.faiss_index)}")
# Set verbosity level
if faiss_verbose is not None:
self.faiss_index.verbose = faiss_verbose
if hasattr(self.faiss_index, "index") and self.faiss_index.index is not None:
self.faiss_index.index.verbose = faiss_verbose
if hasattr(self.faiss_index, "quantizer") and self.faiss_index.quantizer is not None:
self.faiss_index.quantizer.verbose = faiss_verbose
if hasattr(self.faiss_index, "clustering_index") and self.faiss_index.clustering_index is not None:
self.faiss_index.clustering_index.verbose = faiss_verbose
# Train
if train_size is not None:
train_vecs = vectors[:train_size] if column is None else vectors[:train_size][column]
logger.info(f"Training the index with the first {len(train_vecs)} vectors")
self.faiss_index.train(train_vecs)
else:
logger.info("Ignored the training step of the faiss index as `train_size` is None.")
# Add vectors
logger.info(f"Adding {len(vectors)} vectors to the faiss index")
for i in hf_tqdm(range(0, len(vectors), batch_size)):
vecs = vectors[i : i + batch_size] if column is None else vectors[i : i + batch_size][column]
self.faiss_index.add(vecs)
@staticmethod
def _faiss_index_to_device(index: "faiss.Index", device: Optional[Union[int, List[int]]] = None) -> "faiss.Index":
"""
Sends a faiss index to a device.
        A device can be a positive integer (GPU id), a negative integer (all GPUs),
        a list of positive integers (GPUs to use), or `None` for CPU.
"""
# If device is not specified, then it runs on CPU.
if device is None:
return index
import faiss # noqa: F811
# If the device id is given as an integer
if isinstance(device, int):
# Positive integers are directly mapped to GPU ids
if device > -1:
faiss_res = faiss.StandardGpuResources()
index = faiss.index_cpu_to_gpu(faiss_res, device, index)
# And negative integers mean using all GPUs
else:
index = faiss.index_cpu_to_all_gpus(index)
# Device ids given as a list mean mapping to those devices specified.
elif isinstance(device, (list, tuple)):
index = faiss.index_cpu_to_gpus_list(index, gpus=list(device))
else:
raise TypeError(
f"The argument type: {type(device)} is not expected. "
+ "Please pass in either nothing, a positive int, a negative int, or a list of positive ints."
)
return index
def search(self, query: np.array, k=10, **kwargs) -> SearchResults:
"""Find the nearest examples indices to the query.
Args:
query (`np.array`): The query as a numpy array.
k (`int`): The number of examples to retrieve.
        Output:
            scores (`List[float]`): The retrieval scores of the retrieved examples.
            indices (`List[int]`): The indices of the retrieved examples.
"""
if len(query.shape) != 1 and (len(query.shape) != 2 or query.shape[0] != 1):
raise ValueError("Shape of query is incorrect, it has to be either a 1D array or 2D (1, N)")
queries = query.reshape(1, -1)
if not queries.flags.c_contiguous:
queries = np.asarray(queries, order="C")
scores, indices = self.faiss_index.search(queries, k, **kwargs)
return SearchResults(scores[0], indices[0].astype(int))
def search_batch(self, queries: np.array, k=10, **kwargs) -> BatchedSearchResults:
"""Find the nearest examples indices to the queries.
Args:
queries (`np.array`): The queries as a numpy array.
k (`int`): The number of examples to retrieve.
        Output:
            total_scores (`List[List[float]]`): The retrieval scores of the retrieved examples per query.
            total_indices (`List[List[int]]`): The indices of the retrieved examples per query.
"""
if len(queries.shape) != 2:
raise ValueError("Shape of query must be 2D")
if not queries.flags.c_contiguous:
queries = np.asarray(queries, order="C")
scores, indices = self.faiss_index.search(queries, k, **kwargs)
return BatchedSearchResults(scores, indices.astype(int))
def save(self, file: Union[str, PurePath], storage_options: Optional[Dict] = None):
"""Serialize the FaissIndex on disk"""
import faiss # noqa: F811
if self.device is not None and isinstance(self.device, (int, list, tuple)):
index = faiss.index_gpu_to_cpu(self.faiss_index)
else:
index = self.faiss_index
with fsspec.open(str(file), "wb", **(storage_options or {})) as f:
faiss.write_index(index, faiss.BufferedIOWriter(faiss.PyCallbackIOWriter(f.write)))
@classmethod
def load(
cls,
file: Union[str, PurePath],
device: Optional[Union[int, List[int]]] = None,
storage_options: Optional[Dict] = None,
) -> "FaissIndex":
"""Deserialize the FaissIndex from disk"""
import faiss # noqa: F811
        # An instance of FaissIndex is essentially just a wrapper around a faiss index.
faiss_index = cls(device=device)
with fsspec.open(str(file), "rb", **(storage_options or {})) as f:
index = faiss.read_index(faiss.BufferedIOReader(faiss.PyCallbackIOReader(f.read)))
faiss_index.faiss_index = faiss_index._faiss_index_to_device(index, faiss_index.device)
return faiss_index
class IndexableMixin:
"""Add indexing features to `datasets.Dataset`"""
def __init__(self):
self._indexes: Dict[str, BaseIndex] = {}
def __len__(self):
raise NotImplementedError
def __getitem__(self, key):
raise NotImplementedError
def is_index_initialized(self, index_name: str) -> bool:
return index_name in self._indexes
def _check_index_is_initialized(self, index_name: str):
if not self.is_index_initialized(index_name):
raise MissingIndex(
f"Index with index_name '{index_name}' not initialized yet. Please make sure that you call `add_faiss_index` or `add_elasticsearch_index` first."
)
def list_indexes(self) -> List[str]:
"""List the `colindex_nameumns`/identifiers of all the attached indexes."""
return list(self._indexes)
def get_index(self, index_name: str) -> BaseIndex:
"""List the `index_name`/identifiers of all the attached indexes.
Args:
index_name (`str`): Index name.
Returns:
[`BaseIndex`]
"""
self._check_index_is_initialized(index_name)
return self._indexes[index_name]
def add_faiss_index(
self,
column: str,
index_name: Optional[str] = None,
device: Optional[Union[int, List[int]]] = None,
string_factory: Optional[str] = None,
metric_type: Optional[int] = None,
custom_index: Optional["faiss.Index"] = None,
batch_size: int = 1000,
train_size: Optional[int] = None,
faiss_verbose: bool = False,
):
"""Add a dense index using Faiss for fast retrieval.
The index is created using the vectors of the specified column.
You can specify `device` if you want to run it on GPU (`device` must be the GPU index, see more below).
You can find more information about Faiss here:
- For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
Args:
column (`str`): The column of the vectors to add to the index.
index_name (Optional `str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
By default it corresponds to `column`.
device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is IndexFlatIP.
metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs.
batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
<Added version="2.4.0"/>
train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index.
faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index.
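        Example (illustrative; assumes `ds` is a `Dataset` whose `embeddings` column holds float32 vectors and `query_embedding` is a query vector of the same dimension):

        ```py
        >>> ds.add_faiss_index(column="embeddings")
        >>> scores, examples = ds.get_nearest_examples("embeddings", query_embedding, k=10)
        ```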
"""
index_name = index_name if index_name is not None else column
faiss_index = FaissIndex(
device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index
)
faiss_index.add_vectors(
self, column=column, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose
)
self._indexes[index_name] = faiss_index
def add_faiss_index_from_external_arrays(
self,
external_arrays: np.array,
index_name: str,
device: Optional[Union[int, List[int]]] = None,
string_factory: Optional[str] = None,
metric_type: Optional[int] = None,
custom_index: Optional["faiss.Index"] = None,
batch_size: int = 1000,
train_size: Optional[int] = None,
faiss_verbose: bool = False,
):
"""Add a dense index using Faiss for fast retrieval.
The index is created using the vectors of `external_arrays`.
You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
You can find more information about Faiss here:
- For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
Args:
external_arrays (`np.array`): If you want to use arrays from outside the lib for the index, you can set `external_arrays`.
It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`.
index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is IndexFlatIP.
metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs.
batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
<Added version="2.4.0"/>
train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index.
faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index.
"""
faiss_index = FaissIndex(
device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index
)
faiss_index.add_vectors(
external_arrays, column=None, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose
)
self._indexes[index_name] = faiss_index
def save_faiss_index(self, index_name: str, file: Union[str, PurePath], storage_options: Optional[Dict] = None):
"""Save a FaissIndex on disk.
Args:
index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`).
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.11.0"/>
"""
index = self.get_index(index_name)
if not isinstance(index, FaissIndex):
raise ValueError(f"Index '{index_name}' is not a FaissIndex but a '{type(index)}'")
index.save(file, storage_options=storage_options)
logger.info(f"Saved FaissIndex {index_name} at {file}")
def load_faiss_index(
self,
index_name: str,
file: Union[str, PurePath],
device: Optional[Union[int, List[int]]] = None,
storage_options: Optional[Dict] = None,
):
"""Load a FaissIndex from disk.
If you want to do additional configurations, you can have access to the faiss index object by doing
`.get_index(index_name).faiss_index` to make it fit your needs.
Args:
index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to
call `.get_nearest` or `.search`.
file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`).
device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.11.0"/>
"""
index = FaissIndex.load(file, device=device, storage_options=storage_options)
if index.faiss_index.ntotal != len(self):
raise ValueError(
f"Index size should match Dataset size, but Index '{index_name}' at {file} has {index.faiss_index.ntotal} elements while the dataset has {len(self)} examples."
)
self._indexes[index_name] = index
logger.info(f"Loaded FaissIndex {index_name} from {file}")
def add_elasticsearch_index(
self,
column: str,
index_name: Optional[str] = None,
host: Optional[str] = None,
port: Optional[int] = None,
es_client: Optional["Elasticsearch"] = None,
es_index_name: Optional[str] = None,
es_index_config: Optional[dict] = None,
):
"""Add a text index using ElasticSearch for fast retrieval.
Args:
column (`str`): The column of the documents to add to the index.
index_name (Optional `str`): The index_name/identifier of the index. This is the index name that is used to call `.get_nearest` or `.search`.
By default it corresponds to `column`.
host (Optional `str`, defaults to localhost):
host of where ElasticSearch is running
port (Optional `str`, defaults to 9200):
port of where ElasticSearch is running
es_client (Optional `elasticsearch.Elasticsearch`):
The elasticsearch client used to create the index if host and port are None.
es_index_name (Optional `str`): The elasticsearch index name used to create the index.
es_index_config (Optional `dict`):
The configuration of the elasticsearch index.
Default config is:
Config::
{
"settings": {
"number_of_shards": 1,
"analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
},
"mappings": {
"properties": {
"text": {
"type": "text",
"analyzer": "standard",
"similarity": "BM25"
},
}
},
}
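        Example (illustrative; assumes `ds` is a `Dataset` with a `text` column and a reachable Elasticsearch server):

        ```py
        >>> ds.add_elasticsearch_index(column="text", host="localhost", port=9200)
        >>> scores, examples = ds.get_nearest_examples("text", "my query", k=5)
        ```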
"""
index_name = index_name if index_name is not None else column
es_index = ElasticSearchIndex(
host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config
)
es_index.add_documents(self, column=column)
self._indexes[index_name] = es_index
def load_elasticsearch_index(
self,
index_name: str,
es_index_name: str,
host: Optional[str] = None,
port: Optional[int] = None,
es_client: Optional["Elasticsearch"] = None,
es_index_config: Optional[dict] = None,
):
"""Load an existing text index using ElasticSearch for fast retrieval.
Args:
index_name (`str`):
The `index_name`/identifier of the index. This is the index name that is used to call `get_nearest` or `search`.
es_index_name (`str`):
The name of elasticsearch index to load.
host (`str`, *optional*, defaults to `localhost`):
Host of where ElasticSearch is running.
port (`str`, *optional*, defaults to `9200`):
Port of where ElasticSearch is running.
es_client (`elasticsearch.Elasticsearch`, *optional*):
The elasticsearch client used to create the index if host and port are `None`.
es_index_config (`dict`, *optional*):
The configuration of the elasticsearch index.
Default config is:
```
{
"settings": {
"number_of_shards": 1,
"analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
},
"mappings": {
"properties": {
"text": {
"type": "text",
"analyzer": "standard",
"similarity": "BM25"
},
}
},
}
```
"""
self._indexes[index_name] = ElasticSearchIndex(
host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config
)
def drop_index(self, index_name: str):
"""Drop the index with the specified column.
Args:
index_name (`str`):
The `index_name`/identifier of the index.
"""
del self._indexes[index_name]
def search(self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs) -> SearchResults:
"""Find the nearest examples indices in the dataset to the query.
Args:
index_name (`str`):
The name/identifier of the index.
query (`Union[str, np.ndarray]`):
The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
k (`int`):
The number of examples to retrieve.
Returns:
`(scores, indices)`:
A tuple of `(scores, indices)` where:
                - **scores** (`List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples
                - **indices** (`List[int]`): the indices of the retrieved examples
"""
self._check_index_is_initialized(index_name)
return self._indexes[index_name].search(query, k, **kwargs)
def search_batch(
self, index_name: str, queries: Union[List[str], np.array], k: int = 10, **kwargs
) -> BatchedSearchResults:
"""Find the nearest examples indices in the dataset to the query.
Args:
index_name (`str`):
The `index_name`/identifier of the index.
queries (`Union[List[str], np.ndarray]`):
The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
k (`int`):
The number of examples to retrieve per query.
Returns:
`(total_scores, total_indices)`:
A tuple of `(total_scores, total_indices)` where:
                - **total_scores** (`List[List[float]]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query
- **total_indices** (`List[List[int]]`): the indices of the retrieved examples per query
"""
self._check_index_is_initialized(index_name)
return self._indexes[index_name].search_batch(queries, k, **kwargs)
def get_nearest_examples(
self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs
) -> NearestExamplesResults:
"""Find the nearest examples in the dataset to the query.
Args:
index_name (`str`):
The index_name/identifier of the index.
query (`Union[str, np.ndarray]`):
The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
k (`int`):
The number of examples to retrieve.
Returns:
`(scores, examples)`:
A tuple of `(scores, examples)` where:
- **scores** (`List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples
- **examples** (`dict`): the retrieved examples
"""
self._check_index_is_initialized(index_name)
scores, indices = self.search(index_name, query, k, **kwargs)
top_indices = [i for i in indices if i >= 0]
return NearestExamplesResults(scores[: len(top_indices)], self[top_indices])
def get_nearest_examples_batch(
self, index_name: str, queries: Union[List[str], np.array], k: int = 10, **kwargs
) -> BatchedNearestExamplesResults:
"""Find the nearest examples in the dataset to the query.
Args:
index_name (`str`):
The `index_name`/identifier of the index.
queries (`Union[List[str], np.ndarray]`):
The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
k (`int`):
The number of examples to retrieve per query.
Returns:
`(total_scores, total_examples)`:
A tuple of `(total_scores, total_examples)` where:
                - **total_scores** (`List[List[float]]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query
- **total_examples** (`List[dict]`): the retrieved examples per query
"""
self._check_index_is_initialized(index_name)
total_scores, total_indices = self.search_batch(index_name, queries, k, **kwargs)
total_scores = [
scores_i[: len([i for i in indices_i if i >= 0])]
for scores_i, indices_i in zip(total_scores, total_indices)
]
total_samples = [self[[i for i in indices if i >= 0]] for indices in total_indices]
return BatchedNearestExamplesResults(total_scores, total_samples)
| datasets/src/datasets/search.py/0 | {
"file_path": "datasets/src/datasets/search.py",
"repo_id": "datasets",
"token_count": 15237
} | 72 |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""Utilities to handle file locking in `datasets`."""
import os
from filelock import FileLock as FileLock_
from filelock import UnixFileLock
from filelock import __version__ as _filelock_version
from packaging import version
class FileLock(FileLock_):
"""
    A `filelock.FileLock` subclass that handles long paths.
It also uses the current umask for lock files.
"""
MAX_FILENAME_LENGTH = 255
def __init__(self, lock_file, *args, **kwargs):
# The "mode" argument is required if we want to use the current umask in filelock >= 3.10
        # In previous versions, it already used the current umask.
if "mode" not in kwargs and version.parse(_filelock_version) >= version.parse("3.10.0"):
umask = os.umask(0o666)
os.umask(umask)
kwargs["mode"] = 0o666 & ~umask
lock_file = self.hash_filename_if_too_long(lock_file)
super().__init__(lock_file, *args, **kwargs)
@classmethod
def hash_filename_if_too_long(cls, path: str) -> str:
path = os.path.abspath(os.path.expanduser(path))
filename = os.path.basename(path)
max_filename_length = cls.MAX_FILENAME_LENGTH
if issubclass(cls, UnixFileLock):
max_filename_length = min(max_filename_length, os.statvfs(os.path.dirname(path)).f_namemax)
if len(filename) > max_filename_length:
dirname = os.path.dirname(path)
hashed_filename = str(hash(filename))
new_filename = (
filename[: max_filename_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
)
return os.path.join(dirname, new_filename)
else:
return path
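# --- Illustrative usage (added note, not part of the original module) ---
# A brief sketch of how this lock is meant to be used; the path below is an arbitrary example.
# Filenames longer than the filesystem limit (255 characters by default) are hashed transparently
# by `hash_filename_if_too_long` before the lock file is created.
#
#   lock_path = "/tmp/" + "x" * 300 + ".lock"   # would exceed the usual filename length limit
#   with FileLock(lock_path):
#       ...  # critical section protected across processes sharing the same lock path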
| datasets/src/datasets/utils/_filelock.py/0 | {
"file_path": "datasets/src/datasets/utils/_filelock.py",
"repo_id": "datasets",
"token_count": 903
} | 73 |
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Address all TODOs and remove all explanatory comments
"""TODO: Add a description here."""
import csv
import json
import os
import datasets
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""
# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""
# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = {
"first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
"second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
}
# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
class NewDataset(datasets.GeneratorBasedBuilder):
"""TODO: Short description of my dataset."""
VERSION = datasets.Version("1.1.0")
# This is an example of a dataset with multiple configurations.
# If you don't want/need to define several sub-sets in your dataset,
# just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
    # If you need to make complex sub-parts in the dataset with configurable options,
    # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
# BUILDER_CONFIG_CLASS = MyBuilderConfig
# You will be able to load one or the other configurations in the following list with
# data = datasets.load_dataset('my_dataset', 'first_domain')
# data = datasets.load_dataset('my_dataset', 'second_domain')
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
]
DEFAULT_CONFIG_NAME = "first_domain" # It's not mandatory to have a default configuration. Just use one if it make sense.
def _info(self):
        # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
if self.config.name == "first_domain": # This is the name of the configuration selected in BUILDER_CONFIGS above
features = datasets.Features(
{
"sentence": datasets.Value("string"),
"option1": datasets.Value("string"),
"answer": datasets.Value("string")
# These are the features of your dataset like images, labels ...
}
)
else: # This is an example to show how to have different features for "first_domain" and "second_domain"
features = datasets.Features(
{
"sentence": datasets.Value("string"),
"option2": datasets.Value("string"),
"second_domain_answer": datasets.Value("string")
# These are the features of your dataset like images, labels ...
}
)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=features, # Here we define them above because they are different between the two configurations
# If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
# specify them. They'll be used if as_supervised=True in builder.as_dataset.
# supervised_keys=("sentence", "label"),
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
# TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
# If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
        # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
urls = _URLS[self.config.name]
data_dir = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "train.jsonl"),
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "dev.jsonl"),
"split": "dev",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "test.jsonl"),
"split": "test"
},
),
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, filepath, split):
# TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
# The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
with open(filepath, encoding="utf-8") as f:
for key, row in enumerate(f):
data = json.loads(row)
if self.config.name == "first_domain":
# Yields examples as (key, example) tuples
yield key, {
"sentence": data["sentence"],
"option1": data["option1"],
"answer": "" if split == "test" else data["answer"],
}
else:
yield key, {
"sentence": data["sentence"],
"option2": data["option2"],
"second_domain_answer": "" if split == "test" else data["second_domain_answer"],
}
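# --- Illustrative usage (added note, not part of the template) ---
# Once the TODOs above are filled in and the script is saved locally (the path and config name
# below are placeholders), the dataset can be loaded with `datasets.load_dataset`:
#
#   from datasets import load_dataset
#
#   ds = load_dataset("path/to/new_dataset_script.py", "first_domain", split="train")
#   print(ds[0])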
| datasets/templates/new_dataset_script.py/0 | {
"file_path": "datasets/templates/new_dataset_script.py",
"repo_id": "datasets",
"token_count": 3156
} | 74 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = SqlDatasetReader(
"dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
).read()
_check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
_check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
cur = con.cursor()
cur.execute("SELECT * FROM dataset")
for row in cur:
yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
original_sql = iter_sql_file(sqlite_path)
expected_sql = iter_sql_file(output_sqlite_path)
for row1, row2 in zip(original_sql, expected_sql):
assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()
original_sql = iter_sql_file(sqlite_path)
expected_sql = iter_sql_file(output_sqlite_path)
for row1, row2 in zip(original_sql, expected_sql):
assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
with pytest.raises(ValueError):
SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
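# --- For reference (added note, not part of the test module) ---
# SqlDatasetReader and SqlDatasetWriter back the public `Dataset.from_sql` / `Dataset.to_sql`
# helpers, which can be used roughly as sketched below; the table and file names are placeholders.
#
#   from datasets import Dataset
#
#   ds = Dataset.from_sql("dataset", "sqlite:///input.db")      # read the "dataset" table
#   ds.to_sql("dataset_copy", "sqlite:///output.db")            # write it to another SQLite database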
| datasets/tests/io/test_sql.py/0 | {
"file_path": "datasets/tests/io/test_sql.py",
"repo_id": "datasets",
"token_count": 1628
} | 75 |
import importlib
import os
import tempfile
import types
from contextlib import nullcontext as does_not_raise
from multiprocessing import Process
from pathlib import Path
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from multiprocess.pool import Pool
from datasets.arrow_dataset import Dataset
from datasets.arrow_reader import DatasetNotOnHfGcsError
from datasets.arrow_writer import ArrowWriter
from datasets.builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from datasets.dataset_dict import DatasetDict, IterableDatasetDict
from datasets.download.download_manager import DownloadMode
from datasets.features import Features, Value
from datasets.info import DatasetInfo, PostProcessedInfo
from datasets.iterable_dataset import IterableDataset
from datasets.load import configure_builder_class
from datasets.splits import Split, SplitDict, SplitGenerator, SplitInfo
from datasets.streaming import xjoin
from datasets.utils.file_utils import is_local_path
from datasets.utils.info_utils import VerificationMode
from datasets.utils.logging import INFO, get_logger
from .utils import (
assert_arrow_memory_doesnt_increase,
assert_arrow_memory_increases,
require_beam,
require_faiss,
set_current_working_directory_to_temp_dir,
)
class DummyBuilder(DatasetBuilder):
def _info(self):
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
return [SplitGenerator(name=Split.TRAIN)]
def _prepare_split(self, split_generator, **kwargs):
fname = f"{self.dataset_name}-{split_generator.name}.arrow"
with ArrowWriter(features=self.info.features, path=os.path.join(self._output_dir, fname)) as writer:
writer.write_batch({"text": ["foo"] * 100})
num_examples, num_bytes = writer.finalize()
split_generator.split_info.num_examples = num_examples
split_generator.split_info.num_bytes = num_bytes
class DummyGeneratorBasedBuilder(GeneratorBasedBuilder):
def _info(self):
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
return [SplitGenerator(name=Split.TRAIN)]
def _generate_examples(self):
for i in range(100):
yield i, {"text": "foo"}
class DummyArrowBasedBuilder(ArrowBasedBuilder):
def _info(self):
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
return [SplitGenerator(name=Split.TRAIN)]
def _generate_tables(self):
for i in range(10):
yield i, pa.table({"text": ["foo"] * 10})
class DummyBeamBasedBuilder(BeamBasedBuilder):
def _info(self):
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
return [SplitGenerator(name=Split.TRAIN)]
def _build_pcollection(self, pipeline):
import apache_beam as beam
def _process(item):
for i in range(10):
yield f"{i}_{item}", {"text": "foo"}
return pipeline | "Initialize" >> beam.Create(range(10)) | "Extract content" >> beam.FlatMap(_process)
class DummyGeneratorBasedBuilderWithIntegers(GeneratorBasedBuilder):
def _info(self):
return DatasetInfo(features=Features({"id": Value("int8")}))
def _split_generators(self, dl_manager):
return [SplitGenerator(name=Split.TRAIN)]
def _generate_examples(self):
for i in range(100):
yield i, {"id": i}
class DummyGeneratorBasedBuilderConfig(BuilderConfig):
def __init__(self, content="foo", times=2, *args, **kwargs):
super().__init__(*args, **kwargs)
self.content = content
self.times = times
class DummyGeneratorBasedBuilderWithConfig(GeneratorBasedBuilder):
BUILDER_CONFIG_CLASS = DummyGeneratorBasedBuilderConfig
def _info(self):
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
return [SplitGenerator(name=Split.TRAIN)]
def _generate_examples(self):
for i in range(100):
yield i, {"text": self.config.content * self.config.times}
class DummyBuilderWithMultipleConfigs(DummyBuilder):
BUILDER_CONFIGS = [
DummyGeneratorBasedBuilderConfig(name="a"),
DummyGeneratorBasedBuilderConfig(name="b"),
]
class DummyBuilderWithDefaultConfig(DummyBuilderWithMultipleConfigs):
DEFAULT_CONFIG_NAME = "a"
class DummyBuilderWithDownload(DummyBuilder):
def __init__(self, *args, rel_path=None, abs_path=None, **kwargs):
super().__init__(*args, **kwargs)
self._rel_path = rel_path
self._abs_path = abs_path
def _split_generators(self, dl_manager):
if self._rel_path is not None:
assert os.path.exists(dl_manager.download(self._rel_path)), "dl_manager must support relative paths"
if self._abs_path is not None:
assert os.path.exists(dl_manager.download(self._abs_path)), "dl_manager must support absolute paths"
return [SplitGenerator(name=Split.TRAIN)]
class DummyBuilderWithManualDownload(DummyBuilderWithMultipleConfigs):
@property
def manual_download_instructions(self):
return "To use the dataset you have to download some stuff manually and pass the data path to data_dir"
def _split_generators(self, dl_manager):
if not os.path.exists(self.config.data_dir):
raise FileNotFoundError(f"data_dir {self.config.data_dir} doesn't exist.")
return [SplitGenerator(name=Split.TRAIN)]
class DummyArrowBasedBuilderWithShards(ArrowBasedBuilder):
def _info(self):
return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")}))
def _split_generators(self, dl_manager):
return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepaths": [f"data{i}.txt" for i in range(4)]})]
def _generate_tables(self, filepaths):
idx = 0
for filepath in filepaths:
for i in range(10):
yield idx, pa.table({"id": range(10 * i, 10 * (i + 1)), "filepath": [filepath] * 10})
idx += 1
class DummyGeneratorBasedBuilderWithShards(GeneratorBasedBuilder):
def _info(self):
return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")}))
def _split_generators(self, dl_manager):
return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepaths": [f"data{i}.txt" for i in range(4)]})]
def _generate_examples(self, filepaths):
idx = 0
for filepath in filepaths:
for i in range(100):
yield idx, {"id": i, "filepath": filepath}
idx += 1
class DummyArrowBasedBuilderWithAmbiguousShards(ArrowBasedBuilder):
def _info(self):
return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")}))
def _split_generators(self, dl_manager):
return [
SplitGenerator(
name=Split.TRAIN,
gen_kwargs={
"filepaths": [f"data{i}.txt" for i in range(4)],
"dummy_kwarg_with_different_length": [f"dummy_data{i}.txt" for i in range(3)],
},
)
]
def _generate_tables(self, filepaths, dummy_kwarg_with_different_length):
idx = 0
for filepath in filepaths:
for i in range(10):
yield idx, pa.table({"id": range(10 * i, 10 * (i + 1)), "filepath": [filepath] * 10})
idx += 1
class DummyGeneratorBasedBuilderWithAmbiguousShards(GeneratorBasedBuilder):
def _info(self):
return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")}))
def _split_generators(self, dl_manager):
return [
SplitGenerator(
name=Split.TRAIN,
gen_kwargs={
"filepaths": [f"data{i}.txt" for i in range(4)],
"dummy_kwarg_with_different_length": [f"dummy_data{i}.txt" for i in range(3)],
},
)
]
def _generate_examples(self, filepaths, dummy_kwarg_with_different_length):
idx = 0
for filepath in filepaths:
for i in range(100):
yield idx, {"id": i, "filepath": filepath}
idx += 1
def _run_concurrent_download_and_prepare(tmp_dir):
builder = DummyBuilder(cache_dir=tmp_dir)
builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.REUSE_DATASET_IF_EXISTS)
return builder
def check_streaming(builder):
builders_module = importlib.import_module(builder.__module__)
assert builders_module._patched_for_streaming
assert builders_module.os.path.join is xjoin
class BuilderTest(TestCase):
def test_download_and_prepare(self):
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyBuilder(cache_dir=tmp_dir)
builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
self.assertTrue(
os.path.exists(
os.path.join(
tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow"
)
)
)
self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
self.assertEqual(builder.info.splits["train"].num_examples, 100)
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
)
def test_download_and_prepare_checksum_computation(self):
with tempfile.TemporaryDirectory() as tmp_dir:
builder_no_verification = DummyBuilder(cache_dir=tmp_dir)
builder_no_verification.download_and_prepare(
try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD
)
self.assertTrue(
all(v["checksum"] is not None for _, v in builder_no_verification.info.download_checksums.items())
)
builder_with_verification = DummyBuilder(cache_dir=tmp_dir)
builder_with_verification.download_and_prepare(
try_from_hf_gcs=False,
download_mode=DownloadMode.FORCE_REDOWNLOAD,
verification_mode=VerificationMode.ALL_CHECKS,
)
self.assertTrue(
all(v["checksum"] is None for _, v in builder_with_verification.info.download_checksums.items())
)
def test_concurrent_download_and_prepare(self):
with tempfile.TemporaryDirectory() as tmp_dir:
processes = 2
with Pool(processes=processes) as pool:
jobs = [
pool.apply_async(_run_concurrent_download_and_prepare, kwds={"tmp_dir": tmp_dir})
for _ in range(processes)
]
builders = [job.get() for job in jobs]
for builder in builders:
self.assertTrue(
os.path.exists(
os.path.join(
tmp_dir,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train.arrow",
)
)
)
self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
self.assertEqual(builder.info.splits["train"].num_examples, 100)
self.assertTrue(
os.path.exists(
os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json")
)
)
def test_download_and_prepare_with_base_path(self):
with tempfile.TemporaryDirectory() as tmp_dir:
rel_path = "dummy1.data"
abs_path = os.path.join(tmp_dir, "dummy2.data")
# test relative path is missing
builder = DummyBuilderWithDownload(cache_dir=tmp_dir, rel_path=rel_path)
with self.assertRaises(FileNotFoundError):
builder.download_and_prepare(
try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD, base_path=tmp_dir
)
# test absolute path is missing
builder = DummyBuilderWithDownload(cache_dir=tmp_dir, abs_path=abs_path)
with self.assertRaises(FileNotFoundError):
builder.download_and_prepare(
try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD, base_path=tmp_dir
)
# test that they are both properly loaded when they exist
open(os.path.join(tmp_dir, rel_path), "w")
open(abs_path, "w")
builder = DummyBuilderWithDownload(cache_dir=tmp_dir, rel_path=rel_path, abs_path=abs_path)
builder.download_and_prepare(
try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD, base_path=tmp_dir
)
self.assertTrue(
os.path.exists(
os.path.join(
tmp_dir,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train.arrow",
)
)
)
def test_as_dataset_with_post_process(self):
def _post_process(self, dataset, resources_paths):
def char_tokenize(example):
return {"tokens": list(example["text"])}
return dataset.map(char_tokenize, cache_file_name=resources_paths["tokenized_dataset"])
def _post_processing_resources(self, split):
return {"tokenized_dataset": f"tokenized_dataset-{split}.arrow"}
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyBuilder(cache_dir=tmp_dir)
builder.info.post_processed = PostProcessedInfo(
features=Features({"text": Value("string"), "tokens": [Value("string")]})
)
builder._post_process = types.MethodType(_post_process, builder)
builder._post_processing_resources = types.MethodType(_post_processing_resources, builder)
os.makedirs(builder.cache_dir)
builder.info.splits = SplitDict()
builder.info.splits.add(SplitInfo("train", num_examples=10))
builder.info.splits.add(SplitInfo("test", num_examples=10))
for split in builder.info.splits:
with ArrowWriter(
path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{split}.arrow"),
features=Features({"text": Value("string")}),
) as writer:
writer.write_batch({"text": ["foo"] * 10})
writer.finalize()
with ArrowWriter(
path=os.path.join(builder.cache_dir, f"tokenized_dataset-{split}.arrow"),
features=Features({"text": Value("string"), "tokens": [Value("string")]}),
) as writer:
writer.write_batch({"text": ["foo"] * 10, "tokens": [list("foo")] * 10})
writer.finalize()
dsets = builder.as_dataset()
self.assertIsInstance(dsets, DatasetDict)
self.assertListEqual(list(dsets.keys()), ["train", "test"])
self.assertEqual(len(dsets["train"]), 10)
self.assertEqual(len(dsets["test"]), 10)
self.assertDictEqual(
dsets["train"].features, Features({"text": Value("string"), "tokens": [Value("string")]})
)
self.assertDictEqual(
dsets["test"].features, Features({"text": Value("string"), "tokens": [Value("string")]})
)
self.assertListEqual(dsets["train"].column_names, ["text", "tokens"])
self.assertListEqual(dsets["test"].column_names, ["text", "tokens"])
del dsets
dset = builder.as_dataset("train")
self.assertIsInstance(dset, Dataset)
self.assertEqual(dset.split, "train")
self.assertEqual(len(dset), 10)
self.assertDictEqual(dset.features, Features({"text": Value("string"), "tokens": [Value("string")]}))
self.assertListEqual(dset.column_names, ["text", "tokens"])
self.assertGreater(builder.info.post_processing_size, 0)
self.assertGreater(
builder.info.post_processed.resources_checksums["train"]["tokenized_dataset"]["num_bytes"], 0
)
del dset
dset = builder.as_dataset("train+test[:30%]")
self.assertIsInstance(dset, Dataset)
self.assertEqual(dset.split, "train+test[:30%]")
self.assertEqual(len(dset), 13)
self.assertDictEqual(dset.features, Features({"text": Value("string"), "tokens": [Value("string")]}))
self.assertListEqual(dset.column_names, ["text", "tokens"])
del dset
dset = builder.as_dataset("all")
self.assertIsInstance(dset, Dataset)
self.assertEqual(dset.split, "train+test")
self.assertEqual(len(dset), 20)
self.assertDictEqual(dset.features, Features({"text": Value("string"), "tokens": [Value("string")]}))
self.assertListEqual(dset.column_names, ["text", "tokens"])
del dset
def _post_process(self, dataset, resources_paths):
return dataset.select([0, 1], keep_in_memory=True)
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyBuilder(cache_dir=tmp_dir)
builder._post_process = types.MethodType(_post_process, builder)
os.makedirs(builder.cache_dir)
builder.info.splits = SplitDict()
builder.info.splits.add(SplitInfo("train", num_examples=10))
builder.info.splits.add(SplitInfo("test", num_examples=10))
for split in builder.info.splits:
with ArrowWriter(
path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{split}.arrow"),
features=Features({"text": Value("string")}),
) as writer:
writer.write_batch({"text": ["foo"] * 10})
writer.finalize()
with ArrowWriter(
path=os.path.join(builder.cache_dir, f"small_dataset-{split}.arrow"),
features=Features({"text": Value("string")}),
) as writer:
writer.write_batch({"text": ["foo"] * 2})
writer.finalize()
dsets = builder.as_dataset()
self.assertIsInstance(dsets, DatasetDict)
self.assertListEqual(list(dsets.keys()), ["train", "test"])
self.assertEqual(len(dsets["train"]), 2)
self.assertEqual(len(dsets["test"]), 2)
self.assertDictEqual(dsets["train"].features, Features({"text": Value("string")}))
self.assertDictEqual(dsets["test"].features, Features({"text": Value("string")}))
self.assertListEqual(dsets["train"].column_names, ["text"])
self.assertListEqual(dsets["test"].column_names, ["text"])
del dsets
dset = builder.as_dataset("train")
self.assertIsInstance(dset, Dataset)
self.assertEqual(dset.split, "train")
self.assertEqual(len(dset), 2)
self.assertDictEqual(dset.features, Features({"text": Value("string")}))
self.assertListEqual(dset.column_names, ["text"])
del dset
dset = builder.as_dataset("train+test[:30%]")
self.assertIsInstance(dset, Dataset)
self.assertEqual(dset.split, "train+test[:30%]")
self.assertEqual(len(dset), 2)
self.assertDictEqual(dset.features, Features({"text": Value("string")}))
self.assertListEqual(dset.column_names, ["text"])
del dset
@require_faiss
def test_as_dataset_with_post_process_with_index(self):
def _post_process(self, dataset, resources_paths):
if os.path.exists(resources_paths["index"]):
dataset.load_faiss_index("my_index", resources_paths["index"])
return dataset
else:
dataset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((len(dataset), 8)), string_factory="Flat", index_name="my_index"
)
dataset.save_faiss_index("my_index", resources_paths["index"])
return dataset
def _post_processing_resources(self, split):
return {"index": f"Flat-{split}.faiss"}
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyBuilder(cache_dir=tmp_dir)
builder._post_process = types.MethodType(_post_process, builder)
builder._post_processing_resources = types.MethodType(_post_processing_resources, builder)
os.makedirs(builder.cache_dir)
builder.info.splits = SplitDict()
builder.info.splits.add(SplitInfo("train", num_examples=10))
builder.info.splits.add(SplitInfo("test", num_examples=10))
for split in builder.info.splits:
with ArrowWriter(
path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{split}.arrow"),
features=Features({"text": Value("string")}),
) as writer:
writer.write_batch({"text": ["foo"] * 10})
writer.finalize()
with ArrowWriter(
path=os.path.join(builder.cache_dir, f"small_dataset-{split}.arrow"),
features=Features({"text": Value("string")}),
) as writer:
writer.write_batch({"text": ["foo"] * 2})
writer.finalize()
dsets = builder.as_dataset()
self.assertIsInstance(dsets, DatasetDict)
self.assertListEqual(list(dsets.keys()), ["train", "test"])
self.assertEqual(len(dsets["train"]), 10)
self.assertEqual(len(dsets["test"]), 10)
self.assertDictEqual(dsets["train"].features, Features({"text": Value("string")}))
self.assertDictEqual(dsets["test"].features, Features({"text": Value("string")}))
self.assertListEqual(dsets["train"].column_names, ["text"])
self.assertListEqual(dsets["test"].column_names, ["text"])
self.assertListEqual(dsets["train"].list_indexes(), ["my_index"])
self.assertListEqual(dsets["test"].list_indexes(), ["my_index"])
self.assertGreater(builder.info.post_processing_size, 0)
self.assertGreater(builder.info.post_processed.resources_checksums["train"]["index"]["num_bytes"], 0)
del dsets
dset = builder.as_dataset("train")
self.assertIsInstance(dset, Dataset)
self.assertEqual(dset.split, "train")
self.assertEqual(len(dset), 10)
self.assertDictEqual(dset.features, Features({"text": Value("string")}))
self.assertListEqual(dset.column_names, ["text"])
self.assertListEqual(dset.list_indexes(), ["my_index"])
del dset
dset = builder.as_dataset("train+test[:30%]")
self.assertIsInstance(dset, Dataset)
self.assertEqual(dset.split, "train+test[:30%]")
self.assertEqual(len(dset), 13)
self.assertDictEqual(dset.features, Features({"text": Value("string")}))
self.assertListEqual(dset.column_names, ["text"])
self.assertListEqual(dset.list_indexes(), ["my_index"])
del dset
def test_download_and_prepare_with_post_process(self):
def _post_process(self, dataset, resources_paths):
def char_tokenize(example):
return {"tokens": list(example["text"])}
return dataset.map(char_tokenize, cache_file_name=resources_paths["tokenized_dataset"])
def _post_processing_resources(self, split):
return {"tokenized_dataset": f"tokenized_dataset-{split}.arrow"}
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyBuilder(cache_dir=tmp_dir)
builder.info.post_processed = PostProcessedInfo(
features=Features({"text": Value("string"), "tokens": [Value("string")]})
)
builder._post_process = types.MethodType(_post_process, builder)
builder._post_processing_resources = types.MethodType(_post_processing_resources, builder)
builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
self.assertTrue(
os.path.exists(
os.path.join(
tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow"
)
)
)
self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
self.assertDictEqual(
builder.info.post_processed.features,
Features({"text": Value("string"), "tokens": [Value("string")]}),
)
self.assertEqual(builder.info.splits["train"].num_examples, 100)
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
)
def _post_process(self, dataset, resources_paths):
return dataset.select([0, 1], keep_in_memory=True)
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyBuilder(cache_dir=tmp_dir)
builder._post_process = types.MethodType(_post_process, builder)
builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
self.assertTrue(
os.path.exists(
os.path.join(
tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow"
)
)
)
self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
self.assertIsNone(builder.info.post_processed)
self.assertEqual(builder.info.splits["train"].num_examples, 100)
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
)
def _post_process(self, dataset, resources_paths):
if os.path.exists(resources_paths["index"]):
dataset.load_faiss_index("my_index", resources_paths["index"])
return dataset
else:
dataset = dataset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((len(dataset), 8)), string_factory="Flat", index_name="my_index"
)
dataset.save_faiss_index("my_index", resources_paths["index"])
return dataset
def _post_processing_resources(self, split):
return {"index": f"Flat-{split}.faiss"}
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyBuilder(cache_dir=tmp_dir)
builder._post_process = types.MethodType(_post_process, builder)
builder._post_processing_resources = types.MethodType(_post_processing_resources, builder)
builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
self.assertTrue(
os.path.exists(
os.path.join(
tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow"
)
)
)
self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
self.assertIsNone(builder.info.post_processed)
self.assertEqual(builder.info.splits["train"].num_examples, 100)
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
)
def test_error_download_and_prepare(self):
def _prepare_split(self, split_generator, **kwargs):
raise ValueError()
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyBuilder(cache_dir=tmp_dir)
builder._prepare_split = types.MethodType(_prepare_split, builder)
self.assertRaises(
ValueError,
builder.download_and_prepare,
try_from_hf_gcs=False,
download_mode=DownloadMode.FORCE_REDOWNLOAD,
)
self.assertRaises(FileNotFoundError, builder.as_dataset)
def test_generator_based_download_and_prepare(self):
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir)
builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
self.assertTrue(
os.path.exists(
os.path.join(
tmp_dir,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train.arrow",
)
)
)
self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
self.assertEqual(builder.info.splits["train"].num_examples, 100)
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
)
# Test that duplicated keys are ignored if verification_mode is "no_checks"
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir)
with patch("datasets.builder.ArrowWriter", side_effect=ArrowWriter) as mock_arrow_writer:
builder.download_and_prepare(
download_mode=DownloadMode.FORCE_REDOWNLOAD, verification_mode=VerificationMode.NO_CHECKS
)
mock_arrow_writer.assert_called_once()
args, kwargs = mock_arrow_writer.call_args_list[0]
self.assertFalse(kwargs["check_duplicates"])
mock_arrow_writer.reset_mock()
builder.download_and_prepare(
download_mode=DownloadMode.FORCE_REDOWNLOAD, verification_mode=VerificationMode.BASIC_CHECKS
)
mock_arrow_writer.assert_called_once()
args, kwargs = mock_arrow_writer.call_args_list[0]
self.assertTrue(kwargs["check_duplicates"])
def test_cache_dir_no_args(self):
with tempfile.TemporaryDirectory() as tmp_dir:
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_dir=None, data_files=None)
relative_cache_dir_parts = Path(builder._relative_data_dir()).parts
self.assertTupleEqual(relative_cache_dir_parts, (builder.dataset_name, "default", "0.0.0"))
def test_cache_dir_for_data_files(self):
with tempfile.TemporaryDirectory() as tmp_dir:
dummy_data1 = os.path.join(tmp_dir, "dummy_data1.txt")
with open(dummy_data1, "w", encoding="utf-8") as f:
f.writelines("foo bar")
dummy_data2 = os.path.join(tmp_dir, "dummy_data2.txt")
with open(dummy_data2, "w", encoding="utf-8") as f:
f.writelines("foo bar\n")
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=dummy_data1)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=dummy_data1)
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1])
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={"train": dummy_data1})
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={Split.TRAIN: dummy_data1})
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={"train": [dummy_data1]})
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={"test": dummy_data1})
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=dummy_data2)
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data2])
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1, dummy_data2])
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1, dummy_data2])
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1, dummy_data2])
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data2, dummy_data1])
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
builder = DummyGeneratorBasedBuilder(
cache_dir=tmp_dir, data_files={"train": dummy_data1, "test": dummy_data2}
)
other_builder = DummyGeneratorBasedBuilder(
cache_dir=tmp_dir, data_files={"train": dummy_data1, "test": dummy_data2}
)
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(
cache_dir=tmp_dir, data_files={"train": [dummy_data1], "test": dummy_data2}
)
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(
cache_dir=tmp_dir, data_files={"train": dummy_data1, "validation": dummy_data2}
)
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilder(
cache_dir=tmp_dir,
data_files={"train": [dummy_data1, dummy_data2], "test": dummy_data2},
)
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
def test_cache_dir_for_features(self):
with tempfile.TemporaryDirectory() as tmp_dir:
f1 = Features({"id": Value("int8")})
f2 = Features({"id": Value("int32")})
builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, features=f1)
other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, features=f1)
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, features=f2)
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
def test_cache_dir_for_config_kwargs(self):
with tempfile.TemporaryDirectory() as tmp_dir:
# create config on the fly
builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, content="foo", times=2)
other_builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, times=2, content="foo")
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
self.assertIn("content=foo", builder.cache_dir)
self.assertIn("times=2", builder.cache_dir)
other_builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, content="bar", times=2)
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, content="foo")
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
with tempfile.TemporaryDirectory() as tmp_dir:
# overwrite an existing config
builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", content="foo", times=2)
other_builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", times=2, content="foo")
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
self.assertIn("content=foo", builder.cache_dir)
self.assertIn("times=2", builder.cache_dir)
other_builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", content="bar", times=2)
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", content="foo")
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
def test_config_names(self):
with tempfile.TemporaryDirectory() as tmp_dir:
with self.assertRaises(ValueError) as error_context:
DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, data_files=None, data_dir=None)
self.assertIn("Please pick one among the available configs", str(error_context.exception))
builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a")
self.assertEqual(builder.config.name, "a")
builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="b")
self.assertEqual(builder.config.name, "b")
with self.assertRaises(ValueError):
DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir)
builder = DummyBuilderWithDefaultConfig(cache_dir=tmp_dir)
self.assertEqual(builder.config.name, "a")
def test_cache_dir_for_data_dir(self):
with tempfile.TemporaryDirectory() as tmp_dir, tempfile.TemporaryDirectory() as data_dir:
builder = DummyBuilderWithManualDownload(cache_dir=tmp_dir, config_name="a", data_dir=data_dir)
other_builder = DummyBuilderWithManualDownload(cache_dir=tmp_dir, config_name="a", data_dir=data_dir)
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = DummyBuilderWithManualDownload(cache_dir=tmp_dir, config_name="a", data_dir=tmp_dir)
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
def test_cache_dir_for_configured_builder(self):
with tempfile.TemporaryDirectory() as tmp_dir, tempfile.TemporaryDirectory() as data_dir:
builder_cls = configure_builder_class(
DummyBuilderWithManualDownload,
builder_configs=[BuilderConfig(data_dir=data_dir)],
default_config_name=None,
dataset_name="dummy",
)
builder = builder_cls(cache_dir=tmp_dir, hash="abc")
other_builder = builder_cls(cache_dir=tmp_dir, hash="abc")
self.assertEqual(builder.cache_dir, other_builder.cache_dir)
other_builder = builder_cls(cache_dir=tmp_dir, hash="def")
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
def test_arrow_based_download_and_prepare(tmp_path):
builder = DummyArrowBasedBuilder(cache_dir=tmp_path)
builder.download_and_prepare()
assert os.path.exists(
os.path.join(
tmp_path,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train.arrow",
)
)
    assert builder.info.features == Features({"text": Value("string")})
assert builder.info.splits["train"].num_examples == 100
assert os.path.exists(os.path.join(tmp_path, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
@require_beam
def test_beam_based_download_and_prepare(tmp_path):
builder = DummyBeamBasedBuilder(cache_dir=tmp_path, beam_runner="DirectRunner")
builder.download_and_prepare()
assert os.path.exists(
os.path.join(
tmp_path,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train.arrow",
)
)
    assert builder.info.features == Features({"text": Value("string")})
assert builder.info.splits["train"].num_examples == 100
assert os.path.exists(os.path.join(tmp_path, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
@require_beam
def test_beam_based_as_dataset(tmp_path):
builder = DummyBeamBasedBuilder(cache_dir=tmp_path, beam_runner="DirectRunner")
builder.download_and_prepare()
dataset = builder.as_dataset()
assert dataset
assert isinstance(dataset["train"], Dataset)
assert len(dataset["train"]) > 0
@pytest.mark.parametrize(
"split, expected_dataset_class, expected_dataset_length",
[
(None, DatasetDict, 10),
("train", Dataset, 10),
("train+test[:30%]", Dataset, 13),
],
)
@pytest.mark.parametrize("in_memory", [False, True])
def test_builder_as_dataset(split, expected_dataset_class, expected_dataset_length, in_memory, tmp_path):
cache_dir = str(tmp_path)
builder = DummyBuilder(cache_dir=cache_dir)
os.makedirs(builder.cache_dir)
builder.info.splits = SplitDict()
builder.info.splits.add(SplitInfo("train", num_examples=10))
builder.info.splits.add(SplitInfo("test", num_examples=10))
for info_split in builder.info.splits:
with ArrowWriter(
path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{info_split}.arrow"),
features=Features({"text": Value("string")}),
) as writer:
writer.write_batch({"text": ["foo"] * 10})
writer.finalize()
with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():
dataset = builder.as_dataset(split=split, in_memory=in_memory)
assert isinstance(dataset, expected_dataset_class)
if isinstance(dataset, DatasetDict):
assert list(dataset.keys()) == ["train", "test"]
datasets = dataset.values()
expected_splits = ["train", "test"]
elif isinstance(dataset, Dataset):
datasets = [dataset]
expected_splits = [split]
for dataset, expected_split in zip(datasets, expected_splits):
assert dataset.split == expected_split
assert len(dataset) == expected_dataset_length
assert dataset.features == Features({"text": Value("string")})
        assert dataset.column_names == ["text"]
@pytest.mark.parametrize("in_memory", [False, True])
def test_generator_based_builder_as_dataset(in_memory, tmp_path):
cache_dir = tmp_path / "data"
cache_dir.mkdir()
cache_dir = str(cache_dir)
builder = DummyGeneratorBasedBuilder(cache_dir=cache_dir)
builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():
dataset = builder.as_dataset("train", in_memory=in_memory)
assert dataset.data.to_pydict() == {"text": ["foo"] * 100}
@pytest.mark.parametrize(
"writer_batch_size, default_writer_batch_size, expected_chunks", [(None, None, 1), (None, 5, 20), (10, None, 10)]
)
def test_custom_writer_batch_size(tmp_path, writer_batch_size, default_writer_batch_size, expected_chunks):
cache_dir = str(tmp_path)
if default_writer_batch_size:
DummyGeneratorBasedBuilder.DEFAULT_WRITER_BATCH_SIZE = default_writer_batch_size
builder = DummyGeneratorBasedBuilder(cache_dir=cache_dir, writer_batch_size=writer_batch_size)
assert builder._writer_batch_size == (writer_batch_size or default_writer_batch_size)
builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
dataset = builder.as_dataset("train")
assert len(dataset.data[0].chunks) == expected_chunks
def test_builder_as_streaming_dataset(tmp_path):
dummy_builder = DummyGeneratorBasedBuilder(cache_dir=str(tmp_path))
check_streaming(dummy_builder)
dsets = dummy_builder.as_streaming_dataset()
assert isinstance(dsets, IterableDatasetDict)
assert isinstance(dsets["train"], IterableDataset)
assert len(list(dsets["train"])) == 100
dset = dummy_builder.as_streaming_dataset(split="train")
assert isinstance(dset, IterableDataset)
assert len(list(dset)) == 100
@require_beam
def test_beam_based_builder_as_streaming_dataset(tmp_path):
builder = DummyBeamBasedBuilder(cache_dir=tmp_path)
check_streaming(builder)
with pytest.raises(DatasetNotOnHfGcsError):
builder.as_streaming_dataset()
def _run_test_builder_streaming_works_in_subprocesses(builder):
check_streaming(builder)
dset = builder.as_streaming_dataset(split="train")
assert isinstance(dset, IterableDataset)
assert len(list(dset)) == 100
def test_builder_streaming_works_in_subprocess(tmp_path):
dummy_builder = DummyGeneratorBasedBuilder(cache_dir=str(tmp_path))
p = Process(target=_run_test_builder_streaming_works_in_subprocesses, args=(dummy_builder,))
p.start()
p.join()
class DummyBuilderWithVersion(GeneratorBasedBuilder):
VERSION = "2.0.0"
def _info(self):
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
pass
def _generate_examples(self):
pass
class DummyBuilderWithBuilderConfigs(GeneratorBasedBuilder):
BUILDER_CONFIGS = [BuilderConfig(name="custom", version="2.0.0")]
def _info(self):
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
pass
def _generate_examples(self):
pass
class CustomBuilderConfig(BuilderConfig):
def __init__(self, date=None, language=None, version="2.0.0", **kwargs):
name = f"{date}.{language}"
super().__init__(name=name, version=version, **kwargs)
self.date = date
self.language = language
class DummyBuilderWithCustomBuilderConfigs(GeneratorBasedBuilder):
BUILDER_CONFIGS = [CustomBuilderConfig(date="20220501", language="en")]
BUILDER_CONFIG_CLASS = CustomBuilderConfig
def _info(self):
return DatasetInfo(features=Features({"text": Value("string")}))
def _split_generators(self, dl_manager):
pass
def _generate_examples(self):
pass
@pytest.mark.parametrize(
"builder_class, kwargs",
[
(DummyBuilderWithVersion, {}),
(DummyBuilderWithBuilderConfigs, {"config_name": "custom"}),
(DummyBuilderWithCustomBuilderConfigs, {"config_name": "20220501.en"}),
(DummyBuilderWithCustomBuilderConfigs, {"date": "20220501", "language": "ca"}),
],
)
def test_builder_config_version(builder_class, kwargs, tmp_path):
cache_dir = str(tmp_path)
builder = builder_class(cache_dir=cache_dir, **kwargs)
assert builder.config.version == "2.0.0"
def test_builder_download_and_prepare_with_absolute_output_dir(tmp_path):
builder = DummyGeneratorBasedBuilder()
output_dir = str(tmp_path)
builder.download_and_prepare(output_dir)
assert builder._output_dir.startswith(tmp_path.resolve().as_posix())
assert os.path.exists(os.path.join(output_dir, "dataset_info.json"))
assert os.path.exists(os.path.join(output_dir, f"{builder.dataset_name}-train.arrow"))
assert not os.path.exists(os.path.join(output_dir + ".incomplete"))
def test_builder_download_and_prepare_with_relative_output_dir():
with set_current_working_directory_to_temp_dir():
builder = DummyGeneratorBasedBuilder()
output_dir = "test-out"
builder.download_and_prepare(output_dir)
assert Path(builder._output_dir).resolve().as_posix().startswith(Path(output_dir).resolve().as_posix())
assert os.path.exists(os.path.join(output_dir, "dataset_info.json"))
assert os.path.exists(os.path.join(output_dir, f"{builder.dataset_name}-train.arrow"))
assert not os.path.exists(os.path.join(output_dir + ".incomplete"))
def test_builder_with_filesystem_download_and_prepare(tmp_path, mockfs):
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path)
builder.download_and_prepare("mock://my_dataset", storage_options=mockfs.storage_options)
assert builder._output_dir.startswith("mock://my_dataset")
assert is_local_path(builder._cache_downloaded_dir)
assert isinstance(builder._fs, type(mockfs))
assert builder._fs.storage_options == mockfs.storage_options
assert mockfs.exists("my_dataset/dataset_info.json")
assert mockfs.exists(f"my_dataset/{builder.dataset_name}-train.arrow")
assert not mockfs.exists("my_dataset.incomplete")
def test_builder_with_filesystem_download_and_prepare_reload(tmp_path, mockfs, caplog):
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path)
mockfs.makedirs("my_dataset")
DatasetInfo().write_to_directory("mock://my_dataset", storage_options=mockfs.storage_options)
mockfs.touch(f"my_dataset/{builder.dataset_name}-train.arrow")
caplog.clear()
with caplog.at_level(INFO, logger=get_logger().name):
builder.download_and_prepare("mock://my_dataset", storage_options=mockfs.storage_options)
assert "Found cached dataset" in caplog.text
def test_generator_based_builder_download_and_prepare_as_parquet(tmp_path):
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path)
builder.download_and_prepare(file_format="parquet")
assert builder.info.splits["train"].num_examples == 100
parquet_path = os.path.join(
tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.parquet"
)
assert os.path.exists(parquet_path)
assert pq.ParquetFile(parquet_path) is not None
def test_generator_based_builder_download_and_prepare_sharded(tmp_path):
writer_batch_size = 25
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path, writer_batch_size=writer_batch_size)
with patch("datasets.config.MAX_SHARD_SIZE", 1): # one batch per shard
builder.download_and_prepare(file_format="parquet")
expected_num_shards = 100 // writer_batch_size
assert builder.info.splits["train"].num_examples == 100
parquet_path = os.path.join(
tmp_path,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet",
)
assert os.path.exists(parquet_path)
parquet_files = [
pq.ParquetFile(parquet_path)
for parquet_path in Path(tmp_path).rglob(
f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet"
)
]
assert len(parquet_files) == expected_num_shards
assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100
def test_generator_based_builder_download_and_prepare_with_max_shard_size(tmp_path):
writer_batch_size = 25
builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path, writer_batch_size=writer_batch_size)
builder.download_and_prepare(file_format="parquet", max_shard_size=1) # one batch per shard
expected_num_shards = 100 // writer_batch_size
assert builder.info.splits["train"].num_examples == 100
parquet_path = os.path.join(
tmp_path,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet",
)
assert os.path.exists(parquet_path)
parquet_files = [
pq.ParquetFile(parquet_path)
for parquet_path in Path(tmp_path).rglob(
f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet"
)
]
assert len(parquet_files) == expected_num_shards
assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100
def test_generator_based_builder_download_and_prepare_with_num_proc(tmp_path):
builder = DummyGeneratorBasedBuilderWithShards(cache_dir=tmp_path)
builder.download_and_prepare(num_proc=2)
expected_num_shards = 2
assert builder.info.splits["train"].num_examples == 400
assert builder.info.splits["train"].shard_lengths == [200, 200]
arrow_path = os.path.join(
tmp_path,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.arrow",
)
assert os.path.exists(arrow_path)
ds = builder.as_dataset("train")
assert len(ds) == 400
assert ds.to_dict() == {
"id": [i for _ in range(4) for i in range(100)],
"filepath": [f"data{i}.txt" for i in range(4) for _ in range(100)],
}
@pytest.mark.parametrize(
"num_proc, expectation", [(None, does_not_raise()), (1, does_not_raise()), (2, pytest.raises(RuntimeError))]
)
def test_generator_based_builder_download_and_prepare_with_ambiguous_shards(num_proc, expectation, tmp_path):
builder = DummyGeneratorBasedBuilderWithAmbiguousShards(cache_dir=tmp_path)
with expectation:
builder.download_and_prepare(num_proc=num_proc)
def test_arrow_based_builder_download_and_prepare_as_parquet(tmp_path):
builder = DummyArrowBasedBuilder(cache_dir=tmp_path)
builder.download_and_prepare(file_format="parquet")
assert builder.info.splits["train"].num_examples == 100
parquet_path = os.path.join(
tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.parquet"
)
assert os.path.exists(parquet_path)
assert pq.ParquetFile(parquet_path) is not None
def test_arrow_based_builder_download_and_prepare_sharded(tmp_path):
builder = DummyArrowBasedBuilder(cache_dir=tmp_path)
with patch("datasets.config.MAX_SHARD_SIZE", 1): # one batch per shard
builder.download_and_prepare(file_format="parquet")
expected_num_shards = 10
assert builder.info.splits["train"].num_examples == 100
parquet_path = os.path.join(
tmp_path,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet",
)
assert os.path.exists(parquet_path)
parquet_files = [
pq.ParquetFile(parquet_path)
for parquet_path in Path(tmp_path).rglob(
f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet"
)
]
assert len(parquet_files) == expected_num_shards
assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100
def test_arrow_based_builder_download_and_prepare_with_max_shard_size(tmp_path):
builder = DummyArrowBasedBuilder(cache_dir=tmp_path)
builder.download_and_prepare(file_format="parquet", max_shard_size=1) # one table per shard
expected_num_shards = 10
assert builder.info.splits["train"].num_examples == 100
parquet_path = os.path.join(
tmp_path,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet",
)
assert os.path.exists(parquet_path)
parquet_files = [
pq.ParquetFile(parquet_path)
for parquet_path in Path(tmp_path).rglob(
f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet"
)
]
assert len(parquet_files) == expected_num_shards
assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100
def test_arrow_based_builder_download_and_prepare_with_num_proc(tmp_path):
builder = DummyArrowBasedBuilderWithShards(cache_dir=tmp_path)
builder.download_and_prepare(num_proc=2)
expected_num_shards = 2
assert builder.info.splits["train"].num_examples == 400
assert builder.info.splits["train"].shard_lengths == [200, 200]
arrow_path = os.path.join(
tmp_path,
builder.dataset_name,
"default",
"0.0.0",
f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.arrow",
)
assert os.path.exists(arrow_path)
ds = builder.as_dataset("train")
assert len(ds) == 400
assert ds.to_dict() == {
"id": [i for _ in range(4) for i in range(100)],
"filepath": [f"data{i}.txt" for i in range(4) for _ in range(100)],
}
@pytest.mark.parametrize(
"num_proc, expectation", [(None, does_not_raise()), (1, does_not_raise()), (2, pytest.raises(RuntimeError))]
)
def test_arrow_based_builder_download_and_prepare_with_ambiguous_shards(num_proc, expectation, tmp_path):
builder = DummyArrowBasedBuilderWithAmbiguousShards(cache_dir=tmp_path)
with expectation:
builder.download_and_prepare(num_proc=num_proc)
@require_beam
def test_beam_based_builder_download_and_prepare_as_parquet(tmp_path):
builder = DummyBeamBasedBuilder(cache_dir=tmp_path, beam_runner="DirectRunner")
builder.download_and_prepare(file_format="parquet")
assert builder.info.splits["train"].num_examples == 100
parquet_path = os.path.join(
tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.parquet"
)
assert os.path.exists(parquet_path)
assert pq.ParquetFile(parquet_path) is not None
# --- end of file: datasets/tests/test_builder.py ---
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
expected = dataset_size < in_memory_max_size
else:
expected = False
result = is_small_dataset(dataset_size)
assert result == expected
| datasets/tests/test_info_utils.py/0 | {
"file_path": "datasets/tests/test_info_utils.py",
"repo_id": "datasets",
"token_count": 366
} | 77 |
import copy
import pickle
import warnings
from typing import List, Union
import numpy as np
import pyarrow as pa
import pytest
import datasets
from datasets import Sequence, Value
from datasets.features.features import Array2D, Array2DExtensionType, ClassLabel, Features, Image
from datasets.table import (
ConcatenationTable,
InMemoryTable,
MemoryMappedTable,
Table,
TableBlock,
_in_memory_arrow_table_from_buffer,
_in_memory_arrow_table_from_file,
_interpolation_search,
_is_extension_type,
_memory_mapped_arrow_table_from_file,
array_concat,
cast_array_to_feature,
concat_tables,
embed_array_storage,
embed_table_storage,
inject_arrow_table_documentation,
table_cast,
table_iter,
)
from .utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, slow
@pytest.fixture(scope="session")
def in_memory_pa_table(arrow_file) -> pa.Table:
return pa.ipc.open_stream(arrow_file).read_all()
def _to_testing_blocks(table: TableBlock) -> List[List[TableBlock]]:
assert len(table) > 2
blocks = [
[table.slice(0, 2)],
[table.slice(2).drop([c for c in table.column_names if c != "tokens"]), table.slice(2).drop(["tokens"])],
]
return blocks
@pytest.fixture(scope="session")
def in_memory_blocks(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table)
return _to_testing_blocks(table)
@pytest.fixture(scope="session")
def memory_mapped_blocks(arrow_file):
table = MemoryMappedTable.from_file(arrow_file)
return _to_testing_blocks(table)
@pytest.fixture(scope="session")
def mixed_in_memory_and_memory_mapped_blocks(in_memory_blocks, memory_mapped_blocks):
return in_memory_blocks[:1] + memory_mapped_blocks[1:]
def assert_deepcopy_without_bringing_data_in_memory(table: MemoryMappedTable):
with assert_arrow_memory_doesnt_increase():
copied_table = copy.deepcopy(table)
assert isinstance(copied_table, MemoryMappedTable)
assert copied_table.table == table.table
def assert_deepcopy_does_bring_data_in_memory(table: MemoryMappedTable):
with assert_arrow_memory_increases():
copied_table = copy.deepcopy(table)
assert isinstance(copied_table, MemoryMappedTable)
assert copied_table.table == table.table
def assert_pickle_without_bringing_data_in_memory(table: MemoryMappedTable):
with assert_arrow_memory_doesnt_increase():
pickled_table = pickle.dumps(table)
unpickled_table = pickle.loads(pickled_table)
assert isinstance(unpickled_table, MemoryMappedTable)
assert unpickled_table.table == table.table
def assert_pickle_does_bring_data_in_memory(table: MemoryMappedTable):
with assert_arrow_memory_increases():
pickled_table = pickle.dumps(table)
unpickled_table = pickle.loads(pickled_table)
assert isinstance(unpickled_table, MemoryMappedTable)
assert unpickled_table.table == table.table
def assert_index_attributes_equal(table: Table, other: Table):
assert table._batches == other._batches
np.testing.assert_array_equal(table._offsets, other._offsets)
assert table._schema == other._schema
def add_suffix_to_column_names(table, suffix):
return table.rename_columns([f"{name}{suffix}" for name in table.column_names])
def test_inject_arrow_table_documentation(in_memory_pa_table):
method = pa.Table.slice
def function_to_wrap(*args):
return method(*args)
args = (0, 1)
wrapped_method = inject_arrow_table_documentation(method)(function_to_wrap)
assert method(in_memory_pa_table, *args) == wrapped_method(in_memory_pa_table, *args)
assert "pyarrow.Table" not in wrapped_method.__doc__
assert "Table" in wrapped_method.__doc__
def test_in_memory_arrow_table_from_file(arrow_file, in_memory_pa_table):
with assert_arrow_memory_increases():
pa_table = _in_memory_arrow_table_from_file(arrow_file)
assert in_memory_pa_table == pa_table
def test_in_memory_arrow_table_from_buffer(in_memory_pa_table):
with assert_arrow_memory_increases():
buf_writer = pa.BufferOutputStream()
writer = pa.RecordBatchStreamWriter(buf_writer, schema=in_memory_pa_table.schema)
writer.write_table(in_memory_pa_table)
writer.close()
buf_writer.close()
pa_table = _in_memory_arrow_table_from_buffer(buf_writer.getvalue())
assert in_memory_pa_table == pa_table
def test_memory_mapped_arrow_table_from_file(arrow_file, in_memory_pa_table):
with assert_arrow_memory_doesnt_increase():
pa_table = _memory_mapped_arrow_table_from_file(arrow_file)
assert in_memory_pa_table == pa_table
def test_table_init(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table.table == in_memory_pa_table
def test_table_validate(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table.validate() == in_memory_pa_table.validate()
def test_table_equals(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table.equals(in_memory_pa_table)
def test_table_to_batches(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table.to_batches() == in_memory_pa_table.to_batches()
def test_table_to_pydict(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table.to_pydict() == in_memory_pa_table.to_pydict()
def test_table_to_string(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table.to_string() == in_memory_pa_table.to_string()
def test_table_field(in_memory_pa_table):
assert "tokens" in in_memory_pa_table.column_names
table = Table(in_memory_pa_table)
assert table.field("tokens") == in_memory_pa_table.field("tokens")
def test_table_column(in_memory_pa_table):
assert "tokens" in in_memory_pa_table.column_names
table = Table(in_memory_pa_table)
assert table.column("tokens") == in_memory_pa_table.column("tokens")
def test_table_itercolumns(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert isinstance(table.itercolumns(), type(in_memory_pa_table.itercolumns()))
assert list(table.itercolumns()) == list(in_memory_pa_table.itercolumns())
def test_table_getitem(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table[0] == in_memory_pa_table[0]
def test_table_len(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert len(table) == len(in_memory_pa_table)
def test_table_str(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert str(table) == str(in_memory_pa_table).replace("pyarrow.Table", "Table")
assert repr(table) == repr(in_memory_pa_table).replace("pyarrow.Table", "Table")
@pytest.mark.parametrize(
"attribute", ["schema", "columns", "num_columns", "num_rows", "shape", "nbytes", "column_names"]
)
def test_table_attributes(in_memory_pa_table, attribute):
table = Table(in_memory_pa_table)
assert getattr(table, attribute) == getattr(in_memory_pa_table, attribute)
def test_in_memory_table_from_file(arrow_file, in_memory_pa_table):
with assert_arrow_memory_increases():
table = InMemoryTable.from_file(arrow_file)
assert table.table == in_memory_pa_table
assert isinstance(table, InMemoryTable)
def test_in_memory_table_from_buffer(in_memory_pa_table):
with assert_arrow_memory_increases():
buf_writer = pa.BufferOutputStream()
writer = pa.RecordBatchStreamWriter(buf_writer, schema=in_memory_pa_table.schema)
writer.write_table(in_memory_pa_table)
writer.close()
buf_writer.close()
table = InMemoryTable.from_buffer(buf_writer.getvalue())
assert table.table == in_memory_pa_table
assert isinstance(table, InMemoryTable)
def test_in_memory_table_from_pandas(in_memory_pa_table):
df = in_memory_pa_table.to_pandas()
with assert_arrow_memory_increases():
# with no schema it might infer another order of the fields in the schema
table = InMemoryTable.from_pandas(df)
assert isinstance(table, InMemoryTable)
# by specifying schema we get the same order of features, and so the exact same table
table = InMemoryTable.from_pandas(df, schema=in_memory_pa_table.schema)
assert table.table == in_memory_pa_table
assert isinstance(table, InMemoryTable)
def test_in_memory_table_from_arrays(in_memory_pa_table):
arrays = list(in_memory_pa_table.columns)
names = list(in_memory_pa_table.column_names)
table = InMemoryTable.from_arrays(arrays, names=names)
assert table.table == in_memory_pa_table
assert isinstance(table, InMemoryTable)
def test_in_memory_table_from_pydict(in_memory_pa_table):
pydict = in_memory_pa_table.to_pydict()
with assert_arrow_memory_increases():
table = InMemoryTable.from_pydict(pydict)
assert isinstance(table, InMemoryTable)
assert table.table == pa.Table.from_pydict(pydict)
def test_in_memory_table_from_pylist(in_memory_pa_table):
pylist = InMemoryTable(in_memory_pa_table).to_pylist()
table = InMemoryTable.from_pylist(pylist)
assert isinstance(table, InMemoryTable)
assert pylist == table.to_pylist()
def test_in_memory_table_from_batches(in_memory_pa_table):
batches = list(in_memory_pa_table.to_batches())
table = InMemoryTable.from_batches(batches)
assert table.table == in_memory_pa_table
assert isinstance(table, InMemoryTable)
def test_in_memory_table_deepcopy(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table)
copied_table = copy.deepcopy(table)
assert table.table == copied_table.table
assert_index_attributes_equal(table, copied_table)
# deepcopy must return the exact same arrow objects since they are immutable
assert table.table is copied_table.table
assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches))
def test_in_memory_table_pickle(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table)
pickled_table = pickle.dumps(table)
unpickled_table = pickle.loads(pickled_table)
assert unpickled_table.table == table.table
assert_index_attributes_equal(table, unpickled_table)
@slow
def test_in_memory_table_pickle_big_table():
big_table_4GB = InMemoryTable.from_pydict({"col": [0] * ((4 * 8 << 30) // 64)})
length = len(big_table_4GB)
big_table_4GB = pickle.dumps(big_table_4GB)
big_table_4GB = pickle.loads(big_table_4GB)
assert len(big_table_4GB) == length
def test_in_memory_table_slice(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table).slice(1, 2)
assert table.table == in_memory_pa_table.slice(1, 2)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_filter(in_memory_pa_table):
mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))])
table = InMemoryTable(in_memory_pa_table).filter(mask)
assert table.table == in_memory_pa_table.filter(mask)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_flatten(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table).flatten()
assert table.table == in_memory_pa_table.flatten()
assert isinstance(table, InMemoryTable)
def test_in_memory_table_combine_chunks(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table).combine_chunks()
assert table.table == in_memory_pa_table.combine_chunks()
assert isinstance(table, InMemoryTable)
def test_in_memory_table_cast(in_memory_pa_table):
assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types
schema = pa.schema(
{
k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32())
for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types)
}
)
table = InMemoryTable(in_memory_pa_table).cast(schema)
assert table.table == in_memory_pa_table.cast(schema)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_cast_reorder_struct():
table = InMemoryTable(
pa.Table.from_pydict(
{
"top": [
{
"foo": "a",
"bar": "b",
}
]
}
)
)
schema = pa.schema({"top": pa.struct({"bar": pa.string(), "foo": pa.string()})})
assert table.cast(schema).schema == schema
def test_in_memory_table_cast_with_hf_features():
table = InMemoryTable(pa.Table.from_pydict({"labels": [0, 1]}))
features = Features({"labels": ClassLabel(names=["neg", "pos"])})
schema = features.arrow_schema
assert table.cast(schema).schema == schema
assert Features.from_arrow_schema(table.cast(schema).schema) == features
def test_in_memory_table_replace_schema_metadata(in_memory_pa_table):
metadata = {"huggingface": "{}"}
table = InMemoryTable(in_memory_pa_table).replace_schema_metadata(metadata)
assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata
assert isinstance(table, InMemoryTable)
def test_in_memory_table_add_column(in_memory_pa_table):
i = len(in_memory_pa_table.column_names)
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
table = InMemoryTable(in_memory_pa_table).add_column(i, field_, column)
assert table.table == in_memory_pa_table.add_column(i, field_, column)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_append_column(in_memory_pa_table):
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
table = InMemoryTable(in_memory_pa_table).append_column(field_, column)
assert table.table == in_memory_pa_table.append_column(field_, column)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_remove_column(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table).remove_column(0)
assert table.table == in_memory_pa_table.remove_column(0)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_set_column(in_memory_pa_table):
i = len(in_memory_pa_table.column_names)
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
table = InMemoryTable(in_memory_pa_table).set_column(i, field_, column)
assert table.table == in_memory_pa_table.set_column(i, field_, column)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_rename_columns(in_memory_pa_table):
assert "tokens" in in_memory_pa_table.column_names
names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names]
table = InMemoryTable(in_memory_pa_table).rename_columns(names)
assert table.table == in_memory_pa_table.rename_columns(names)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_drop(in_memory_pa_table):
names = [in_memory_pa_table.column_names[0]]
table = InMemoryTable(in_memory_pa_table).drop(names)
assert table.table == in_memory_pa_table.drop(names)
assert isinstance(table, InMemoryTable)
def test_memory_mapped_table_init(arrow_file, in_memory_pa_table):
table = MemoryMappedTable(_memory_mapped_arrow_table_from_file(arrow_file), arrow_file)
assert table.table == in_memory_pa_table
assert isinstance(table, MemoryMappedTable)
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_from_file(arrow_file, in_memory_pa_table):
with assert_arrow_memory_doesnt_increase():
table = MemoryMappedTable.from_file(arrow_file)
assert table.table == in_memory_pa_table
assert isinstance(table, MemoryMappedTable)
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_from_file_with_replay(arrow_file, in_memory_pa_table):
replays = [("slice", (0, 1), {}), ("flatten", (), {})]
with assert_arrow_memory_doesnt_increase():
table = MemoryMappedTable.from_file(arrow_file, replays=replays)
assert len(table) == 1
for method, args, kwargs in replays:
in_memory_pa_table = getattr(in_memory_pa_table, method)(*args, **kwargs)
assert table.table == in_memory_pa_table
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_deepcopy(arrow_file):
table = MemoryMappedTable.from_file(arrow_file)
copied_table = copy.deepcopy(table)
assert table.table == copied_table.table
assert table.path == copied_table.path
assert_index_attributes_equal(table, copied_table)
# deepcopy must return the exact same arrow objects since they are immutable
assert table.table is copied_table.table
assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches))
def test_memory_mapped_table_pickle(arrow_file):
table = MemoryMappedTable.from_file(arrow_file)
pickled_table = pickle.dumps(table)
unpickled_table = pickle.loads(pickled_table)
assert unpickled_table.table == table.table
assert unpickled_table.path == table.path
assert_index_attributes_equal(table, unpickled_table)
def test_memory_mapped_table_pickle_doesnt_fill_memory(arrow_file):
with assert_arrow_memory_doesnt_increase():
table = MemoryMappedTable.from_file(arrow_file)
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_pickle_applies_replay(arrow_file):
replays = [("slice", (0, 1), {}), ("flatten", (), {})]
with assert_arrow_memory_doesnt_increase():
table = MemoryMappedTable.from_file(arrow_file, replays=replays)
assert isinstance(table, MemoryMappedTable)
assert table.replays == replays
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_slice(arrow_file, in_memory_pa_table):
table = MemoryMappedTable.from_file(arrow_file).slice(1, 2)
assert table.table == in_memory_pa_table.slice(1, 2)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("slice", (1, 2), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_filter(arrow_file, in_memory_pa_table):
mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))])
table = MemoryMappedTable.from_file(arrow_file).filter(mask)
assert table.table == in_memory_pa_table.filter(mask)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("filter", (mask,), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
# filter DOES increase memory
# assert_pickle_without_bringing_data_in_memory(table)
assert_pickle_does_bring_data_in_memory(table)
def test_memory_mapped_table_flatten(arrow_file, in_memory_pa_table):
table = MemoryMappedTable.from_file(arrow_file).flatten()
assert table.table == in_memory_pa_table.flatten()
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("flatten", (), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_combine_chunks(arrow_file, in_memory_pa_table):
table = MemoryMappedTable.from_file(arrow_file).combine_chunks()
assert table.table == in_memory_pa_table.combine_chunks()
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("combine_chunks", (), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_cast(arrow_file, in_memory_pa_table):
assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types
schema = pa.schema(
{
k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32())
for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types)
}
)
table = MemoryMappedTable.from_file(arrow_file).cast(schema)
assert table.table == in_memory_pa_table.cast(schema)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("cast", (schema,), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
# cast DOES increase memory when converting integers precision for example
# assert_pickle_without_bringing_data_in_memory(table)
assert_pickle_does_bring_data_in_memory(table)
def test_memory_mapped_table_replace_schema_metadata(arrow_file, in_memory_pa_table):
metadata = {"huggingface": "{}"}
table = MemoryMappedTable.from_file(arrow_file).replace_schema_metadata(metadata)
assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("replace_schema_metadata", (metadata,), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_add_column(arrow_file, in_memory_pa_table):
i = len(in_memory_pa_table.column_names)
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
table = MemoryMappedTable.from_file(arrow_file).add_column(i, field_, column)
assert table.table == in_memory_pa_table.add_column(i, field_, column)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("add_column", (i, field_, column), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_append_column(arrow_file, in_memory_pa_table):
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
table = MemoryMappedTable.from_file(arrow_file).append_column(field_, column)
assert table.table == in_memory_pa_table.append_column(field_, column)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("append_column", (field_, column), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_remove_column(arrow_file, in_memory_pa_table):
table = MemoryMappedTable.from_file(arrow_file).remove_column(0)
assert table.table == in_memory_pa_table.remove_column(0)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("remove_column", (0,), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_set_column(arrow_file, in_memory_pa_table):
i = len(in_memory_pa_table.column_names)
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
table = MemoryMappedTable.from_file(arrow_file).set_column(i, field_, column)
assert table.table == in_memory_pa_table.set_column(i, field_, column)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("set_column", (i, field_, column), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_rename_columns(arrow_file, in_memory_pa_table):
assert "tokens" in in_memory_pa_table.column_names
names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names]
table = MemoryMappedTable.from_file(arrow_file).rename_columns(names)
assert table.table == in_memory_pa_table.rename_columns(names)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("rename_columns", (names,), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_drop(arrow_file, in_memory_pa_table):
names = [in_memory_pa_table.column_names[0]]
table = MemoryMappedTable.from_file(arrow_file).drop(names)
assert table.table == in_memory_pa_table.drop(names)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("drop", (names,), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
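# Illustrative sketch (not part of the test suite) of the pattern exercised above: operations on a
# MemoryMappedTable are recorded as "replays", so copying or pickling can rebuild the table from the
# arrow file instead of materializing its data in memory. The function name is an assumption.
def _example_memory_mapped_replays(arrow_file_path):
    table = MemoryMappedTable.from_file(arrow_file_path)
    table = table.slice(0, 2).flatten()  # both calls are appended to table.replays
    # Pickling serializes the file path and the replay list rather than the table data itself.
    return pickle.loads(pickle.dumps(table))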
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_init(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = (
in_memory_blocks
if blocks_type == "in_memory"
else memory_mapped_blocks
if blocks_type == "memory_mapped"
else mixed_in_memory_and_memory_mapped_blocks
)
table = ConcatenationTable(in_memory_pa_table, blocks)
assert table.table == in_memory_pa_table
assert table.blocks == blocks
def test_concatenation_table_from_blocks(in_memory_pa_table, in_memory_blocks):
assert len(in_memory_pa_table) > 2
in_memory_table = InMemoryTable(in_memory_pa_table)
t1, t2 = in_memory_table.slice(0, 2), in_memory_table.slice(2)
table = ConcatenationTable.from_blocks(in_memory_table)
assert isinstance(table, ConcatenationTable)
assert table.table == in_memory_pa_table
assert table.blocks == [[in_memory_table]]
table = ConcatenationTable.from_blocks([t1, t2])
assert isinstance(table, ConcatenationTable)
assert table.table == in_memory_pa_table
assert table.blocks == [[in_memory_table]]
table = ConcatenationTable.from_blocks([[t1], [t2]])
assert isinstance(table, ConcatenationTable)
assert table.table == in_memory_pa_table
assert table.blocks == [[in_memory_table]]
table = ConcatenationTable.from_blocks(in_memory_blocks)
assert isinstance(table, ConcatenationTable)
assert table.table == in_memory_pa_table
assert table.blocks == [[in_memory_table]]
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_from_blocks_doesnt_increase_memory(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
with assert_arrow_memory_doesnt_increase():
table = ConcatenationTable.from_blocks(blocks)
assert isinstance(table, ConcatenationTable)
assert table.table == in_memory_pa_table
if blocks_type == "in_memory":
assert table.blocks == [[InMemoryTable(in_memory_pa_table)]]
else:
assert table.blocks == blocks
@pytest.mark.parametrize("axis", [0, 1])
def test_concatenation_table_from_tables(axis, in_memory_pa_table, arrow_file):
in_memory_table = InMemoryTable(in_memory_pa_table)
concatenation_table = ConcatenationTable.from_blocks(in_memory_table)
memory_mapped_table = MemoryMappedTable.from_file(arrow_file)
tables = [in_memory_pa_table, in_memory_table, concatenation_table, memory_mapped_table]
if axis == 0:
expected_table = pa.concat_tables([in_memory_pa_table] * len(tables))
else:
# avoids error due to duplicate column names
tables[1:] = [add_suffix_to_column_names(table, i) for i, table in enumerate(tables[1:], 1)]
expected_table = in_memory_pa_table
for table in tables[1:]:
for name, col in zip(table.column_names, table.columns):
expected_table = expected_table.append_column(name, col)
with assert_arrow_memory_doesnt_increase():
table = ConcatenationTable.from_tables(tables, axis=axis)
assert isinstance(table, ConcatenationTable)
assert table.table == expected_table
# because of consolidation, we end up with 1 InMemoryTable and 1 MemoryMappedTable
    assert len(table.blocks) == (1 if axis == 1 else 2)
    assert len(table.blocks[0]) == (1 if axis == 0 else 2)
assert axis == 1 or len(table.blocks[1]) == 1
assert isinstance(table.blocks[0][0], InMemoryTable)
assert isinstance(table.blocks[1][0] if axis == 0 else table.blocks[0][1], MemoryMappedTable)
def test_concatenation_table_from_tables_axis1_misaligned_blocks(arrow_file):
table = MemoryMappedTable.from_file(arrow_file)
t1 = table.slice(0, 2)
t2 = table.slice(0, 3).rename_columns([col + "_1" for col in table.column_names])
concatenated = ConcatenationTable.from_tables(
[
ConcatenationTable.from_blocks([[t1], [t1], [t1]]),
ConcatenationTable.from_blocks([[t2], [t2]]),
],
axis=1,
)
assert len(concatenated) == 6
assert [len(row_blocks[0]) for row_blocks in concatenated.blocks] == [2, 1, 1, 2]
concatenated = ConcatenationTable.from_tables(
[
ConcatenationTable.from_blocks([[t2], [t2]]),
ConcatenationTable.from_blocks([[t1], [t1], [t1]]),
],
axis=1,
)
assert len(concatenated) == 6
assert [len(row_blocks[0]) for row_blocks in concatenated.blocks] == [2, 1, 1, 2]
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_deepcopy(
blocks_type, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
table = ConcatenationTable.from_blocks(blocks)
copied_table = copy.deepcopy(table)
assert table.table == copied_table.table
assert table.blocks == copied_table.blocks
assert_index_attributes_equal(table, copied_table)
# deepcopy must return the exact same arrow objects since they are immutable
assert table.table is copied_table.table
assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches))
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_pickle(
blocks_type, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
table = ConcatenationTable.from_blocks(blocks)
pickled_table = pickle.dumps(table)
unpickled_table = pickle.loads(pickled_table)
assert unpickled_table.table == table.table
assert unpickled_table.blocks == table.blocks
assert_index_attributes_equal(table, unpickled_table)
def test_concat_tables_with_features_metadata(arrow_file, in_memory_pa_table):
input_features = Features.from_arrow_schema(in_memory_pa_table.schema)
input_features["id"] = Value("int64", id="my_id")
    input_schema = input_features.arrow_schema
    t0 = in_memory_pa_table.replace_schema_metadata(input_schema.metadata)
t1 = MemoryMappedTable.from_file(arrow_file)
tables = [t0, t1]
concatenated_table = concat_tables(tables, axis=0)
output_schema = concatenated_table.schema
output_features = Features.from_arrow_schema(output_schema)
    assert output_schema == input_schema
    assert output_schema.metadata == input_schema.metadata
assert output_features == input_features
assert output_features["id"].id == "my_id"
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_slice(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
table = ConcatenationTable.from_blocks(blocks).slice(1, 2)
assert table.table == in_memory_pa_table.slice(1, 2)
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_filter(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))])
table = ConcatenationTable.from_blocks(blocks).filter(mask)
assert table.table == in_memory_pa_table.filter(mask)
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_flatten(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
table = ConcatenationTable.from_blocks(blocks).flatten()
assert table.table == in_memory_pa_table.flatten()
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_combine_chunks(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
table = ConcatenationTable.from_blocks(blocks).combine_chunks()
assert table.table == in_memory_pa_table.combine_chunks()
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_cast(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types
assert pa.int64() in in_memory_pa_table.schema.types
schema = pa.schema(
{
k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32())
for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types)
}
)
table = ConcatenationTable.from_blocks(blocks).cast(schema)
assert table.table == in_memory_pa_table.cast(schema)
assert isinstance(table, ConcatenationTable)
schema = pa.schema(
{
k: v if v != pa.int64() else pa.int32()
for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types)
}
)
table = ConcatenationTable.from_blocks(blocks).cast(schema)
assert table.table == in_memory_pa_table.cast(schema)
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concat_tables_cast_with_features_metadata(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
input_features = Features.from_arrow_schema(in_memory_pa_table.schema)
input_features["id"] = Value("int64", id="my_id")
    input_schema = input_features.arrow_schema
    concatenated_table = ConcatenationTable.from_blocks(blocks).cast(input_schema)
output_schema = concatenated_table.schema
output_features = Features.from_arrow_schema(output_schema)
    assert output_schema == input_schema
    assert output_schema.metadata == input_schema.metadata
assert output_features == input_features
assert output_features["id"].id == "my_id"
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_replace_schema_metadata(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
metadata = {"huggingface": "{}"}
table = ConcatenationTable.from_blocks(blocks).replace_schema_metadata(metadata)
assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_add_column(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
i = len(in_memory_pa_table.column_names)
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
with pytest.raises(NotImplementedError):
ConcatenationTable.from_blocks(blocks).add_column(i, field_, column)
# assert table.table == in_memory_pa_table.add_column(i, field_, column)
# unpickled_table = pickle.loads(pickle.dumps(table))
# assert unpickled_table.table == in_memory_pa_table.add_column(i, field_, column)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_append_column(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
with pytest.raises(NotImplementedError):
ConcatenationTable.from_blocks(blocks).append_column(field_, column)
# assert table.table == in_memory_pa_table.append_column(field_, column)
# unpickled_table = pickle.loads(pickle.dumps(table))
# assert unpickled_table.table == in_memory_pa_table.append_column(field_, column)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_remove_column(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
table = ConcatenationTable.from_blocks(blocks).remove_column(0)
assert table.table == in_memory_pa_table.remove_column(0)
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_set_column(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
i = len(in_memory_pa_table.column_names)
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
with pytest.raises(NotImplementedError):
ConcatenationTable.from_blocks(blocks).set_column(i, field_, column)
# assert table.table == in_memory_pa_table.set_column(i, field_, column)
# unpickled_table = pickle.loads(pickle.dumps(table))
# assert unpickled_table.table == in_memory_pa_table.set_column(i, field_, column)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_rename_columns(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
assert "tokens" in in_memory_pa_table.column_names
names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names]
table = ConcatenationTable.from_blocks(blocks).rename_columns(names)
assert isinstance(table, ConcatenationTable)
assert table.table == in_memory_pa_table.rename_columns(names)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_drop(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
names = [in_memory_pa_table.column_names[0]]
table = ConcatenationTable.from_blocks(blocks).drop(names)
assert table.table == in_memory_pa_table.drop(names)
assert isinstance(table, ConcatenationTable)
def test_concat_tables(arrow_file, in_memory_pa_table):
t0 = in_memory_pa_table
t1 = InMemoryTable(t0)
t2 = MemoryMappedTable.from_file(arrow_file)
t3 = ConcatenationTable.from_blocks(t1)
tables = [t0, t1, t2, t3]
concatenated_table = concat_tables(tables, axis=0)
assert concatenated_table.table == pa.concat_tables([t0] * 4)
assert concatenated_table.table.shape == (40, 4)
assert isinstance(concatenated_table, ConcatenationTable)
assert len(concatenated_table.blocks) == 3 # t0 and t1 are consolidated as a single InMemoryTable
assert isinstance(concatenated_table.blocks[0][0], InMemoryTable)
assert isinstance(concatenated_table.blocks[1][0], MemoryMappedTable)
assert isinstance(concatenated_table.blocks[2][0], InMemoryTable)
# add suffix to avoid error due to duplicate column names
concatenated_table = concat_tables(
[add_suffix_to_column_names(table, i) for i, table in enumerate(tables)], axis=1
)
assert concatenated_table.table.shape == (10, 16)
assert len(concatenated_table.blocks[0]) == 3 # t0 and t1 are consolidated as a single InMemoryTable
assert isinstance(concatenated_table.blocks[0][0], InMemoryTable)
assert isinstance(concatenated_table.blocks[0][1], MemoryMappedTable)
assert isinstance(concatenated_table.blocks[0][2], InMemoryTable)
def _interpolation_search_ground_truth(arr: List[int], x: int) -> Union[int, IndexError]:
for i in range(len(arr) - 1):
if arr[i] <= x < arr[i + 1]:
return i
return IndexError
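# The ground truth above is a linear scan: it returns the index i such that arr[i] <= x < arr[i + 1],
# or the IndexError class when x falls outside [arr[0], arr[-1]). For example, with
# arr = [0, 3, 6, 9, 12] and x = 7 it returns 2. _interpolation_search is expected to find the same
# index while touching only a handful of elements, which is checked with the counter class below.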
class _ListWithGetitemCounter(list):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.unique_getitem_calls = set()
def __getitem__(self, i):
out = super().__getitem__(i)
self.unique_getitem_calls.add(i)
return out
@property
def getitem_unique_count(self):
return len(self.unique_getitem_calls)
@pytest.mark.parametrize(
"arr, x",
[(np.arange(0, 14, 3), x) for x in range(-1, 22)]
+ [(list(np.arange(-5, 5)), x) for x in range(-6, 6)]
+ [([0, 1_000, 1_001, 1_003], x) for x in [-1, 0, 2, 100, 999, 1_000, 1_001, 1_002, 1_003, 1_004]]
+ [(list(range(1_000)), x) for x in [-1, 0, 1, 10, 666, 999, 1_000, 1_0001]],
)
def test_interpolation_search(arr, x):
ground_truth = _interpolation_search_ground_truth(arr, x)
if isinstance(ground_truth, int):
arr = _ListWithGetitemCounter(arr)
output = _interpolation_search(arr, x)
assert ground_truth == output
# 4 maximum unique getitem calls is expected for the cases of this test
# but it can be bigger for large and messy arrays.
assert arr.getitem_unique_count <= 4
else:
with pytest.raises(ground_truth):
_interpolation_search(arr, x)
def test_indexed_table_mixin():
n_rows_per_chunk = 10
n_chunks = 4
pa_table = pa.Table.from_pydict({"col": [0] * n_rows_per_chunk})
pa_table = pa.concat_tables([pa_table] * n_chunks)
table = Table(pa_table)
assert all(table._offsets.tolist() == np.cumsum([0] + [n_rows_per_chunk] * n_chunks))
assert table.fast_slice(5) == pa_table.slice(5)
assert table.fast_slice(2, 13) == pa_table.slice(2, 13)
@pytest.mark.parametrize(
"arrays",
[
[pa.array([[1, 2, 3, 4]]), pa.array([[10, 2]])],
[
pa.array([[[1, 2], [3]]], pa.list_(pa.list_(pa.int32()), 2)),
pa.array([[[10, 2, 3], [2]]], pa.list_(pa.list_(pa.int32()), 2)),
],
[pa.array([[[1, 2, 3]], [[2, 3], [20, 21]], [[4]]]).slice(1), pa.array([[[1, 2, 3]]])],
],
)
def test_concat_arrays(arrays):
assert array_concat(arrays) == pa.concat_arrays(arrays)
def test_concat_arrays_nested_with_nulls():
arrays = [pa.array([{"a": 21, "b": [[1, 2], [3]]}]), pa.array([{"a": 100, "b": [[1], None]}])]
concatenated_arrays = array_concat(arrays)
assert concatenated_arrays == pa.array([{"a": 21, "b": [[1, 2], [3]]}, {"a": 100, "b": [[1], None]}])
def test_concat_extension_arrays():
arrays = [pa.array([[[1, 2], [3, 4]]]), pa.array([[[10, 2], [3, 4]]])]
extension_type = Array2DExtensionType((2, 2), "int64")
assert array_concat([extension_type.wrap_array(array) for array in arrays]) == extension_type.wrap_array(
pa.concat_arrays(arrays)
)
def test_cast_array_to_features():
arr = pa.array([[0, 1]])
assert cast_array_to_feature(arr, Sequence(Value("string"))).type == pa.list_(pa.string())
with pytest.raises(TypeError):
cast_array_to_feature(arr, Sequence(Value("string")), allow_number_to_str=False)
def test_cast_array_to_features_nested():
arr = pa.array([[{"foo": [0]}]])
assert cast_array_to_feature(arr, [{"foo": Sequence(Value("string"))}]).type == pa.list_(
pa.struct({"foo": pa.list_(pa.string())})
)
def test_cast_array_to_features_to_nested_with_no_fields():
arr = pa.array([{}])
assert cast_array_to_feature(arr, {}).type == pa.struct({})
assert cast_array_to_feature(arr, {}).to_pylist() == arr.to_pylist()
def test_cast_array_to_features_nested_with_null_values():
# same type
arr = pa.array([{"foo": [None, [0]]}], pa.struct({"foo": pa.list_(pa.list_(pa.int64()))}))
casted_array = cast_array_to_feature(arr, {"foo": [[Value("int64")]]})
assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int64()))})
assert casted_array.to_pylist() == arr.to_pylist()
# different type
arr = pa.array([{"foo": [None, [0]]}], pa.struct({"foo": pa.list_(pa.list_(pa.int64()))}))
if datasets.config.PYARROW_VERSION.major < 10:
with pytest.warns(UserWarning, match="None values are converted to empty lists.+"):
casted_array = cast_array_to_feature(arr, {"foo": [[Value("int32")]]})
assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int32()))})
assert casted_array.to_pylist() == [
{"foo": [[], [0]]}
] # empty list because of https://github.com/huggingface/datasets/issues/3676
else:
with warnings.catch_warnings():
warnings.simplefilter("error")
casted_array = cast_array_to_feature(arr, {"foo": [[Value("int32")]]})
assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int32()))})
assert casted_array.to_pylist() == [{"foo": [None, [0]]}]
def test_cast_array_to_features_to_null_type():
# same type
arr = pa.array([[None, None]])
assert cast_array_to_feature(arr, Sequence(Value("null"))).type == pa.list_(pa.null())
# different type
arr = pa.array([[None, 1]])
with pytest.raises(TypeError):
cast_array_to_feature(arr, Sequence(Value("null")))
def test_cast_array_to_features_array_xd():
# same storage type
arr = pa.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], pa.list_(pa.list_(pa.int32(), 2), 2))
casted_array = cast_array_to_feature(arr, Array2D(shape=(2, 2), dtype="int32"))
assert casted_array.type == Array2DExtensionType(shape=(2, 2), dtype="int32")
# different storage type
casted_array = cast_array_to_feature(arr, Array2D(shape=(2, 2), dtype="float32"))
assert casted_array.type == Array2DExtensionType(shape=(2, 2), dtype="float32")
def test_cast_array_to_features_sequence_classlabel():
arr = pa.array([[], [1], [0, 1]], pa.list_(pa.int64()))
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64())
arr = pa.array([[], ["bar"], ["foo", "bar"]], pa.list_(pa.string()))
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64())
# Test empty arrays
arr = pa.array([[], []], pa.list_(pa.int64()))
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64())
arr = pa.array([[], []], pa.list_(pa.string()))
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64())
# Test invalid class labels
arr = pa.array([[2]], pa.list_(pa.int64()))
with pytest.raises(ValueError):
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"])))
arr = pa.array([["baz"]], pa.list_(pa.string()))
with pytest.raises(ValueError):
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"])))
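# Note on the test above: casting to Sequence(ClassLabel(...)) keeps integer ids as-is and maps string
# labels to their ids through the ClassLabel vocabulary; values outside the vocabulary (an unknown id
# such as 2, or an unknown name such as "baz") are expected to raise a ValueError.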
def test_cast_fixed_size_array_to_features_sequence():
arr = pa.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], pa.list_(pa.int32(), 3))
# Fixed size list
casted_array = cast_array_to_feature(arr, Sequence(Value("int64"), length=3))
assert casted_array.type == pa.list_(pa.int64(), 3)
assert casted_array.to_pylist() == arr.to_pylist()
# Variable size list
casted_array = cast_array_to_feature(arr, Sequence(Value("int64")))
assert casted_array.type == pa.list_(pa.int64())
assert casted_array.to_pylist() == arr.to_pylist()
def test_cast_sliced_fixed_size_array_to_features():
arr = pa.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], pa.list_(pa.int32(), 3))
casted_array = cast_array_to_feature(arr[1:], Sequence(Value("int64"), length=3))
assert casted_array.type == pa.list_(pa.int64(), 3)
assert casted_array.to_pylist() == arr[1:].to_pylist()
def test_embed_array_storage(image_file):
array = pa.array([{"bytes": None, "path": image_file}], type=Image.pa_type)
embedded_images_array = embed_array_storage(array, Image())
assert isinstance(embedded_images_array.to_pylist()[0]["path"], str)
assert embedded_images_array.to_pylist()[0]["path"] == "test_image_rgb.jpg"
assert isinstance(embedded_images_array.to_pylist()[0]["bytes"], bytes)
def test_embed_array_storage_nested(image_file):
array = pa.array([[{"bytes": None, "path": image_file}]], type=pa.list_(Image.pa_type))
embedded_images_array = embed_array_storage(array, [Image()])
assert isinstance(embedded_images_array.to_pylist()[0][0]["path"], str)
assert isinstance(embedded_images_array.to_pylist()[0][0]["bytes"], bytes)
array = pa.array([{"foo": {"bytes": None, "path": image_file}}], type=pa.struct({"foo": Image.pa_type}))
embedded_images_array = embed_array_storage(array, {"foo": Image()})
assert isinstance(embedded_images_array.to_pylist()[0]["foo"]["path"], str)
assert isinstance(embedded_images_array.to_pylist()[0]["foo"]["bytes"], bytes)
def test_embed_table_storage(image_file):
features = Features({"image": Image()})
table = table_cast(pa.table({"image": [image_file]}), features.arrow_schema)
embedded_images_table = embed_table_storage(table)
assert isinstance(embedded_images_table.to_pydict()["image"][0]["path"], str)
assert isinstance(embedded_images_table.to_pydict()["image"][0]["bytes"], bytes)
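# Note on the two tests above: embed_array_storage / embed_table_storage read the file referenced by
# "path" and fill in the "bytes" field, so the resulting table is self-contained (useful, for example,
# before exporting or uploading a dataset that references local image files).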
@pytest.mark.parametrize(
"table",
[
InMemoryTable(pa.table({"foo": range(10)})),
InMemoryTable(pa.concat_tables([pa.table({"foo": range(0, 5)}), pa.table({"foo": range(5, 10)})])),
InMemoryTable(pa.concat_tables([pa.table({"foo": [i]}) for i in range(10)])),
],
)
@pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20])
@pytest.mark.parametrize("drop_last_batch", [False, True])
def test_table_iter(table, batch_size, drop_last_batch):
num_rows = len(table) if not drop_last_batch else len(table) // batch_size * batch_size
num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size
subtables = list(table_iter(table, batch_size=batch_size, drop_last_batch=drop_last_batch))
assert len(subtables) == num_batches
if drop_last_batch:
assert all(len(subtable) == batch_size for subtable in subtables)
else:
assert all(len(subtable) == batch_size for subtable in subtables[:-1])
assert len(subtables[-1]) <= batch_size
if num_rows > 0:
reloaded = pa.concat_tables(subtables)
assert table.slice(0, num_rows).to_pydict() == reloaded.to_pydict()
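# Illustrative sketch (not a test): table_iter is the batched-read primitive the assertions above
# exercise. A typical use, assuming a `process` callable defined elsewhere, would be:
#
#   for subtable in table_iter(table, batch_size=1000):
#       process(subtable.to_pydict())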
@pytest.mark.parametrize(
"pa_type, expected",
[
(pa.int8(), False),
(pa.struct({"col1": pa.int8(), "col2": pa.int64()}), False),
(pa.struct({"col1": pa.list_(pa.int8()), "col2": Array2DExtensionType((1, 3), "int64")}), True),
(pa.list_(pa.int8()), False),
(pa.list_(Array2DExtensionType((1, 3), "int64"), 4), True),
],
)
def test_is_extension_type(pa_type, expected):
assert _is_extension_type(pa_type) == expected
# --- end of file: datasets/tests/test_table.py ---
<jupyter_start><jupyter_text>Unit 2: Q-Learning with FrozenLake-v1 ⛄ and Taxi-v3 🚕In this notebook, **you'll code your first Reinforcement Learning agent from scratch** to play FrozenLake ❄️ using Q-Learning, share it with the community, and experiment with different configurations.⬇️ Here is an example of what **you will achieve in just a couple of minutes.** ⬇️ 🎮 Environments:- [FrozenLake-v1](https://gymnasium.farama.org/environments/toy_text/frozen_lake/)- [Taxi-v3](https://gymnasium.farama.org/environments/toy_text/taxi/)📚 RL-Library:- Python and NumPy- [Gymnasium](https://gymnasium.farama.org/)We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues). Objectives of this notebook 🏆At the end of the notebook, you will:- Be able to use **Gymnasium**, the environment library.- Be able to code a Q-Learning agent from scratch.- Be able to **push your trained agent and the code to the Hub** with a nice video replay and an evaluation score 🔥. This notebook is from the Deep Reinforcement Learning Course In this free course, you will:- 📖 Study Deep Reinforcement Learning in **theory and practice**.- 🧑💻 Learn to **use famous Deep RL libraries** such as Stable Baselines3, RL Baselines3 Zoo, CleanRL and Sample Factory 2.0.- 🤖 Train **agents in unique environments**. And more! Check 📚 the syllabus 👉 https://simoninithomas.github.io/deep-rl-courseDon’t forget to **sign up to the course** (we are collecting your email to be able to **send you the links when each Unit is published and give you information about the challenges and updates).**The best way to keep in touch is to join our discord server to exchange with the community and with us 👉🏻 https://discord.gg/ydHrjt3WP5 Prerequisites 🏗️Before diving into the notebook, you need to:🔲 📚 **Study [Q-Learning by reading Unit 2](https://huggingface.co/deep-rl-course/unit2/introduction)** 🤗 A small recap of Q-Learning *Q-Learning* **is the RL algorithm that**:- Trains a *Q-Function*, an **action-value function** that is encoded, in internal memory, by a *Q-table* **that contains all the state-action pair values.**- Given a state and action, our Q-Function **will search the Q-table for the corresponding value.** - When the training is done, **we have an optimal Q-Function, so an optimal Q-Table.** - And if we **have an optimal Q-function**, we have an optimal policy, since we **know, for each state, the best action to take.**But, in the beginning, our **Q-Table is useless since it gives arbitrary values for each state-action pair (most of the time we initialize the Q-Table to 0 values)**. 
But, as we’ll explore the environment and update our Q-Table, it will give us better and better approximations. This is the Q-Learning pseudocode: Let's code our first Reinforcement Learning algorithm 🚀 To validate this hands-on for the [certification process](https://huggingface.co/deep-rl-course/en/unit0/introductioncertification-process), you need to push your trained Taxi model to the Hub and **get a result of >= 4.5**. To find your result, go to the [leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) and find your model; **the result = mean_reward - std of reward**. For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introductioncertification-process Install dependencies and create a virtual display 🔽In the notebook, we'll need to generate a replay video. To do so, with Colab, **we need to have a virtual screen to render the environment** (and thus record the frames).Hence the following cell will install the libraries and create and run a virtual screen 🖥We’ll install multiple ones:- `gymnasium`: Contains the FrozenLake-v1 ⛄ and Taxi-v3 🚕 environments.- `pygame`: Used for the FrozenLake-v1 and Taxi-v3 UI.- `numpy`: Used for handling our Q-table.The Hugging Face Hub 🤗 works as a central place where anyone can share and explore models and datasets. It has versioning, metrics, visualizations and other features that will allow you to easily collaborate with others.You can see all the Deep RL models available (if they use Q-Learning) here 👉 https://huggingface.co/models?other=q-learning<jupyter_code>!pip install -r https://raw.githubusercontent.com/huggingface/deep-rl-class/main/notebooks/unit2/requirements-unit2.txt
!sudo apt-get update
!sudo apt-get install -y python3-opengl
!apt install ffmpeg xvfb
!pip3 install pyvirtualdisplay<jupyter_output><empty_output><jupyter_text>To make sure the newly installed libraries are used, **sometimes it's required to restart the notebook runtime**. The next cell will force the **runtime to crash, so you'll need to connect again and run the code starting from here**. Thanks to this trick, **we will be able to run our virtual screen.**<jupyter_code>import os
os.kill(os.getpid(), 9)
# Virtual display
from pyvirtualdisplay import Display
virtual_display = Display(visible=0, size=(1400, 900))
virtual_display.start()<jupyter_output><empty_output><jupyter_text>Import the packages 📦In addition to the installed libraries, we also use:- `random`: To generate random numbers (that will be useful for epsilon-greedy policy).- `imageio`: To generate a replay video.<jupyter_code>import numpy as np
import gymnasium as gym
import random
import imageio
import os
import tqdm
import pickle5 as pickle
from tqdm.notebook import tqdm<jupyter_output><empty_output><jupyter_text>We're now ready to code our Q-Learning algorithm 🔥 Part 1: Frozen Lake ⛄ (non-slippery version) Create and understand [FrozenLake environment ⛄](https://gymnasium.farama.org/environments/toy_text/frozen_lake/)---💡 A good habit when you start to use an environment is to check its documentation👉 https://gymnasium.farama.org/environments/toy_text/frozen_lake/---We're going to train our Q-Learning agent **to navigate from the starting state (S) to the goal state (G) by walking only on frozen tiles (F) and avoiding holes (H)**.We can have two sizes of environment:- `map_name="4x4"`: a 4x4 grid version- `map_name="8x8"`: an 8x8 grid versionThe environment has two modes:- `is_slippery=False`: The agent always moves **in the intended direction** due to the non-slippery nature of the frozen lake (deterministic).- `is_slippery=True`: The agent **may not always move in the intended direction** due to the slippery nature of the frozen lake (stochastic). For now, let's keep it simple with the 4x4 map and non-slippery.We add a parameter called `render_mode` that specifies how the environment should be visualised. In our case, because we **want to record a video of the environment at the end, we need to set render_mode to rgb_array**.As [explained in the documentation](https://gymnasium.farama.org/api/env/gymnasium.Env.render) “rgb_array”: Return a single frame representing the current state of the environment. A frame is a np.ndarray with shape (x, y, 3) representing RGB values for an x-by-y pixel image.<jupyter_code># Create the FrozenLake-v1 environment using 4x4 map and non-slippery version and render_mode="rgb_array"
env = gym.make() # TODO use the correct parameters<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>env = gym.make("FrozenLake-v1", map_name="4x4", is_slippery=False, render_mode="rgb_array")<jupyter_output><empty_output><jupyter_text>You can create your own custom grid like this:```pythondesc=["SFFF", "FHFH", "FFFH", "HFFG"]gym.make('FrozenLake-v1', desc=desc, is_slippery=True)```but we'll use the default environment for now. Let's see what the Environment looks like:<jupyter_code># We create our environment with gym.make("<name_of_the_environment>")- `is_slippery=False`: The agent always moves in the intended direction due to the non-slippery nature of the frozen lake (deterministic).
print("_____OBSERVATION SPACE_____ \n")
print("Observation Space", env.observation_space)
print("Sample observation", env.observation_space.sample()) # Get a random observation<jupyter_output><empty_output><jupyter_text>We see with `Observation Space Shape Discrete(16)` that the observation is an integer representing the **agent’s current position as current_row * ncols + current_col (where both the row and col start at 0)**.For example, the goal position in the 4x4 map can be calculated as follows: 3 * 4 + 3 = 15. The number of possible observations is dependent on the size of the map. **For example, the 4x4 map has 16 possible observations.**For instance, this is what state = 0 looks like:<jupyter_code>print("\n _____ACTION SPACE_____ \n")
print("Action Space Shape", env.action_space.n)
print("Action Space Sample", env.action_space.sample()) # Take a random action<jupyter_output><empty_output><jupyter_text>The action space (the set of possible actions the agent can take) is discrete with 4 actions available 🎮:- 0: GO LEFT- 1: GO DOWN- 2: GO RIGHT- 3: GO UPReward function 💰:- Reach goal: +1- Reach hole: 0- Reach frozen: 0 Create and Initialize the Q-table 🗄️(👀 Step 1 of the pseudocode)It's time to initialize our Q-table! To know how many rows (states) and columns (actions) to use, we need to know the action and observation space. We already know their values from before, but we'll want to obtain them programmatically so that our algorithm generalizes for different environments. Gym provides us a way to do that: `env.action_space.n` and `env.observation_space.n`<jupyter_code>state_space =
print("There are ", state_space, " possible states")
action_space =
print("There are ", action_space, " possible actions")
# Let's create our Qtable of size (state_space, action_space) and initialized each values at 0 using np.zeros. np.zeros needs a tuple (a,b)
def initialize_q_table(state_space, action_space):
Qtable =
return Qtable
Qtable_frozenlake = initialize_q_table(state_space, action_space)<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>state_space = env.observation_space.n
print("There are ", state_space, " possible states")
action_space = env.action_space.n
print("There are ", action_space, " possible actions")
# Let's create our Qtable of size (state_space, action_space) and initialized each values at 0 using np.zeros
def initialize_q_table(state_space, action_space):
Qtable = np.zeros((state_space, action_space))
return Qtable
Qtable_frozenlake = initialize_q_table(state_space, action_space)<jupyter_output><empty_output><jupyter_text>Define the greedy policy 🤖Remember we have two policies since Q-Learning is an **off-policy** algorithm. This means we're using a **different policy for acting and updating the value function**.- Epsilon-greedy policy (acting policy)- Greedy-policy (updating policy)The greedy policy will also be the final policy we'll have when the Q-learning agent completes training. The greedy policy is used to select an action using the Q-table.<jupyter_code>def greedy_policy(Qtable, state):
# Exploitation: take the action with the highest state, action value
action =
return action<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>def greedy_policy(Qtable, state):
# Exploitation: take the action with the highest state, action value
action = np.argmax(Qtable[state][:])
return action<jupyter_output><empty_output><jupyter_text>Define the epsilon-greedy policy 🤖Epsilon-greedy is the training policy that handles the exploration/exploitation trade-off.The idea with epsilon-greedy:- With *probability 1 - ɛ* : **we do exploitation** (i.e. our agent selects the action with the highest state-action pair value).- With *probability ɛ*: we do **exploration** (trying a random action).As the training continues, we progressively **reduce the epsilon value since we will need less and less exploration and more exploitation.**<jupyter_code>def epsilon_greedy_policy(Qtable, state, epsilon):
# Randomly generate a number between 0 and 1
random_num =
# if random_num > greater than epsilon --> exploitation
if random_num > epsilon:
# Take the action with the highest value given a state
# np.argmax can be useful here
action =
# else --> exploration
else:
action = # Take a random action
return action<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>def epsilon_greedy_policy(Qtable, state, epsilon):
# Randomly generate a number between 0 and 1
random_num = random.uniform(0,1)
# if random_num > greater than epsilon --> exploitation
if random_num > epsilon:
# Take the action with the highest value given a state
# np.argmax can be useful here
action = greedy_policy(Qtable, state)
# else --> exploration
else:
action = env.action_space.sample()
  return action<jupyter_output><empty_output><jupyter_text>Define the hyperparameters ⚙️The exploration-related hyperparameters are some of the most important ones.- We need to make sure that our agent **explores enough of the state space** to learn a good value approximation. To do that, we need to have a progressive decay of epsilon.- If you decrease epsilon too fast (too high decay_rate), **you take the risk that your agent will be stuck**, since your agent didn't explore enough of the state space and hence can't solve the problem.<jupyter_code># Training parameters
n_training_episodes = 10000 # Total training episodes
learning_rate = 0.7 # Learning rate
# Evaluation parameters
n_eval_episodes = 100 # Total number of test episodes
# Environment parameters
env_id = "FrozenLake-v1" # Name of the environment
max_steps = 99 # Max steps per episode
gamma = 0.95 # Discounting rate
eval_seed = [] # The evaluation seed of the environment
# Exploration parameters
max_epsilon = 1.0 # Exploration probability at start
min_epsilon = 0.05 # Minimum exploration probability
decay_rate = 0.0005 # Exponential decay rate for exploration prob<jupyter_output><empty_output><jupyter_text>Create the training loop methodThe training loop goes like this:```For episode in the total of training episodes:Reduce epsilon (since we need less and less exploration)Reset the environment For step in max timesteps: Choose the action At using epsilon greedy policy Take the action (a) and observe the outcome state(s') and reward (r) Update the Q-value Q(s,a) using Bellman equation Q(s,a) + lr [R(s,a) + gamma * max Q(s',a') - Q(s,a)] If done, finish the episode Our next state is the new state```<jupyter_code>def train(n_training_episodes, min_epsilon, max_epsilon, decay_rate, env, max_steps, Qtable):
for episode in tqdm(range(n_training_episodes)):
# Reduce epsilon (because we need less and less exploration)
epsilon = min_epsilon + (max_epsilon - min_epsilon)*np.exp(-decay_rate*episode)
# Reset the environment
state, info = env.reset()
step = 0
terminated = False
truncated = False
# repeat
for step in range(max_steps):
# Choose the action At using epsilon greedy policy
action =
# Take action At and observe Rt+1 and St+1
# Take the action (a) and observe the outcome state(s') and reward (r)
new_state, reward, terminated, truncated, info =
# Update Q(s,a):= Q(s,a) + lr [R(s,a) + gamma * max Q(s',a') - Q(s,a)]
Qtable[state][action] =
# If terminated or truncated finish the episode
if terminated or truncated:
break
# Our next state is the new state
state = new_state
return Qtable<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>def train(n_training_episodes, min_epsilon, max_epsilon, decay_rate, env, max_steps, Qtable):
for episode in tqdm(range(n_training_episodes)):
# Reduce epsilon (because we need less and less exploration)
epsilon = min_epsilon + (max_epsilon - min_epsilon)*np.exp(-decay_rate*episode)
# Reset the environment
state, info = env.reset()
step = 0
terminated = False
truncated = False
# repeat
for step in range(max_steps):
# Choose the action At using epsilon greedy policy
action = epsilon_greedy_policy(Qtable, state, epsilon)
# Take action At and observe Rt+1 and St+1
# Take the action (a) and observe the outcome state(s') and reward (r)
new_state, reward, terminated, truncated, info = env.step(action)
# Update Q(s,a):= Q(s,a) + lr [R(s,a) + gamma * max Q(s',a') - Q(s,a)]
Qtable[state][action] = Qtable[state][action] + learning_rate * (reward + gamma * np.max(Qtable[new_state]) - Qtable[state][action])
# If terminated or truncated finish the episode
if terminated or truncated:
break
# Our next state is the new state
state = new_state
return Qtable<jupyter_output><empty_output><jupyter_text>Train the Q-Learning agent 🏃<jupyter_code>Qtable_frozenlake = train(n_training_episodes, min_epsilon, max_epsilon, decay_rate, env, max_steps, Qtable_frozenlake)<jupyter_output><empty_output><jupyter_text>Let's see what our Q-Learning table looks like now 👀<jupyter_code>Qtable_frozenlake<jupyter_output><empty_output><jupyter_text>The evaluation method 📝- We defined the evaluation method that we're going to use to test our Q-Learning agent.<jupyter_code>def evaluate_agent(env, max_steps, n_eval_episodes, Q, seed):
"""
  Evaluate the agent for ``n_eval_episodes`` episodes and return the average reward and std of reward.
:param env: The evaluation environment
:param max_steps: Maximum number of steps per episode
  :param n_eval_episodes: Number of episodes to evaluate the agent
:param Q: The Q-table
:param seed: The evaluation seed array (for taxi-v3)
"""
episode_rewards = []
for episode in tqdm(range(n_eval_episodes)):
if seed:
state, info = env.reset(seed=seed[episode])
else:
state, info = env.reset()
step = 0
truncated = False
terminated = False
total_rewards_ep = 0
for step in range(max_steps):
      # Take the action (index) that has the maximum expected future reward given that state
action = greedy_policy(Q, state)
new_state, reward, terminated, truncated, info = env.step(action)
total_rewards_ep += reward
if terminated or truncated:
break
state = new_state
episode_rewards.append(total_rewards_ep)
mean_reward = np.mean(episode_rewards)
std_reward = np.std(episode_rewards)
return mean_reward, std_reward<jupyter_output><empty_output><jupyter_text>Evaluate our Q-Learning agent 📈- Usually, you should have a mean reward of 1.0- The **environment is relatively easy** since the state space is really small (16). What you can try to do is [to replace it with the slippery version](https://gymnasium.farama.org/environments/toy_text/frozen_lake/), which introduces stochasticity, making the environment more complex.<jupyter_code># Evaluate our Agent
mean_reward, std_reward = evaluate_agent(env, max_steps, n_eval_episodes, Qtable_frozenlake, eval_seed)
print(f"Mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")<jupyter_output><empty_output><jupyter_text>Publish our trained model to the Hub 🔥Now that we saw good results after the training, **we can publish our trained model to the Hub 🤗 with one line of code**.Here's an example of a Model Card: Under the hood, the Hub uses git-based repositories (don't worry if you don't know what git is), which means you can update the model with new versions as you experiment and improve your agent. Do not modify this code<jupyter_code>from huggingface_hub import HfApi, snapshot_download
from huggingface_hub.repocard import metadata_eval_result, metadata_save
from pathlib import Path
import datetime
import json
def record_video(env, Qtable, out_directory, fps=1):
"""
Generate a replay video of the agent
:param env
:param Qtable: Qtable of our agent
:param out_directory
  :param fps: how many frames per second (with taxi-v3 and frozenlake-v1 we use 1)
"""
images = []
terminated = False
truncated = False
state, info = env.reset(seed=random.randint(0,500))
img = env.render()
images.append(img)
  while not terminated and not truncated:
    # Take the action (index) that has the maximum expected future reward given that state
action = np.argmax(Qtable[state][:])
state, reward, terminated, truncated, info = env.step(action) # We directly put next_state = state for recording logic
img = env.render()
images.append(img)
imageio.mimsave(out_directory, [np.array(img) for i, img in enumerate(images)], fps=fps)
def push_to_hub(
repo_id, model, env, video_fps=1, local_repo_path="hub"
):
"""
Evaluate, Generate a video and Upload a model to Hugging Face Hub.
This method does the complete pipeline:
- It evaluates the model
- It generates the model card
- It generates a replay video of the agent
- It pushes everything to the Hub
  :param repo_id: id of the model repository from the Hugging Face Hub
:param env
  :param video_fps: how many frames per second to record our video replay
(with taxi-v3 and frozenlake-v1 we use 1)
:param local_repo_path: where the local repository is
"""
_, repo_name = repo_id.split("/")
eval_env = env
api = HfApi()
# Step 1: Create the repo
repo_url = api.create_repo(
repo_id=repo_id,
exist_ok=True,
)
# Step 2: Download files
repo_local_path = Path(snapshot_download(repo_id=repo_id))
# Step 3: Save the model
if env.spec.kwargs.get("map_name"):
model["map_name"] = env.spec.kwargs.get("map_name")
if env.spec.kwargs.get("is_slippery", "") == False:
model["slippery"] = False
# Pickle the model
with open((repo_local_path) / "q-learning.pkl", "wb") as f:
pickle.dump(model, f)
# Step 4: Evaluate the model and build JSON with evaluation metrics
mean_reward, std_reward = evaluate_agent(
eval_env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]
)
evaluate_data = {
"env_id": model["env_id"],
"mean_reward": mean_reward,
"n_eval_episodes": model["n_eval_episodes"],
"eval_datetime": datetime.datetime.now().isoformat()
}
# Write a JSON file called "results.json" that will contain the
# evaluation results
with open(repo_local_path / "results.json", "w") as outfile:
json.dump(evaluate_data, outfile)
# Step 5: Create the model card
env_name = model["env_id"]
if env.spec.kwargs.get("map_name"):
env_name += "-" + env.spec.kwargs.get("map_name")
if env.spec.kwargs.get("is_slippery", "") == False:
env_name += "-" + "no_slippery"
metadata = {}
metadata["tags"] = [env_name, "q-learning", "reinforcement-learning", "custom-implementation"]
# Add metrics
eval = metadata_eval_result(
model_pretty_name=repo_name,
task_pretty_name="reinforcement-learning",
task_id="reinforcement-learning",
metrics_pretty_name="mean_reward",
metrics_id="mean_reward",
metrics_value=f"{mean_reward:.2f} +/- {std_reward:.2f}",
dataset_pretty_name=env_name,
dataset_id=env_name,
)
# Merges both dictionaries
metadata = {**metadata, **eval}
model_card = f"""
  # **Q-Learning** Agent playing **{env_id}**
  This is a trained model of a **Q-Learning** agent playing **{env_id}**.
## Usage
```python
model = load_from_hub(repo_id="{repo_id}", filename="q-learning.pkl")
# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
"""
evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"])
readme_path = repo_local_path / "README.md"
readme = ""
print(readme_path.exists())
if readme_path.exists():
with readme_path.open("r", encoding="utf8") as f:
readme = f.read()
else:
readme = model_card
with readme_path.open("w", encoding="utf-8") as f:
f.write(readme)
# Save our metrics to Readme metadata
metadata_save(readme_path, metadata)
# Step 6: Record a video
video_path = repo_local_path / "replay.mp4"
record_video(env, model["qtable"], video_path, video_fps)
# Step 7. Push everything to the Hub
api.upload_folder(
repo_id=repo_id,
folder_path=repo_local_path,
path_in_repo=".",
)
print("Your model is pushed to the Hub. You can view your model here: ", repo_url)<jupyter_output><empty_output><jupyter_text>.By using `push_to_hub` **you evaluate, record a replay, generate a model card of your agent and push it to the Hub**.This way:- You can **showcase our work** 🔥- You can **visualize your agent playing** 👀- You can **share an agent with the community that others can use** 💾- You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard To be able to share your model with the community there are three more steps to follow:1️⃣ (If it's not already done) create an account to HF ➡ https://huggingface.co/join2️⃣ Sign in and then, you need to store your authentication token from the Hugging Face website.- Create a new token (https://huggingface.co/settings/tokens) **with write role**<jupyter_code>from huggingface_hub import notebook_login
notebook_login()<jupyter_output><empty_output><jupyter_text>If you don't want to use a Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` (or `login`) 3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥 using `push_to_hub()` function- Let's create **the model dictionary that contains the hyperparameters and the Q_table**.<jupyter_code>model = {
"env_id": env_id,
"max_steps": max_steps,
"n_training_episodes": n_training_episodes,
"n_eval_episodes": n_eval_episodes,
"eval_seed": eval_seed,
"learning_rate": learning_rate,
"gamma": gamma,
"max_epsilon": max_epsilon,
"min_epsilon": min_epsilon,
"decay_rate": decay_rate,
"qtable": Qtable_frozenlake
}<jupyter_output><empty_output><jupyter_text>Let's fill the `push_to_hub` function:- `repo_id`: the name of the Hugging Face Hub Repository that will be created/updated `(repo_id = {username}/{repo_name})`💡 A good `repo_id` is `{username}/q-{env_id}`- `model`: our model dictionary containing the hyperparameters and the Qtable.- `env`: the environment.- `commit_message`: message of the commit<jupyter_code>model
username = "" # FILL THIS
repo_name = "q-FrozenLake-v1-4x4-noSlippery"
push_to_hub(
repo_id=f"{username}/{repo_name}",
model=model,
    env=env)<jupyter_output><empty_output><jupyter_text>Congrats 🥳 you've just implemented from scratch, trained, and uploaded your first Reinforcement Learning agent.FrozenLake-v1 no_slippery is a very simple environment, let's try a harder one 🔥. Part 2: Taxi-v3 🚖 Create and understand [Taxi-v3 🚕](https://gymnasium.farama.org/environments/toy_text/taxi/)---💡 A good habit when you start to use an environment is to check its documentation👉 https://gymnasium.farama.org/environments/toy_text/taxi/---In `Taxi-v3` 🚕, there are four designated locations in the grid world indicated by R(ed), G(reen), Y(ellow), and B(lue).When the episode starts, **the taxi starts off at a random square** and the passenger is at a random location. The taxi drives to the passenger’s location, **picks up the passenger**, drives to the passenger’s destination (another one of the four specified locations), and then **drops off the passenger**. Once the passenger is dropped off, the episode ends.<jupyter_code>env = gym.make("Taxi-v3", render_mode="rgb_array")<jupyter_output><empty_output><jupyter_text>There are **500 discrete states since there are 25 taxi positions, 5 possible locations of the passenger** (including the case when the passenger is in the taxi), and **4 destination locations.**<jupyter_code>state_space = env.observation_space.n
print("There are ", state_space, " possible states")
action_space = env.action_space.n
print("There are ", action_space, " possible actions")<jupyter_output><empty_output><jupyter_text>The action space (the set of possible actions the agent can take) is discrete with **6 actions available 🎮**:- 0: move south- 1: move north- 2: move east- 3: move west- 4: pickup passenger- 5: drop off passengerReward function 💰:- -1 per step unless other reward is triggered.- +20 delivering passenger.- -10 executing “pickup” and “drop-off” actions illegally.<jupyter_code># Create our Q table with state_size rows and action_size columns (500x6)
Qtable_taxi = initialize_q_table(state_space, action_space)
print(Qtable_taxi)
print("Q-table shape: ", Qtable_taxi .shape)<jupyter_output><empty_output><jupyter_text>Define the hyperparameters ⚙️⚠ DO NOT MODIFY EVAL_SEED: the eval_seed array **allows us to evaluate your agent with the same taxi starting positions for every classmate**<jupyter_code># Training parameters
n_training_episodes = 25000 # Total training episodes
learning_rate = 0.7 # Learning rate
# Evaluation parameters
n_eval_episodes = 100 # Total number of test episodes
# DO NOT MODIFY EVAL_SEED
eval_seed = [16,54,165,177,191,191,120,80,149,178,48,38,6,125,174,73,50,172,100,148,146,6,25,40,68,148,49,167,9,97,164,176,61,7,54,55,
161,131,184,51,170,12,120,113,95,126,51,98,36,135,54,82,45,95,89,59,95,124,9,113,58,85,51,134,121,169,105,21,30,11,50,65,12,43,82,145,152,97,106,55,31,85,38,
112,102,168,123,97,21,83,158,26,80,63,5,81,32,11,28,148] # Evaluation seed, this ensures that all classmates' agents are evaluated on the same taxi starting positions
# Each seed has a specific starting state
# Environment parameters
env_id = "Taxi-v3" # Name of the environment
max_steps = 99 # Max steps per episode
gamma = 0.95 # Discounting rate
# Exploration parameters
max_epsilon = 1.0 # Exploration probability at start
min_epsilon = 0.05 # Minimum exploration probability
decay_rate = 0.005 # Exponential decay rate for exploration prob<jupyter_output><empty_output><jupyter_text>Train our Q-Learning agent 🏃<jupyter_code>Qtable_taxi = train(n_training_episodes, min_epsilon, max_epsilon, decay_rate, env, max_steps, Qtable_taxi)
Qtable_taxi<jupyter_output><empty_output><jupyter_text>Create a model dictionary 💾 and publish our trained model to the Hub 🔥- We create a model dictionary that will contain all the training hyperparameters for reproducibility and the Q-Table.<jupyter_code>model = {
"env_id": env_id,
"max_steps": max_steps,
"n_training_episodes": n_training_episodes,
"n_eval_episodes": n_eval_episodes,
"eval_seed": eval_seed,
"learning_rate": learning_rate,
"gamma": gamma,
"max_epsilon": max_epsilon,
"min_epsilon": min_epsilon,
"decay_rate": decay_rate,
"qtable": Qtable_taxi
}
username = "" # FILL THIS
repo_name = "" # FILL THIS
push_to_hub(
repo_id=f"{username}/{repo_name}",
model=model,
env=env)<jupyter_output><empty_output><jupyter_text>Now that it's on the Hub, you can compare the results of your Taxi-v3 with your classmates using the leaderboard 🏆 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard Part 3: Load from Hub 🔽What's amazing with Hugging Face Hub 🤗 is that you can easily load powerful models from the community.Loading a saved model from the Hub is really easy:1. You go https://huggingface.co/models?other=q-learning to see the list of all the q-learning saved models.2. You select one and copy its repo_id 3. Then we just need to use `load_from_hub` with:- The repo_id- The filename: the saved model inside the repo. Do not modify this code<jupyter_code>from urllib.error import HTTPError
from huggingface_hub import hf_hub_download
def load_from_hub(repo_id: str, filename: str) -> str:
"""
Download a model from Hugging Face Hub.
:param repo_id: id of the model repository from the Hugging Face Hub
:param filename: name of the model zip file from the repository
"""
# Get the model from the Hub, download and cache the model on your local disk
pickle_model = hf_hub_download(
repo_id=repo_id,
filename=filename
)
with open(pickle_model, 'rb') as f:
downloaded_model_file = pickle.load(f)
return downloaded_model_file<jupyter_output><empty_output><jupyter_text>.<jupyter_code>model = load_from_hub(repo_id="ThomasSimonini/q-Taxi-v3", filename="q-learning.pkl") # Try to use another model
print(model)
env = gym.make(model["env_id"])
evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"])
model = load_from_hub(repo_id="ThomasSimonini/q-FrozenLake-v1-no-slippery", filename="q-learning.pkl") # Try to use another model
env = gym.make(model["env_id"], is_slippery=False)
evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"])<jupyter_output><empty_output> | deep-rl-class/notebooks/unit2/unit2.ipynb/0 | {
"file_path": "deep-rl-class/notebooks/unit2/unit2.ipynb",
"repo_id": "deep-rl-class",
"token_count": 11160
} | 79 |
# Additional Readings [[additional-readings]]
These are **optional readings** if you want to go deeper.
## Deep Reinforcement Learning [[deep-rl]]
- [Reinforcement Learning: An Introduction, Richard Sutton and Andrew G. Barto Chapter 1, 2 and 3](http://incompleteideas.net/book/RLbook2020.pdf)
- [Foundations of Deep RL Series, L1 MDPs, Exact Solution Methods, Max-ent RL by Pieter Abbeel](https://youtu.be/2GwBez0D20A)
- [Spinning Up RL by OpenAI Part 1: Key concepts of RL](https://spinningup.openai.com/en/latest/spinningup/rl_intro.html)
## Gym [[gym]]
- [Getting Started With OpenAI Gym: The Basic Building Blocks](https://blog.paperspace.com/getting-started-with-openai-gym/)
- [Make your own Gym custom environment](https://www.gymlibrary.dev/content/environment_creation/)
| deep-rl-class/units/en/unit1/additional-readings.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit1/additional-readings.mdx",
"repo_id": "deep-rl-class",
"token_count": 246
} | 80 |
# Glossary [[glossary]]
This is a community-created glossary. Contributions are welcomed!
### Strategies to find the optimal policy
- **Policy-based methods.** The policy is usually trained with a neural network to select what action to take given a state. In this case, it is the neural network that outputs the action the agent should take, instead of using a value function. Depending on the experience received from the environment, the neural network will be re-adjusted and will provide better actions.
- **Value-based methods.** In this case, a value function is trained to output the value of a state or a state-action pair that will represent our policy. However, this value doesn't define what action the agent should take. Instead, we need to specify the behavior of the agent given the output of the value function. For example, we could decide to adopt a policy that always takes the action leading to the biggest expected return (a Greedy Policy). In summary, the policy is a Greedy Policy (or whatever behavior the user defines) that uses the values of the value function to decide the actions to take.
### Among the value-based methods, we can find two main strategies
- **The state-value function.** For each state, the state-value function is the expected return if the agent starts in that state and follows the policy until the end.
- **The action-value function.** In contrast to the state-value function, the action-value function calculates, for each state-action pair, the expected return if the agent starts in that state, takes that action, and then follows the policy forever after. Both functions are formalized below.
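
Written in the standard RL notation (a general convention, not wording taken from the original glossary entries):

\\[V_{\pi}(s) = \mathbb{E}_{\pi}[G_t \mid S_t = s] \qquad Q_{\pi}(s, a) = \mathbb{E}_{\pi}[G_t \mid S_t = s, A_t = a]\\]

where \\(G_t\\) is the discounted return obtained from time step \\(t\\) onwards while following the policy \\(\pi\\).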
### Epsilon-greedy strategy:
- Common strategy used in reinforcement learning that involves balancing exploration and exploitation.
- Chooses the action with the highest expected reward with a probability of 1-epsilon.
- Chooses a random action with a probability of epsilon.
- Epsilon is typically decreased over time to shift the focus towards exploitation, as sketched below.
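
A minimal sketch of this strategy in Python. It assumes a NumPy Q-table indexed as `Q[state, action]`; the function names and decay constants are illustrative, not values prescribed by the course.

```python
import random
import numpy as np

def epsilon_greedy(Q, state, epsilon):
    # With probability epsilon: explore by taking a random action
    if random.random() < epsilon:
        return random.randrange(Q.shape[1])
    # With probability 1 - epsilon: exploit the current Q-table
    return int(np.argmax(Q[state]))

def decayed_epsilon(episode, min_eps=0.05, max_eps=1.0, decay_rate=0.0005):
    # Exponentially decrease epsilon over episodes to favor exploitation later on
    return min_eps + (max_eps - min_eps) * np.exp(-decay_rate * episode)
```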
### Greedy strategy:
- Involves always choosing the action that is expected to lead to the highest reward, based on the current knowledge of the environment. (Only exploitation)
- Always chooses the action with the highest expected reward.
- Does not include any exploration.
- Can be disadvantageous in environments with uncertainty or unknown optimal actions.
### Off-policy vs on-policy algorithms
- **Off-policy algorithms:** A different policy is used at training time and inference time
- **On-policy algorithms:** The same policy is used during training and inference
### Monte Carlo and Temporal Difference learning strategies
- **Monte Carlo (MC):** Learning at the end of the episode. With Monte Carlo, we wait until the episode ends and then we update the value function (or policy function) from a complete episode.
- **Temporal Difference (TD):** Learning at each step. With Temporal Difference Learning, we update the value function (or policy function) at each step without requiring a complete episode. Both update rules are sketched below.
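
As a rough illustration, here are both update rules for a simple state-value table `V` (the function names, `alpha` and `gamma` are illustrative, not the course's reference implementation):

```python
# Temporal Difference (TD(0)): update after every single step,
# bootstrapping on the current estimate of the next state's value
def td_update(V, state, reward, next_state, alpha, gamma):
    td_target = reward + gamma * V[next_state]
    V[state] += alpha * (td_target - V[state])

# Monte Carlo: wait until the episode is over, then update every visited
# state towards the actual return observed from that state onwards
def mc_update(V, episode, alpha, gamma):
    # `episode` is a list of (state, reward) pairs collected until termination
    G = 0.0
    for state, reward in reversed(episode):
        G = reward + gamma * G
        V[state] += alpha * (G - V[state])
```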
If you want to improve the course, you can [open a Pull Request](https://github.com/huggingface/deep-rl-class/pulls).
This glossary was made possible thanks to:
- [Ramón Rueda](https://github.com/ramon-rd)
- [Hasarindu Perera](https://github.com/hasarinduperera/)
- [Arkady Arkhangorodsky](https://github.com/arkadyark/)
| deep-rl-class/units/en/unit2/glossary.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit2/glossary.mdx",
"repo_id": "deep-rl-class",
"token_count": 760
} | 81 |
# From Q-Learning to Deep Q-Learning [[from-q-to-dqn]]
We learned that **Q-Learning is an algorithm we use to train our Q-Function**, an **action-value function** that determines the value of being at a particular state and taking a specific action at that state.
<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-function.jpg" alt="Q-function"/>
</figure>
The **Q comes from "the Quality" of that action at that state.**
Internally, our Q-function is encoded by **a Q-table, a table where each cell corresponds to a state-action pair value.** Think of this Q-table as **the memory or cheat sheet of our Q-function.**
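
For a small environment, this cheat sheet is literally just a 2D array. As a rough illustration (the sizes and indices below are arbitrary and only meant to show the lookup):

```python
import numpy as np

n_states, n_actions = 16, 4               # e.g. FrozenLake 4x4
Qtable = np.zeros((n_states, n_actions))  # one row per state, one column per action

state, action = 5, 2
value = Qtable[state, action]               # "searching the Q-table" is a simple array lookup
best_action = int(np.argmax(Qtable[state])) # acting greedily with respect to the table
```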
The problem is that Q-Learning is a *tabular method*. This becomes an issue if the state and action spaces **are not small enough to be represented efficiently by arrays and tables**. In other words: it is **not scalable**.
Q-Learning worked well with small state space environments like:
- FrozenLake, we had 16 states.
- Taxi-v3, we had 500 states.
But think of what we're going to do today: we will train an agent to learn to play Space Invaders, a more complex game, using the frames as input.
As **[Nikita Melkozerov mentioned](https://twitter.com/meln1k), Atari environments** have an observation space with a shape of (210, 160, 3)*, containing values ranging from 0 to 255 so that gives us \\(256^{210 \times 160 \times 3} = 256^{100800}\\) possible observations (for comparison, we have approximately \\(10^{80}\\) atoms in the observable universe).
* A single frame in Atari is composed of an image of 210x160 pixels. Given that the images are in color (RGB), there are 3 channels. This is why the shape is (210, 160, 3). For each pixel, the value can go from 0 to 255.
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/atari.jpg" alt="Atari State Space"/>
Therefore, the state space is gigantic; due to this, creating and updating a Q-table for that environment would not be efficient. In this case, the best idea is to approximate the Q-values using a parametrized Q-function \\(Q_{\theta}(s,a)\\) .
This neural network will approximate, given a state, the different Q-values for each possible action at that state. And that's exactly what Deep Q-Learning does.
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/deep.jpg" alt="Deep Q Learning"/>
Now that we understand Deep Q-Learning, let's dive deeper into the Deep Q-Network.
| deep-rl-class/units/en/unit3/from-q-to-dqn.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit3/from-q-to-dqn.mdx",
"repo_id": "deep-rl-class",
"token_count": 733
} | 82 |
# Conclusion
Congrats on finishing this unit! You’ve just trained your first ML-Agents and shared it to the Hub 🥳.
The best way to learn is to **practice and try stuff**. Why not try another environment? [ML-Agents has 18 different environments](https://github.com/Unity-Technologies/ml-agents/blob/develop/docs/Learning-Environment-Examples.md).
For instance:
- [Worm](https://singularite.itch.io/worm), where you teach a worm to crawl.
- [Walker](https://singularite.itch.io/walker), where you teach an agent to walk towards a goal.
Check the documentation to find out how to train them and to see the list of already integrated ML-Agents environments on the Hub: https://github.com/huggingface/ml-agents#getting-started
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/envs-unity.jpeg" alt="Example envs"/>
In the next unit, we're going to learn about multi-agents. You're going to train your first multi-agents to compete in Soccer and Snowball fight against other classmate's agents.
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit7/snowballfight.gif" alt="Snownball fight"/>
Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback then please 👉 [fill this form](https://forms.gle/BzKXWzLAGZESGNaE9)
### Keep Learning, stay awesome 🤗
| deep-rl-class/units/en/unit5/conclusion.mdx/0 | {
"file_path": "deep-rl-class/units/en/unit5/conclusion.mdx",
"repo_id": "deep-rl-class",
"token_count": 430
} | 83 |