# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from dataclasses import dataclass, field
from typing import Optional
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer
from trl import GRPOConfig, GRPOTrainer, ModelConfig, ScriptArguments, TrlParser, get_peft_config
@dataclass
class GRPOScriptArguments(ScriptArguments):
"""
Script arguments for the GRPO training script.
Args:
reward_model_name_or_path (`str` or `None`):
Reward model id of a pretrained model hosted inside a model repo on huggingface.co or local path to a
directory containing model weights saved using [`~transformers.PreTrainedModel.save_pretrained`].
"""
reward_model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "Reward model id of a pretrained model hosted inside a model repo on huggingface.co or "
"local path to a directory containing model weights saved using `PreTrainedModel.save_pretrained`."
},
)
def main(script_args, training_args, model_args):
# Load a pretrained model
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
)
reward_model = AutoModelForSequenceClassification.from_pretrained(
script_args.reward_model_name_or_path, trust_remote_code=model_args.trust_remote_code, num_labels=1
)
# Load the dataset
dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config)
# Initialize the GRPO trainer
trainer = GRPOTrainer(
model=model,
reward_funcs=reward_model,
args=training_args,
train_dataset=dataset[script_args.dataset_train_split],
eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None,
processing_class=tokenizer,
peft_config=get_peft_config(model_args),
)
# Train and push the model to the Hub
trainer.train()
# Save and push to hub
trainer.save_model(training_args.output_dir)
if training_args.push_to_hub:
trainer.push_to_hub(dataset_name=script_args.dataset_name)
def make_parser(subparsers: argparse._SubParsersAction = None):
dataclass_types = (GRPOScriptArguments, GRPOConfig, ModelConfig)
if subparsers is not None:
parser = subparsers.add_parser("grpo", help="Run the GRPO training script", dataclass_types=dataclass_types)
else:
parser = TrlParser(dataclass_types)
return parser
if __name__ == "__main__":
parser = make_parser()
script_args, training_args, model_args = parser.parse_args_and_config()
main(script_args, training_args, model_args)
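For reference, here is a minimal sketch of driving the script above programmatically rather than through the CLI parser. It is a sketch only: the model and dataset identifiers are placeholders, and it assumes the standard `GRPOConfig`/`ModelConfig` constructors from TRL; adjust names to your own setup.

```python
# Hypothetical programmatic invocation of the GRPO script defined above.
# All model/dataset names below are placeholders.
from trl import GRPOConfig, ModelConfig

script_args = GRPOScriptArguments(
    dataset_name="trl-lib/tldr",                      # placeholder dataset id
    reward_model_name_or_path="my-org/reward-model",  # placeholder reward model id
)
training_args = GRPOConfig(output_dir="grpo-output", report_to="none")
model_args = ModelConfig(model_name_or_path="Qwen/Qwen2-0.5B-Instruct")

main(script_args, training_args, model_args)
```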
| trl/trl/scripts/grpo.py/0 | {
"file_path": "trl/trl/scripts/grpo.py",
"repo_id": "trl",
"token_count": 1251
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import random
import textwrap
import warnings
from collections import defaultdict
from contextlib import contextmanager, nullcontext
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, Callable, Literal, Optional, Union
import pandas as pd
import torch
import torch.amp as amp
import torch.nn as nn
import torch.nn.functional as F
import transformers
from accelerate import PartialState
from accelerate.utils import is_deepspeed_available, tqdm
from datasets import Dataset, IterableDataset
from packaging import version
from torch.utils.data import DataLoader
from transformers import (
AutoModelForCausalLM,
BaseImageProcessor,
DataCollator,
FeatureExtractionMixin,
PreTrainedModel,
PreTrainedTokenizerBase,
ProcessorMixin,
Trainer,
is_comet_available,
is_wandb_available,
)
from transformers.data.data_collator import DataCollatorMixin
from transformers.models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES
from transformers.trainer_callback import TrainerCallback
from transformers.trainer_utils import EvalLoopOutput
from transformers.utils import is_peft_available, is_torch_xpu_available
from transformers.utils.deprecation import deprecate_kwarg
from ..data_utils import maybe_apply_chat_template, maybe_extract_prompt
from ..models import PreTrainedModelWrapper, create_reference_model
from .callbacks import SyncRefModelCallback
from .dpo_config import DPOConfig, FDivergenceConstants, FDivergenceType
from .utils import (
RunningMoments,
cap_exp,
disable_dropout_in_model,
empty_cache,
flush_left,
generate_model_card,
get_comet_experiment_url,
log_table_to_comet_experiment,
pad,
pad_to_length,
peft_module_casting_to_bf16,
selective_log_softmax,
)
if is_peft_available():
from peft import PeftModel, get_peft_model, prepare_model_for_kbit_training
if is_wandb_available():
import wandb
if is_deepspeed_available():
import deepspeed
@dataclass
class DataCollatorForPreference(DataCollatorMixin):
"""
Data collator used for preference data. Inputs are dynamically padded to the maximum length of a batch if they
are not all of the same length.
Args:
pad_token_id (`int`):
Token ID to use for padding.
return_tensors (`str`, *optional*, defaults to `"pt"`):
Type of Tensor to return. Only `"pt"` is currently supported.
Examples:
```python
>>> from trl import DataCollatorForPreference
>>> collator = DataCollatorForPreference(pad_token_id=0)
>>> examples = [
... {"prompt_input_ids": [1, 2, 3], "chosen_input_ids": [4, 5], "rejected_input_ids": [6]},
... {"prompt_input_ids": [7, 8], "chosen_input_ids": [9, 10], "rejected_input_ids": [11, 12, 13]}
... ]
>>> collator(examples)
{'prompt_input_ids': tensor([[1, 2, 3],
[0, 7, 8]]),
'prompt_attention_mask': tensor([[1, 1, 1],
[0, 1, 1]]),
'chosen_input_ids': tensor([[ 4, 5],
[ 9, 10]]),
'chosen_attention_mask': tensor([[1, 1],
[1, 1]]),
'rejected_input_ids': tensor([[ 6, 0, 0],
[11, 12, 13]]),
'rejected_attention_mask': tensor([[1, 0, 0],
[1, 1, 1]])
}
```
"""
pad_token_id: int
return_tensors: str = "pt"
def torch_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
# Convert to tensor
prompt_input_ids = [torch.tensor(example["prompt_input_ids"]) for example in examples]
prompt_attention_mask = [torch.ones_like(input_ids) for input_ids in prompt_input_ids]
chosen_input_ids = [torch.tensor(example["chosen_input_ids"]) for example in examples]
chosen_attention_mask = [torch.ones_like(input_ids) for input_ids in chosen_input_ids]
rejected_input_ids = [torch.tensor(example["rejected_input_ids"]) for example in examples]
rejected_attention_mask = [torch.ones_like(input_ids) for input_ids in rejected_input_ids]
if "pixel_values" in examples[0]:
pixel_values = [torch.tensor(example["pixel_values"]) for example in examples]
if "pixel_attention_mask" in examples[0]:
pixel_attention_mask = [torch.tensor(example["pixel_attention_mask"]) for example in examples]
if "ref_chosen_logps" in examples[0] and "ref_rejected_logps" in examples[0]:
ref_chosen_logps = torch.tensor([example["ref_chosen_logps"] for example in examples])
ref_rejected_logps = torch.tensor([example["ref_rejected_logps"] for example in examples])
# Pad
output = {}
output["prompt_input_ids"] = pad(prompt_input_ids, padding_value=self.pad_token_id, padding_side="left")
output["prompt_attention_mask"] = pad(prompt_attention_mask, padding_value=0, padding_side="left")
output["chosen_input_ids"] = pad(chosen_input_ids, padding_value=self.pad_token_id)
output["chosen_attention_mask"] = pad(chosen_attention_mask, padding_value=0)
output["rejected_input_ids"] = pad(rejected_input_ids, padding_value=self.pad_token_id)
output["rejected_attention_mask"] = pad(rejected_attention_mask, padding_value=0)
if "pixel_values" in examples[0]:
output["pixel_values"] = pad(pixel_values, padding_value=0.0)
if "pixel_attention_mask" in examples[0]:
output["pixel_attention_mask"] = pad(pixel_attention_mask, padding_value=0)
if "image_sizes" in examples[0]:
output["image_sizes"] = torch.tensor([example["image_sizes"] for example in examples])
if "ref_chosen_logps" in examples[0] and "ref_rejected_logps" in examples[0]:
output["ref_chosen_logps"] = ref_chosen_logps
output["ref_rejected_logps"] = ref_rejected_logps
return output
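As a quick sanity check (not part of the file itself), the collator above can be dropped into a standard PyTorch `DataLoader`; `pad_token_id=0` and the token ids are placeholders taken from the docstring example.

```python
from torch.utils.data import DataLoader

examples = [
    {"prompt_input_ids": [1, 2, 3], "chosen_input_ids": [4, 5], "rejected_input_ids": [6]},
    {"prompt_input_ids": [7, 8], "chosen_input_ids": [9, 10], "rejected_input_ids": [11, 12, 13]},
]
collator = DataCollatorForPreference(pad_token_id=0)
loader = DataLoader(examples, batch_size=2, collate_fn=collator)
batch = next(iter(loader))
# batch["prompt_input_ids"] is left-padded and the completion tensors are right-padded,
# matching the docstring example above.
```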
class DPOTrainer(Trainer):
r"""
Trainer for Direct Preference Optimization (DPO).
Args:
model (`transformers.PreTrainedModel`):
The model to train, preferably an `AutoModelForCausalLM`.
ref_model (`PreTrainedModelWrapper`):
Hugging Face transformer model with a causal language modeling head. Used for implicit reward computation and loss. If no
reference model is provided, the trainer will create a reference model with the same architecture as the model to be optimized.
args (`DPOConfig`):
The DPO config arguments to use for training.
data_collator (`transformers.DataCollator`):
The data collator to use for training. If None is specified, the default data collator (`DataCollatorForPreference`) will be used
which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences.
train_dataset (`datasets.Dataset`):
The dataset to use for training.
eval_dataset (`datasets.Dataset`):
The dataset to use for evaluation.
processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*):
Processing class used to process the data. If provided, will be used to automatically process the inputs
for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
reuse the fine-tuned model.
This supersedes the `tokenizer` argument, which is now deprecated.
model_init (`Callable[[], transformers.PreTrainedModel]`):
The model initializer to use for training. If None is specified, the default model initializer will be used.
compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*):
The function to use to compute the metrics. Must take a `EvalPrediction` and return
a dictionary string to metric values.
callbacks (`list[transformers.TrainerCallback]`):
The callbacks to use for training.
optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
The optimizer and scheduler to use for training.
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
The function to use to preprocess the logits before computing the metrics.
peft_config (`dict`, defaults to `None`):
The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model.
"""
_tag_names = ["trl", "dpo"]
@deprecate_kwarg(
"tokenizer", "0.16.0", "processing_class", warn_if_greater_or_equal_version=True, raise_if_both_names=True
)
def __init__(
self,
model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
ref_model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
args: Optional[DPOConfig] = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
processing_class: Optional[
Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
] = None,
model_init: Optional[Callable[[], PreTrainedModel]] = None,
compute_metrics: Optional[Callable[[EvalLoopOutput], dict]] = None,
callbacks: Optional[list[TrainerCallback]] = None,
optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
peft_config: Optional[dict] = None,
):
if model is None:
raise ValueError("No model provided. Please provide a model to train.")
if not isinstance(model, str) and ref_model is model:
raise ValueError(
"`model` and `ref_model` cannot be the same object. If you want `ref_model` to be the "
"same as `model`, you must mass a copy of it, or `None` if you use peft."
)
if args.model_init_kwargs is None:
model_init_kwargs = {}
elif not isinstance(model, str):
raise ValueError(
"You passed model_init_kwargs to the DPOTrainer/DPOConfig, but your model is already instantiated."
)
else:
model_init_kwargs = args.model_init_kwargs
torch_dtype = model_init_kwargs.get("torch_dtype")
if torch_dtype is not None:
# Convert to `torch.dtype` if a string is passed
if isinstance(torch_dtype, str) and torch_dtype != "auto":
torch_dtype = getattr(torch, torch_dtype)
if torch_dtype != "auto" and not isinstance(torch_dtype, torch.dtype):
raise ValueError(
f"Invalid `torch_dtype` passed to the DPOConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}."
)
model_init_kwargs["torch_dtype"] = torch_dtype
if args.ref_model_init_kwargs is None:
ref_model_init_kwargs = {}
elif not isinstance(ref_model, str):
raise ValueError(
"You passed ref_model_init_kwargs to the DPOTrainer/DPOConfig, but your ref_model is already instantiated."
)
else:
ref_model_init_kwargs = args.ref_model_init_kwargs
torch_dtype = ref_model_init_kwargs.get("torch_dtype")
if torch_dtype is not None:
# Convert to `torch.dtype` if a string is passed
if isinstance(torch_dtype, str) and torch_dtype != "auto":
torch_dtype = getattr(torch, torch_dtype)
if torch_dtype != "auto" and not isinstance(torch_dtype, torch.dtype):
raise ValueError(
f"Invalid `torch_dtype` passed to the DPOConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}."
)
ref_model_init_kwargs["torch_dtype"] = torch_dtype
if isinstance(model, str):
model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs)
if isinstance(ref_model, str):
ref_model = AutoModelForCausalLM.from_pretrained(ref_model, **ref_model_init_kwargs)
# Initialize this variable to False. It tracks whether `peft_module_casting_to_bf16` has been called,
# so that autocast can be applied correctly when needed.
self._peft_has_been_casted_to_bf16 = False
if not is_peft_available() and peft_config is not None:
raise ValueError(
"PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models"
)
elif is_peft_available() and peft_config is not None:
# if model is a peft model and we have a peft_config, we merge and unload it first
if isinstance(model, PeftModel):
model = model.merge_and_unload()
if ref_model is not None and not args.force_use_ref_model:
raise ValueError(
"You passed both a ref_model and a peft_config. For training PEFT adapters with DPO there is no need to pass a reference"
" model. Please pass `ref_model=None` in case you want to train PEFT adapters, or pass a ref_model with `force_use_ref_model=True` in DPOTrainer's init."
" if you want to use a different ref_model."
)
if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False):
_support_gc_kwargs = hasattr(
args, "gradient_checkpointing_kwargs"
) and "gradient_checkpointing_kwargs" in list(
inspect.signature(prepare_model_for_kbit_training).parameters
)
prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing}
if _support_gc_kwargs:
prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs
model = prepare_model_for_kbit_training(model, **prepare_model_kwargs)
elif getattr(args, "gradient_checkpointing", False):
# For backward compatibility with older versions of transformers
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
else:
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
# get peft model with the given config
model = get_peft_model(model, peft_config)
if args.bf16 and getattr(model, "is_loaded_in_4bit", False):
peft_module_casting_to_bf16(model)
# If args.bf16 we need to explicitly call `generate` with torch amp autocast context manager
self._peft_has_been_casted_to_bf16 = True
# For models that use gradient_checkpointing, we need to attach a hook that enables input
# to explicitly have `requires_grad=True`, otherwise training will either silently
# fail or completely fail.
elif getattr(args, "gradient_checkpointing", False):
# For backward compatibility with older versions of transformers
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
else:
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
if args.generate_during_eval and not (is_wandb_available() or is_comet_available()):
raise ValueError(
"`generate_during_eval=True` requires Weights and Biases or Comet to be installed."
" Please install `wandb` or `comet-ml` to resolve."
)
self.is_encoder_decoder = model.config.is_encoder_decoder
self.is_vision_model = model.config.model_type in MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES.keys()
self.is_peft_model = is_peft_available() and isinstance(model, PeftModel)
self.model_adapter_name = args.model_adapter_name
self.ref_adapter_name = args.ref_adapter_name
self.reference_free = args.reference_free
if ref_model:
self.ref_model = ref_model
elif self.is_peft_model or args.precompute_ref_log_probs:
# The `model` with adapters turned off will be used as the reference model
self.ref_model = None
else:
self.ref_model = create_reference_model(model)
if processing_class is None:
raise ValueError("processing_class must be specified to tokenize a DPO dataset.")
if args.padding_value is not None:
self.padding_value = args.padding_value
else:
if hasattr(processing_class, "pad_token_id") and processing_class.pad_token_id is not None:
self.padding_value = processing_class.pad_token_id
elif hasattr(processing_class, "tokenizer") and processing_class.tokenizer.pad_token_id is not None:
self.padding_value = processing_class.tokenizer.pad_token_id
else:
raise ValueError(
"`padding_value` is not specified in `DPOConfig`, and `pad_token_id` is missing in the "
"`processing_class`. Please either set the `padding_value` argument in `DPOConfig`, or set "
"`tokenizer.pad_token` (e.g., `tokenizer.pad_token = tokenizer.eos_token`) before instantiating "
"the trainer."
)
if data_collator is None:
data_collator = DataCollatorForPreference(pad_token_id=self.padding_value)
# Disable dropout in the model and reference model
if args.disable_dropout:
disable_dropout_in_model(model)
if self.ref_model is not None:
disable_dropout_in_model(self.ref_model)
self.generate_during_eval = args.generate_during_eval
self.label_pad_token_id = args.label_pad_token_id
self.max_prompt_length = args.max_prompt_length
self.max_completion_length = args.max_completion_length
self.max_length = args.max_length
self.truncation_mode = args.truncation_mode
self.precompute_ref_log_probs = args.precompute_ref_log_probs
self.use_logits_to_keep = args.use_logits_to_keep
if args.padding_free:
if model.config._attn_implementation != "flash_attention_2":
warnings.warn(
"Padding-free training is enabled, but the attention implementation is not set to "
"'flash_attention_2'. Padding-free training flattens batches into a single sequence, and "
"'flash_attention_2' is the only known attention mechanism that reliably supports this. Using "
"other implementations may lead to unexpected behavior. To ensure compatibility, set "
"`attn_implementation='flash_attention_2'` in the model configuration, or verify that your "
"attention mechanism can handle flattened sequences."
)
self.padding_free = args.padding_free
# Since ref_log_probs are precomputed on the first call to get_train/eval_dataloader,
# keep track of that first call to avoid recomputing them on subsequent calls.
self._precomputed_train_ref_log_probs = False
self._precomputed_eval_ref_log_probs = False
if (
args.loss_type in ["hinge", "ipo", "bco_pair", "sppo_hard", "nca_pair", "apo_zero", "apo_down"]
and args.label_smoothing > 0
):
warnings.warn(
f"You are using the {args.loss_type} loss type that does not support label smoothing. The "
"`label_smoothing` parameter will be ignored. Set `label_smoothing` to `0.0` to remove this warning.",
UserWarning,
)
if args.loss_type == "kto_pair":
raise ValueError("Support for kto_pair has been removed in DPOTrainer. Please use KTOTrainer.")
self.beta = args.beta
self.label_smoothing = args.label_smoothing
self.loss_type = args.loss_type
self.aux_loss_enabled = getattr(model.config, "output_router_logits", False)
self.use_weighting = args.use_weighting
self.aux_loss_coef = getattr(model.config, "router_aux_loss_coef", 0.0)
if self.aux_loss_enabled and self.aux_loss_coef == 0.0:
warnings.warn(
"You set `output_router_logits` to `True` in the model config, but `router_aux_loss_coef` is set to "
"`0.0`, meaning the auxiliary loss will not be used. Either set `router_aux_loss_coef` to a value "
"greater than `0.0`, or set `output_router_logits` to `False` if you don't want to use the auxiliary "
"loss.",
UserWarning,
)
self._stored_metrics = defaultdict(lambda: defaultdict(list))
self.f_divergence_type = args.f_divergence_type
self.f_divergence_params = {FDivergenceConstants.ALPHA_DIVERGENCE_COEF_KEY: args.f_alpha_divergence_coef}
self.dataset_num_proc = args.dataset_num_proc
# The trainer estimates the number of FLOPs (floating-point operations) using the number of elements in the
# input tensor associated with the key "input_ids". However, in DPO, the sampled data does not include the
# "input_ids" key. Instead, the available keys are "prompt_input_ids", "chosen_input_ids", and
# "rejected_input_ids". As a result, the trainer issues the warning: "Could not estimate the number of tokens
# of the input, floating-point operations will not be computed." To suppress this warning, we set the
# "estimate_tokens" key in the model's "warnings_issued" dictionary to True. This acts as a flag to indicate
# that the warning has already been issued.
model.warnings_issued["estimate_tokens"] = True
# Dataset preparation
train_dataset = self._prepare_dataset(train_dataset, processing_class, args, "train")
if eval_dataset is not None:
if isinstance(eval_dataset, dict):
eval_dataset = {
key: self._prepare_dataset(dataset, processing_class, args, key)
for key, dataset in eval_dataset.items()
}
else:
eval_dataset = self._prepare_dataset(eval_dataset, processing_class, args, "eval")
super().__init__(
model=model,
args=args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
processing_class=processing_class,
model_init=model_init,
compute_metrics=compute_metrics,
callbacks=callbacks,
optimizers=optimizers,
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
)
# Gradient accumulation requires scaled loss. Normally, loss scaling in the parent class depends on whether the
# model accepts loss-related kwargs. Since we compute our own loss, this check is irrelevant. We set
# self.model_accepts_loss_kwargs to False to enable scaling.
self.model_accepts_loss_kwargs = False
# Add tags for models that have been loaded with the correct transformers version
if hasattr(self.model, "add_model_tags"):
self.model.add_model_tags(self._tag_names)
if not hasattr(self, "accelerator"):
raise AttributeError(
"Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`."
)
# Deepspeed Zero-3 does not support precompute_ref_log_probs
if self.is_deepspeed_enabled:
if self.accelerator.state.deepspeed_plugin.zero_stage == 3 and self.precompute_ref_log_probs:
raise ValueError(
"You cannot use `precompute_ref_log_probs=True` with Deepspeed ZeRO-3. Please set `precompute_ref_log_probs=False`."
)
if self.ref_model is None:
if not (self.is_peft_model or self.precompute_ref_log_probs):
raise ValueError(
"No reference model and model is not a Peft model. Try setting `precompute_ref_log_probs=True`"
)
if args.sync_ref_model:
raise ValueError(
"You currently cannot use `ref_model=None` with TR-DPO method. Please provide `ref_model`."
)
else:
if self.is_deepspeed_enabled:
self.ref_model = self._prepare_deepspeed(self.ref_model)
else:
self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)
if args.sync_ref_model:
if self.precompute_ref_log_probs:
raise ValueError(
"You cannot use `precompute_ref_log_probs=True` with TR-DPO method. Please set `precompute_ref_log_probs=False`."
)
self.add_callback(SyncRefModelCallback(ref_model=self.ref_model, accelerator=self.accelerator))
if self.loss_type == "bco_pair":
self.running = RunningMoments(self.accelerator)
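For orientation, a minimal construction sketch for the trainer defined above. It is a sketch under assumptions: the model id is a placeholder, the tiny in-memory dataset uses the `prompt`/`chosen`/`rejected` columns expected by `_prepare_dataset`, and only a couple of `DPOConfig` fields are shown.

```python
from datasets import Dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

model_id = "Qwen/Qwen2-0.5B-Instruct"  # placeholder model id
model = AutoModelForCausalLM.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token  # ensure a pad token is set (see the padding_value check above)

train_dataset = Dataset.from_dict(
    {"prompt": ["The sky is"], "chosen": [" blue."], "rejected": [" green."]}
)
trainer = DPOTrainer(
    model=model,
    args=DPOConfig(output_dir="dpo-output", beta=0.1),
    train_dataset=train_dataset,
    processing_class=tokenizer,
)
trainer.train()
```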
def _prepare_dataset(
self,
dataset: Union[Dataset, IterableDataset],
processing_class: Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin],
args: DPOConfig,
dataset_name: str,
) -> Union[Dataset, IterableDataset]:
# Build the kwargs for the `map` function
map_kwargs = {"writer_batch_size": 10}
if isinstance(dataset, Dataset): # IterableDataset does not support num_proc
map_kwargs["num_proc"] = args.dataset_num_proc
with PartialState().local_main_process_first():
# Extract prompt if needed
if isinstance(dataset, Dataset): # `IterableDataset.map` does not support `desc`
map_kwargs["desc"] = f"Extracting prompt in {dataset_name} dataset"
dataset = dataset.map(maybe_extract_prompt, **map_kwargs)
# Apply the chat template if needed
if isinstance(dataset, Dataset): # `IterableDataset.map` does not support `desc`
map_kwargs["desc"] = f"Applying chat template to {dataset_name} dataset"
dataset = dataset.map(
maybe_apply_chat_template, fn_kwargs={"tokenizer": processing_class, "tools": args.tools}, **map_kwargs
)
# Tokenize the dataset
if isinstance(dataset, Dataset): # `IterableDataset.map` does not support `desc`
map_kwargs["desc"] = f"Tokenizing {dataset_name} dataset"
dataset = dataset.map(
self.tokenize_row if not self.is_vision_model else self.process_row,
remove_columns=["prompt", "chosen", "rejected"],
fn_kwargs={
"processing_class": processing_class,
"max_prompt_length": args.max_prompt_length,
"max_completion_length": args.max_completion_length,
# for enc-dec, we add the special tokens ([bos_token] + prompt + [eos_token]; completion + [eos_token])
"add_special_tokens": False,
},
**map_kwargs,
)
return dataset
@staticmethod
def tokenize_row(features, processing_class, max_prompt_length, max_completion_length, add_special_tokens):
"""
Tokenize a row of the dataset.
Args:
features (`dict[str, str]`):
Row of the dataset, should contain the keys `"prompt"`, `"chosen"`, and `"rejected"`.
processing_class (`PreTrainedTokenizerBase`):
Processing class used to process the data.
max_prompt_length (`int` or `None`):
Maximum length of the prompt sequence. If `None`, the prompt sequence is not truncated.
max_completion_length (`int` or `None`):
Maximum length of the completion sequences. If `None`, the completion sequences are not truncated.
add_special_tokens (`bool`):
Whether to add special tokens to the sequences. Typically used for encoder-decoder models. If `True`,
the prompt sequence will have a bos token prepended and an eos token appended. In any case, the
completion sequences will have an eos token appended.
Returns:
`dict[str, list[int]]`:
Tokenized sequences with the keys `"prompt_input_ids"`, `"chosen_input_ids"`, and
`"rejected_input_ids".
Example:
```python
>>> from transformers import GPT2Tokenizer
>>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
>>> features = {"prompt": "The sky is", "chosen": " blue", "rejected": " green"}
>>> DPOTrainer.tokenize_row(
... features, tokenizer, max_prompt_length=3, max_completion_length=3, add_special_tokens=False
... )
{'prompt_input_ids': [464, 6766, 318], 'chosen_input_ids': [4171, 50256], 'rejected_input_ids': [4077, 50256]}
```
"""
tokenizer = processing_class # the processing class is a tokenizer
prompt_input_ids = tokenizer(features["prompt"], add_special_tokens=False)["input_ids"]
chosen_input_ids = tokenizer(features["chosen"], add_special_tokens=False)["input_ids"]
rejected_input_ids = tokenizer(features["rejected"], add_special_tokens=False)["input_ids"]
# Add special tokens (typically for encoder-decoder models)
if add_special_tokens:
if tokenizer.bos_token_id is not None:
prompt_input_ids = [tokenizer.bos_token_id] + prompt_input_ids
if tokenizer.eos_token_id is not None:
prompt_input_ids = prompt_input_ids + [tokenizer.eos_token_id]
chosen_input_ids = chosen_input_ids + [tokenizer.eos_token_id]
rejected_input_ids = rejected_input_ids + [tokenizer.eos_token_id]
# Truncate prompt and completion sequences
if max_prompt_length is not None:
prompt_input_ids = prompt_input_ids[-max_prompt_length:]
if max_completion_length is not None:
chosen_input_ids = chosen_input_ids[:max_completion_length]
rejected_input_ids = rejected_input_ids[:max_completion_length]
return {
"prompt_input_ids": prompt_input_ids,
"chosen_input_ids": chosen_input_ids,
"rejected_input_ids": rejected_input_ids,
}
@staticmethod
def process_row(features, processing_class, max_prompt_length, max_completion_length, add_special_tokens):
"""
Same as `tokenize_row` but for vision models. Please refer to `tokenize_row` for more information.
"""
processor, tokenizer = processing_class, processing_class.tokenizer # the processing class is a processor
processed_features = processor(images=features["images"], text=features["prompt"], add_special_tokens=False)
prompt_input_ids = processed_features["input_ids"][0]
pixel_values = processed_features["pixel_values"][0]
chosen_input_ids = tokenizer(features["chosen"], add_special_tokens=False)["input_ids"]
rejected_input_ids = tokenizer(features["rejected"], add_special_tokens=False)["input_ids"]
# Add special tokens (typically for encoder-decoder models)
if add_special_tokens:
if tokenizer.bos_token_id is not None:
prompt_input_ids = [tokenizer.bos_token_id] + prompt_input_ids
if tokenizer.eos_token_id is not None:
prompt_input_ids = prompt_input_ids + [tokenizer.eos_token_id]
chosen_input_ids = chosen_input_ids + [tokenizer.eos_token_id]
rejected_input_ids = rejected_input_ids + [tokenizer.eos_token_id]
# Truncate prompt and completion sequences
if max_prompt_length is not None:
prompt_input_ids = prompt_input_ids[-max_prompt_length:]
if max_completion_length is not None:
chosen_input_ids = chosen_input_ids[:max_completion_length]
rejected_input_ids = rejected_input_ids[:max_completion_length]
output = {
"prompt_input_ids": prompt_input_ids,
"pixel_values": pixel_values,
"chosen_input_ids": chosen_input_ids,
"rejected_input_ids": rejected_input_ids,
}
if "pixel_attention_mask" in processed_features:
output["pixel_attention_mask"] = processed_features["pixel_attention_mask"][0]
if "image_sizes" in processed_features:
output["image_sizes"] = processed_features["image_sizes"][0]
return output
def _prepare_deepspeed(self, model: PreTrainedModelWrapper):
# Adapted from accelerate: https://github.com/huggingface/accelerate/blob/739b135f8367becb67ffaada12fe76e3aa60fefd/src/accelerate/accelerator.py#L1473
deepspeed_plugin = self.accelerator.state.deepspeed_plugin
config_kwargs = deepcopy(deepspeed_plugin.deepspeed_config)
if model is not None:
if hasattr(model, "config"):
hidden_size = (
max(model.config.hidden_sizes)
if getattr(model.config, "hidden_sizes", None)
else getattr(model.config, "hidden_size", None)
)
if hidden_size is not None and config_kwargs["zero_optimization"]["stage"] == 3:
# Note that `stage3_prefetch_bucket_size` can produce DeepSpeed messages like: `Invalidate trace cache @ step 0: expected module 1, but got module 0`
# This is expected and is not an error, see: https://github.com/microsoft/DeepSpeed/discussions/4081
config_kwargs.update(
{
"zero_optimization.reduce_bucket_size": hidden_size * hidden_size,
"zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
"zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size,
}
)
# If ZeRO-3 is used, we shard both the active and reference model.
# Otherwise, we assume the reference model fits in memory and is initialized on each device with ZeRO disabled (stage 0)
if config_kwargs["zero_optimization"]["stage"] != 3:
config_kwargs["zero_optimization"]["stage"] = 0
model, *_ = deepspeed.initialize(model=model, config=config_kwargs)
model.eval()
return model
def _set_signature_columns_if_needed(self):
# If `self.args.remove_unused_columns` is True, non-signature columns are removed.
# By default, this method sets `self._signature_columns` to the model's expected inputs.
# In DPOTrainer, we preprocess data, so using the model's signature columns doesn't work.
# Instead, we set them to the columns expected by `DataCollatorForPreference`, hence the override.
if self._signature_columns is None:
self._signature_columns = [
"prompt_input_ids",
"chosen_input_ids",
"rejected_input_ids",
"image_sizes",
"ref_chosen_logps",
"ref_rejected_logps",
]
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training [`~torch.utils.data.DataLoader`].
Overrides `transformers.Trainer.get_train_dataloader` to precompute `ref_log_probs`.
"""
if self.precompute_ref_log_probs and not self._precomputed_train_ref_log_probs:
batch_size = self.args.precompute_ref_batch_size or self.args.per_device_train_batch_size
dataloader_params = {
"batch_size": batch_size,
"collate_fn": self.data_collator,
"num_workers": self.args.dataloader_num_workers,
"pin_memory": self.args.dataloader_pin_memory,
"shuffle": False,
}
# prepare dataloader
data_loader = self.accelerator.prepare(DataLoader(self.train_dataset, **dataloader_params))
ref_chosen_logps = []
ref_rejected_logps = []
for padded_batch in tqdm(iterable=data_loader, desc="Train dataset reference log probs"):
ref_chosen_logp, ref_rejected_logp = self.compute_ref_log_probs(padded_batch)
ref_chosen_logp, ref_rejected_logp = self.accelerator.gather_for_metrics(
(ref_chosen_logp, ref_rejected_logp)
)
ref_chosen_logps.append(ref_chosen_logp.cpu())
ref_rejected_logps.append(ref_rejected_logp.cpu())
# Possibly unnecessary cache clearing, kept to help avoid OOM
empty_cache()
self.accelerator.free_memory()
all_ref_chosen_logps = torch.cat(ref_chosen_logps).float().numpy()
all_ref_rejected_logps = torch.cat(ref_rejected_logps).float().numpy()
self.train_dataset = self.train_dataset.add_column(name="ref_chosen_logps", column=all_ref_chosen_logps)
self.train_dataset = self.train_dataset.add_column(
name="ref_rejected_logps", column=all_ref_rejected_logps
)
self._precomputed_train_ref_log_probs = True
return super().get_train_dataloader()
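Enabling this precomputation path is just a matter of configuration. A small sketch, assuming the `DPOConfig` field names used in the method above; the other values are placeholders.

```python
from trl import DPOConfig

args = DPOConfig(
    output_dir="dpo-output",
    precompute_ref_log_probs=True,  # triggers the one-time reference pass above
    precompute_ref_batch_size=8,    # falls back to per_device_train_batch_size when None
)
```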
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation [`~torch.utils.data.DataLoader`].
Overrides `transformers.Trainer.get_eval_dataloader` to precompute `ref_log_probs`.
Args:
eval_dataset (`torch.utils.data.Dataset`, *optional*):
If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted
by the `model.forward()` method are automatically removed. It must implement `__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
if self.precompute_ref_log_probs and not self._precomputed_eval_ref_log_probs:
batch_size = self.args.precompute_ref_batch_size or self.args.per_device_eval_batch_size
dataloader_params = {
"batch_size": batch_size,
"collate_fn": self.data_collator,
"num_workers": self.args.dataloader_num_workers,
"pin_memory": self.args.dataloader_pin_memory,
"shuffle": False,
}
# prepare dataloader
data_loader = self.accelerator.prepare(DataLoader(eval_dataset, **dataloader_params))
ref_chosen_logps = []
ref_rejected_logps = []
for padded_batch in tqdm(iterable=data_loader, desc="Eval dataset reference log probs"):
ref_chosen_logp, ref_rejected_logp = self.compute_ref_log_probs(padded_batch)
ref_chosen_logp, ref_rejected_logp = self.accelerator.gather_for_metrics(
(ref_chosen_logp, ref_rejected_logp)
)
ref_chosen_logps.append(ref_chosen_logp.cpu())
ref_rejected_logps.append(ref_rejected_logp.cpu())
all_ref_chosen_logps = torch.cat(ref_chosen_logps).float().numpy()
all_ref_rejected_logps = torch.cat(ref_rejected_logps).float().numpy()
eval_dataset = eval_dataset.add_column(name="ref_chosen_logps", column=all_ref_chosen_logps)
eval_dataset = eval_dataset.add_column(name="ref_rejected_logps", column=all_ref_rejected_logps)
# Save calculated ref_chosen_logps and ref_rejected_logps to the eval_dataset for subsequent runs
if self.eval_dataset is not None:
self.eval_dataset = eval_dataset
self._precomputed_eval_ref_log_probs = True
return super().get_eval_dataloader(eval_dataset=eval_dataset)
@contextmanager
def null_ref_context(self):
"""Context manager for handling null reference model (that is, peft adapter manipulation)."""
with (
self.accelerator.unwrap_model(self.model).disable_adapter()
if self.is_peft_model and not self.ref_adapter_name
else nullcontext()
):
if self.ref_adapter_name:
self.model.set_adapter(self.ref_adapter_name)
yield
if self.ref_adapter_name:
self.model.set_adapter(self.model_adapter_name or "default")
def compute_ref_log_probs(self, batch: dict[str, torch.LongTensor]) -> dict:
"""Computes log probabilities of the reference model for a single padded batch of a DPO specific dataset."""
device_type = "xpu" if is_torch_xpu_available() else "cuda"
compute_ref_context_manager = amp.autocast(device_type) if self._peft_has_been_casted_to_bf16 else nullcontext()
with torch.no_grad(), compute_ref_context_manager:
if self.ref_model is None:
with self.null_ref_context():
ref_model_output = self.concatenated_forward(self.model, batch)
else:
ref_model_output = self.concatenated_forward(self.ref_model, batch)
return ref_model_output["chosen_logps"], ref_model_output["rejected_logps"]
@staticmethod
def concatenated_inputs(
batch: dict[str, Union[list, torch.LongTensor]], padding_value: int
) -> dict[str, torch.LongTensor]:
"""
Concatenate the `chosen` and `rejected` inputs from the batch into a single tensor for both the prompt
and completion sequences.
Args:
batch (`dict[str, Union[list, torch.LongTensor]]`):
A batch of input data. The batch must contain the following keys:
- `"prompt_input_ids"`: Tensor of shape `(batch_size, prompt_length)` representing the prompt input IDs.
- `"chosen_input_ids"`: Tensor of shape `(batch_size, chosen_length)` representing the chosen completion input IDs.
- `"rejected_input_ids"`: Tensor of shape `(batch_size, rejected_length)` representing the rejected completion input IDs.
- `"prompt_pixel_values"` (optional): Tensor for pixel values, if available.
- `"prompt_pixel_attention_mask"` (optional): Tensor for pixel attention masks, if available.
padding_value (`int`):
The padding value to use for the concatenated completion sequences (`chosen_input_ids` and
`rejected_input_ids`).
Returns:
`dict[str, torch.LongTensor]`: A dictionary containing:
- `"prompt_input_ids"`: Concatenated prompt input IDs of shape `(2 * batch_size, prompt_length)`.
- `"completion_input_ids"`: Concatenated chosen and rejected completion input IDs of shape `(2 * batch_size, max_completion_length)`.
- `"prompt_attention_mask"`: Concatenated prompt attention masks of shape `(2 * batch_size, prompt_length)`.
- `"completion_attention_mask"`: Concatenated chosen and rejected attention masks of shape `(2 * batch_size, max_completion_length)`.
- `"pixel_values"` (optional): Concatenated pixel values if `"prompt_pixel_values"` are present.
- `"pixel_attention_mask"` (optional): Concatenated pixel attention masks if `"prompt_pixel_attention_mask"` are present.
Notes:
The completion input IDs and attention masks are padded to the maximum completion length of the chosen
or rejected sequences.
"""
output = {}
# For the prompt, the input_ids are the same for both the chosen and rejected responses
output["prompt_input_ids"] = torch.cat([batch["prompt_input_ids"], batch["prompt_input_ids"]], dim=0)
output["prompt_attention_mask"] = torch.cat(
[batch["prompt_attention_mask"], batch["prompt_attention_mask"]], dim=0
)
if "pixel_values" in batch:
output["pixel_values"] = torch.cat([batch["pixel_values"], batch["pixel_values"]], dim=0)
if "pixel_attention_mask" in batch:
output["pixel_attention_mask"] = torch.cat(
[batch["pixel_attention_mask"], batch["pixel_attention_mask"]], dim=0
)
if "image_sizes" in batch:
output["image_sizes"] = torch.cat([batch["image_sizes"], batch["image_sizes"]], dim=0)
# Concatenate the chosen and rejected completions
max_completion_length = max(batch["chosen_input_ids"].shape[1], batch["rejected_input_ids"].shape[1])
output["completion_input_ids"] = torch.cat(
(
pad_to_length(batch["chosen_input_ids"], max_completion_length, pad_value=padding_value),
pad_to_length(batch["rejected_input_ids"], max_completion_length, pad_value=padding_value),
),
)
output["completion_attention_mask"] = torch.cat(
(
pad_to_length(batch["chosen_attention_mask"], max_completion_length, pad_value=0),
pad_to_length(batch["rejected_attention_mask"], max_completion_length, pad_value=0),
),
)
return output
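A toy illustration of the shapes produced by `concatenated_inputs` above, using two examples whose completions have different lengths; `padding_value=0` is a placeholder.

```python
import torch

batch = {
    "prompt_input_ids": torch.tensor([[1, 2], [3, 4]]),
    "prompt_attention_mask": torch.ones(2, 2, dtype=torch.long),
    "chosen_input_ids": torch.tensor([[5, 6], [7, 8]]),
    "chosen_attention_mask": torch.ones(2, 2, dtype=torch.long),
    "rejected_input_ids": torch.tensor([[9], [10]]),
    "rejected_attention_mask": torch.ones(2, 1, dtype=torch.long),
}
out = DPOTrainer.concatenated_inputs(batch, padding_value=0)
# out["prompt_input_ids"]: shape (4, 2) -- prompts duplicated (chosen half first, then rejected half)
# out["completion_input_ids"]: shape (4, 2) -- rejected ids right-padded from length 1 to 2
```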
def dpo_loss(
self,
chosen_logps: torch.FloatTensor,
rejected_logps: torch.FloatTensor,
ref_chosen_logps: torch.FloatTensor,
ref_rejected_logps: torch.FloatTensor,
) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""
Compute the DPO loss for a batch of policy and reference model log probabilities.
Args:
chosen_logps (`torch.FloatTensor`):
Log probabilities of the model for the chosen responses. Shape: `(batch_size,)`.
rejected_logps (`torch.FloatTensor`):
Log probabilities of the model for the rejected responses. Shape: `(batch_size,)`.
ref_chosen_logps (`torch.FloatTensor`):
Log probabilities of the reference model for the chosen responses. Shape: `(batch_size,)`.
ref_rejected_logps (`torch.FloatTensor`):
Log probabilities of the reference model for the rejected responses. Shape: `(batch_size,)`.
Returns:
A tuple of three tensors: `(losses, chosen_rewards, rejected_rewards)`.
The losses tensor contains the DPO loss for each example in the batch.
The `chosen_rewards` and `rejected_rewards` tensors contain the rewards for the chosen and rejected
responses, respectively.
"""
device = self.accelerator.device
# Get the log ratios for the chosen and rejected responses
chosen_logratios = chosen_logps.to(device) - (not self.reference_free) * ref_chosen_logps.to(device)
rejected_logratios = rejected_logps.to(device) - (not self.reference_free) * ref_rejected_logps.to(device)
if self.f_divergence_type == FDivergenceType.ALPHA_DIVERGENCE.value:
# The alpha-divergence formula: (1 - u^-alpha) / alpha
# The divergence difference between the chosen and rejected sample is:
# (1 - u[w]^-alpha) / alpha - (1 - u[l]^-alpha) / alpha
# = (u[l]^-alpha - u[w]^-alpha) / alpha
# where u[w] and u[l] are the policy/reference probability ratios
# for the chosen and rejected samples, respectively.
alpha_coef = FDivergenceConstants.ALPHA_DIVERGENCE_COEF_DEFAULT
if self.f_divergence_params and FDivergenceConstants.ALPHA_DIVERGENCE_COEF_KEY in self.f_divergence_params:
alpha_coef = float(self.f_divergence_params[FDivergenceConstants.ALPHA_DIVERGENCE_COEF_KEY])
logits = (cap_exp(rejected_logratios * -alpha_coef) - cap_exp(chosen_logratios * -alpha_coef)) / alpha_coef
else:
logratios = chosen_logps - rejected_logps
if self.reference_free:
ref_logratios = torch.tensor([0], dtype=logratios.dtype, device=logratios.device)
else:
ref_logratios = ref_chosen_logps - ref_rejected_logps
logratios = logratios.to(self.accelerator.device)
ref_logratios = ref_logratios.to(self.accelerator.device)
logits = logratios - ref_logratios
if self.f_divergence_type == FDivergenceType.JS_DIVERGENCE.value:
# The js-divergence formula: log(2 * u / (1 + u))
# The divergence difference between the chosen and rejected sample is:
# log(2 * u[w] / (1 + u[w])) - log(2 * u[l] / (1 + u[l]))
# = log(u[w]) - log(u[l]) - (log(1 + u[w]) - log(1 + u[l]))
# where u[w] and u[l] are the policy/reference probability ratios
# for the chosen and rejected samples, respectively.
logits -= F.softplus(chosen_logratios) - F.softplus(rejected_logratios)
# The beta is a temperature parameter for the DPO loss, typically something in the range of 0.1 to 0.5.
# We ignore the reference model as beta -> 0. The label_smoothing parameter encodes our uncertainty about the
# labels and calculates a conservative DPO loss.
if self.loss_type == "sigmoid":
losses = (
-F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing)
- F.logsigmoid(-self.beta * logits) * self.label_smoothing
)
elif self.loss_type == "robust":
losses = (
-F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing)
+ F.logsigmoid(-self.beta * logits) * self.label_smoothing
) / (1 - 2 * self.label_smoothing)
elif self.loss_type == "exo_pair":
# eqn (16) of the EXO paper: https://huggingface.co/papers/2402.00856
import math
if self.label_smoothing == 0:
self.label_smoothing = 1e-3
losses = (self.beta * logits).sigmoid() * (
F.logsigmoid(self.beta * logits) - math.log(1 - self.label_smoothing)
) + (-self.beta * logits).sigmoid() * (F.logsigmoid(-self.beta * logits) - math.log(self.label_smoothing))
elif self.loss_type == "hinge":
losses = torch.relu(1 - self.beta * logits)
elif self.loss_type == "ipo":
# eqn (17) of the paper where beta is the regularization parameter for the IPO loss, denoted by tau in the paper.
losses = (logits - 1 / (2 * self.beta)) ** 2
elif self.loss_type == "bco_pair":
chosen_logratios = chosen_logps - ref_chosen_logps
rejected_logratios = rejected_logps - ref_rejected_logps
chosen_rewards = self.beta * chosen_logratios
rejected_rewards = self.beta * rejected_logratios
rewards = torch.cat((chosen_rewards, rejected_rewards), 0).mean().detach()
self.running.update(rewards)
delta = self.running.mean
losses = -F.logsigmoid((self.beta * chosen_logratios) - delta) - F.logsigmoid(
-(self.beta * rejected_logratios - delta)
)
elif self.loss_type == "sppo_hard":
# In the paper (https://huggingface.co/papers/2405.00675), SPPO employs a soft probability approach,
# estimated using the PairRM score. The probability calculation is conducted outside of the trainer class.
# The version described here is the hard probability version, where P in Equation (4.7) of Algorithm 1 is
# set to 1 for the winner and 0 for the loser.
a = chosen_logps - ref_chosen_logps
b = rejected_logps - ref_rejected_logps
losses = (a - 0.5 / self.beta) ** 2 + (b + 0.5 / self.beta) ** 2
elif self.loss_type == "nca_pair":
chosen_rewards = (chosen_logps - ref_chosen_logps) * self.beta
rejected_rewards = (rejected_logps - ref_rejected_logps) * self.beta
losses = (
-F.logsigmoid(chosen_rewards)
- 0.5 * F.logsigmoid(-chosen_rewards)
- 0.5 * F.logsigmoid(-rejected_rewards)
)
elif self.loss_type == "aot_pair":
chosen_logratios = chosen_logps - ref_chosen_logps
rejected_logratios = rejected_logps - ref_rejected_logps
chosen_logratios_sorted, _ = torch.sort(chosen_logratios, dim=0)
rejected_logratios_sorted, _ = torch.sort(rejected_logratios, dim=0)
delta = chosen_logratios_sorted - rejected_logratios_sorted
losses = (
-F.logsigmoid(self.beta * delta) * (1 - self.label_smoothing)
- F.logsigmoid(-self.beta * delta) * self.label_smoothing
)
elif self.loss_type == "aot":
logratios = chosen_logps - rejected_logps
ref_logratios = ref_chosen_logps - ref_rejected_logps
logratios_sorted, _ = torch.sort(logratios, dim=0)
ref_logratios_sorted, _ = torch.sort(ref_logratios, dim=0)
delta = logratios_sorted - ref_logratios_sorted
losses = (
-F.logsigmoid(self.beta * delta) * (1 - self.label_smoothing)
- F.logsigmoid(-self.beta * delta) * self.label_smoothing
)
elif self.loss_type == "apo_zero":
# Eqn (7) of the APO paper (https://huggingface.co/papers/2408.06266)
# Use this loss when you believe the chosen outputs are better than your model's default output
losses_chosen = 1 - F.sigmoid(self.beta * chosen_logratios) # Increase chosen likelihood
losses_rejected = F.sigmoid(self.beta * rejected_logratios) # Decrease rejected likelihood
losses = losses_chosen + losses_rejected
elif self.loss_type == "apo_down":
# Eqn (8) of the APO paper (https://huggingface.co/papers/2408.06266)
# Use this loss when you believe the chosen outputs are worse than your model's default output.
# Decrease chosen likelihood and decrease rejected likelihood more
losses_chosen = F.sigmoid(self.beta * chosen_logratios)
losses_rejected = 1 - F.sigmoid(self.beta * (chosen_logratios - rejected_logratios))
losses = losses_chosen + losses_rejected
elif self.loss_type == "discopop":
# Eqn (5) of the DiscoPOP paper (https://huggingface.co/papers/2406.08414)
# This loss was discovered with LLM discovery
logratios = chosen_logps - rejected_logps
ref_logratios = ref_chosen_logps - ref_rejected_logps
logits = logratios - ref_logratios
logits = logits * self.beta
# Modulate the mixing coefficient based on the log ratio magnitudes
log_ratio_modulation = torch.sigmoid(logits / self.args.discopop_tau)
logistic_component = -F.logsigmoid(logits)
exp_component = torch.exp(-logits)
# Blend between logistic and exponential component based on log ratio modulation
losses = logistic_component * (1 - log_ratio_modulation) + exp_component * log_ratio_modulation
else:
raise ValueError(
f"Unknown loss type: {self.loss_type}. Should be one of ['sigmoid', 'hinge', 'ipo', 'exo_pair', "
"'nca_pair', 'robust', 'bco_pair', 'sppo_hard', 'aot', 'aot_pair', 'discopop', 'apo_zero', 'apo_down']"
)
chosen_rewards = self.beta * (chosen_logps.to(device) - ref_chosen_logps.to(device)).detach()
rejected_rewards = self.beta * (rejected_logps.to(device) - ref_rejected_logps.to(device)).detach()
return losses, chosen_rewards, rejected_rewards
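To make the default objective easier to see in isolation, here is a standalone restatement of the `sigmoid` branch above with `label_smoothing = 0` and the default f-divergence; it is a sketch, not a replacement for the method.

```python
import torch.nn.functional as F

def sigmoid_dpo_loss(chosen_logps, rejected_logps, ref_chosen_logps, ref_rejected_logps, beta=0.1):
    # logits = (policy log-ratio) - (reference log-ratio)
    logits = (chosen_logps - rejected_logps) - (ref_chosen_logps - ref_rejected_logps)
    losses = -F.logsigmoid(beta * logits)
    chosen_rewards = beta * (chosen_logps - ref_chosen_logps).detach()
    rejected_rewards = beta * (rejected_logps - ref_rejected_logps).detach()
    return losses, chosen_rewards, rejected_rewards
```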
def concatenated_forward(self, model: nn.Module, batch: dict[str, Union[list, torch.LongTensor]]):
"""Run the given model on the given batch of inputs, concatenating the chosen and rejected inputs together.
We do this to avoid doing two forward passes, because it's faster for FSDP.
"""
num_examples = batch["prompt_input_ids"].shape[0]
concatenated_batch = self.concatenated_inputs(batch, padding_value=self.padding_value)
model_kwargs = {}
if self.aux_loss_enabled:
model_kwargs["output_router_logits"] = True
# Add the pixel values and attention masks for vision models
if "pixel_values" in concatenated_batch:
model_kwargs["pixel_values"] = concatenated_batch["pixel_values"]
if "pixel_attention_mask" in concatenated_batch:
model_kwargs["pixel_attention_mask"] = concatenated_batch["pixel_attention_mask"]
if "image_sizes" in concatenated_batch:
model_kwargs["image_sizes"] = concatenated_batch["image_sizes"]
prompt_input_ids = concatenated_batch["prompt_input_ids"]
prompt_attention_mask = concatenated_batch["prompt_attention_mask"]
completion_input_ids = concatenated_batch["completion_input_ids"]
completion_attention_mask = concatenated_batch["completion_attention_mask"]
if self.is_encoder_decoder:
labels = completion_input_ids
labels[completion_attention_mask == 0] = self.label_pad_token_id
outputs = model(
input_ids=prompt_input_ids,
attention_mask=prompt_attention_mask,
labels=labels, # we need the labels for the logits to be returned
**model_kwargs,
)
logits = outputs.logits
loss_mask = completion_attention_mask.bool()
else:
# Concatenate the prompt and completion inputs
input_ids = torch.cat((prompt_input_ids, completion_input_ids), dim=1)
attention_mask = torch.cat((prompt_attention_mask, completion_attention_mask), dim=1)
# Mask the prompt but not the completion for the loss
loss_mask = torch.cat(
(torch.zeros_like(prompt_attention_mask), completion_attention_mask),
dim=1,
)
# Flush left to reduce the memory usage
# [[0, 0, x, x, x, x], -> [[x, x, x, x],
# [0, x, x, x, 0, 0]] [x, x, x, 0]]
attention_mask, input_ids, loss_mask = flush_left(attention_mask, input_ids, loss_mask)
# Truncate right
if self.max_length is not None:
if self.truncation_mode == "keep_end":
input_ids = input_ids[:, -self.max_length :]
attention_mask = attention_mask[:, -self.max_length :]
loss_mask = loss_mask[:, -self.max_length :]
elif self.truncation_mode == "keep_start":
input_ids = input_ids[:, : self.max_length]
attention_mask = attention_mask[:, : self.max_length]
loss_mask = loss_mask[:, : self.max_length]
else:
raise ValueError(
f"Unknown truncation mode: '{self.truncation_mode}'. Should be one of ['keep_end', "
"'keep_start']."
)
if self.use_logits_to_keep:
# Compute logits_to_keep based on loss_mask pattern:
# [[0, 0, 0, x, x, x, x],
# [0, 0, 0, x, x, x, 0]]
# ^ start computing logits from here ([:, -(7-3+1):])
first_compute_index = loss_mask.nonzero(as_tuple=True)[1].min()
logits_to_keep = (loss_mask.shape[1] - first_compute_index).item() + 1 # +1 for the first label
model_kwargs["logits_to_keep"] = logits_to_keep
if self.padding_free:
# Flatten the input_ids, position_ids, and loss_mask
# input_ids = [[a, b, c, 0], -> input_ids = [[a, b, c, d, e, f, g]]
# [d, e, f, g]] position_ids = [[0, 1, 2, 0, 1, 2, 3]]
input_ids = input_ids[attention_mask.bool()].unsqueeze(0)
loss_mask = loss_mask[attention_mask.bool()].unsqueeze(0)
position_ids = attention_mask.cumsum(1)[attention_mask.bool()].unsqueeze(0) - 1
model_kwargs["position_ids"] = position_ids
else:
model_kwargs["attention_mask"] = attention_mask
outputs = model(input_ids, **model_kwargs)
logits = outputs.logits
# Offset the logits by one to align with the labels
labels = torch.roll(input_ids, shifts=-1, dims=1)
loss_mask = torch.roll(loss_mask, shifts=-1, dims=1).bool()
if self.use_logits_to_keep:
# Align labels with logits
# logits: -, -, [x2, x3, x4, x5, x6]
# ^ --------- ^ after logits[:, :-1, :]
# labels: [y0, y1, y2, y3, y4, y5, y6]
# ^ --------- ^ with logits_to_keep=4, [:, -4:]
# loss_mask: [0, 0, 0, 1, 1, 1, 1]
labels = labels[:, -logits_to_keep:]
loss_mask = loss_mask[:, -logits_to_keep:]
if logits.shape[:2] != labels.shape[:2]:
# for llava, the returned logits include the image tokens (placed before the text tokens)
seq_len = labels.shape[1]
logits = logits[:, -seq_len:]
# Compute the log probabilities of the labels
labels[~loss_mask] = 0 # dummy token; we'll ignore the losses on these tokens later
per_token_logps = selective_log_softmax(logits, labels)
per_token_logps[~loss_mask] = 0
per_token_logps = torch.roll(per_token_logps, shifts=1, dims=1)
if self.padding_free:
# Unflatten the per_token_logps (shape: [1, sum_seq_len] -> [batch_size, seq_len])
batch_size, seq_len = attention_mask.shape
per_token_logps_ = torch.zeros(
batch_size, seq_len, device=outputs.logits.device, dtype=outputs.logits.dtype
)
per_token_logps_[attention_mask.bool()] = per_token_logps
per_token_logps = per_token_logps_
all_logps = per_token_logps.sum(-1)
output = {}
if self.use_weighting:
with torch.no_grad():
# Eq (2) of the WPO paper: https://huggingface.co/papers/2406.11827
logprobs = F.log_softmax(logits, dim=-1)
weights_adjustment_factor = torch.logsumexp(2 * logprobs, dim=-1) # same as sum(probs**2) in log space
per_token_logps_adjusted = per_token_logps - weights_adjustment_factor
all_weights = (per_token_logps_adjusted * loss_mask).sum(-1) / loss_mask.sum(-1)
chosen_weights = all_weights[:num_examples]
rejected_weights = all_weights[num_examples:]
output["policy_weights"] = torch.clamp(torch.exp(chosen_weights + rejected_weights), max=1)
if self.args.rpo_alpha is not None:
# Only use the chosen logits for the RPO loss
chosen_logits = logits[:num_examples]
chosen_labels = labels[:num_examples]
# Compute the log probabilities of the labels
output["nll_loss"] = F.cross_entropy(
torch.flatten(chosen_logits, end_dim=1), torch.flatten(chosen_labels, end_dim=1), ignore_index=0
)
if self.loss_type == "ipo":
all_logps = all_logps / loss_mask.sum(-1)
output["chosen_logps"] = all_logps[:num_examples]
output["rejected_logps"] = all_logps[num_examples:]
# Compute the mean logits
if self.padding_free:
# position_ids contains a sequence of range identifiers (e.g., [[0, 1, 2, 0, 1, 2, 3, ...]]).
# There are 2*num_examples ranges in total: the first half corresponds to the chosen tokens,
# and the second half to the rejected tokens.
# To find the start of the rejected tokens, we look for the num_examples+1-th zero in pos_id.
split_idx = (position_ids == 0).nonzero(as_tuple=True)[1][num_examples]
mean_chosen_logits = logits[0, :split_idx][loss_mask[0, :split_idx]].mean()
mean_rejected_logits = logits[0, split_idx:][loss_mask[0, split_idx:]].mean()
else:
mean_chosen_logits = logits[:num_examples][loss_mask[:num_examples]].mean()
mean_rejected_logits = logits[num_examples:][loss_mask[num_examples:]].mean()
output["mean_chosen_logits"] = mean_chosen_logits
output["mean_rejected_logits"] = mean_rejected_logits
if self.aux_loss_enabled:
output["aux_loss"] = outputs.aux_loss
return output
def get_batch_loss_metrics(
self,
model,
batch: dict[str, Union[list, torch.LongTensor]],
train_eval: Literal["train", "eval"] = "train",
):
"""Compute the DPO loss and other metrics for the given batch of inputs for train or test."""
metrics = {}
model_output = self.concatenated_forward(model, batch)
# if ref_chosen_logps and ref_rejected_logps in batch use them, otherwise use the reference model
if "ref_chosen_logps" in batch and "ref_rejected_logps" in batch:
ref_chosen_logps = batch["ref_chosen_logps"]
ref_rejected_logps = batch["ref_rejected_logps"]
else:
ref_chosen_logps, ref_rejected_logps = self.compute_ref_log_probs(batch)
losses, chosen_rewards, rejected_rewards = self.dpo_loss(
model_output["chosen_logps"], model_output["rejected_logps"], ref_chosen_logps, ref_rejected_logps
)
reward_accuracies = (chosen_rewards > rejected_rewards).float()
if self.args.rpo_alpha is not None:
losses = losses + self.args.rpo_alpha * model_output["nll_loss"] # RPO loss from V3 of the paper
if self.use_weighting:
losses = losses * model_output["policy_weights"]
if self.aux_loss_enabled:
losses = losses + self.aux_loss_coef * model_output["aux_loss"]
prefix = "eval_" if train_eval == "eval" else ""
metrics[f"{prefix}rewards/chosen"] = self.accelerator.gather_for_metrics(chosen_rewards).mean().item()
metrics[f"{prefix}rewards/rejected"] = self.accelerator.gather_for_metrics(rejected_rewards).mean().item()
metrics[f"{prefix}rewards/accuracies"] = self.accelerator.gather_for_metrics(reward_accuracies).mean().item()
metrics[f"{prefix}rewards/margins"] = (
self.accelerator.gather_for_metrics(chosen_rewards - rejected_rewards).mean().item()
)
metrics[f"{prefix}logps/chosen"] = (
self.accelerator.gather_for_metrics(model_output["chosen_logps"]).detach().mean().item()
)
metrics[f"{prefix}logps/rejected"] = (
self.accelerator.gather_for_metrics(model_output["rejected_logps"]).detach().mean().item()
)
metrics[f"{prefix}logits/chosen"] = (
self.accelerator.gather_for_metrics(model_output["mean_chosen_logits"]).detach().mean().item()
)
metrics[f"{prefix}logits/rejected"] = (
self.accelerator.gather_for_metrics(model_output["mean_rejected_logits"]).detach().mean().item()
)
if self.args.rpo_alpha is not None:
metrics[f"{prefix}nll_loss"] = (
self.accelerator.gather_for_metrics(model_output["nll_loss"]).detach().mean().item()
)
if self.aux_loss_enabled:
metrics[f"{prefix}aux_loss"] = (
self.accelerator.gather_for_metrics(model_output["aux_loss"]).detach().mean().item()
)
return losses.mean(), metrics
def compute_loss(
self,
model: Union[PreTrainedModel, nn.Module],
inputs: dict[str, Union[torch.Tensor, Any]],
return_outputs=False,
num_items_in_batch=None,
) -> Union[torch.Tensor, tuple[torch.Tensor, dict[str, torch.Tensor]]]:
device_type = "xpu" if is_torch_xpu_available() else "cuda"
compute_loss_context_manager = (
amp.autocast(device_type) if self._peft_has_been_casted_to_bf16 else nullcontext()
)
with compute_loss_context_manager:
loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="train")
# Make sure to move the loss to the device the original accumulating loss is at back in the `Trainer` class:
loss = loss.to(self.args.device)
# force log the metrics
self.store_metrics(metrics, train_eval="train")
if return_outputs:
return loss, metrics
return loss
def generate_from_model_and_ref(self, model, batch: dict[str, torch.LongTensor]) -> tuple[str, str]:
"""Generate samples from the model and reference model for the given batch of inputs."""
# If one uses `generate_during_eval` with peft + bf16, we need to explicitly call generate with
# the torch amp context manager as some hidden states are silently casted to full precision.
device_type = "xpu" if is_torch_xpu_available() else "cuda"
generate_context_manager = amp.autocast(device_type) if self._peft_has_been_casted_to_bf16 else nullcontext()
with generate_context_manager:
policy_output = model.generate(
input_ids=batch["prompt_input_ids"],
attention_mask=batch["prompt_attention_mask"],
max_length=self.max_length,
do_sample=True,
pad_token_id=self.padding_value,
)
# if ref_output in batch use that otherwise use the reference model
if "ref_output" in batch:
ref_output = batch["ref_output"]
else:
if self.ref_model is None:
with self.null_ref_context():
ref_output = self.model.generate(
input_ids=batch["prompt_input_ids"],
attention_mask=batch["prompt_attention_mask"],
max_length=self.max_length,
do_sample=True,
pad_token_id=self.padding_value,
)
else:
ref_output = self.ref_model.generate(
input_ids=batch["prompt_input_ids"],
attention_mask=batch["prompt_attention_mask"],
max_length=self.max_length,
do_sample=True,
pad_token_id=self.padding_value,
)
policy_output = pad_to_length(policy_output, self.max_length, self.padding_value)
policy_output_decoded = self.processing_class.batch_decode(policy_output, skip_special_tokens=True)
ref_output = pad_to_length(ref_output, self.max_length, self.padding_value)
ref_output_decoded = self.processing_class.batch_decode(ref_output, skip_special_tokens=True)
return policy_output_decoded, ref_output_decoded
def prediction_step(
self,
model: Union[PreTrainedModel, nn.Module],
inputs: dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[list[str]] = None,
):
if ignore_keys is None:
if hasattr(model, "config"):
ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
device_type = "xpu" if is_torch_xpu_available() else "cuda"
prediction_context_manager = amp.autocast(device_type) if self._peft_has_been_casted_to_bf16 else nullcontext()
with torch.no_grad(), prediction_context_manager:
loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="eval")
# force log the metrics
self.store_metrics(metrics, train_eval="eval")
if prediction_loss_only:
return loss.detach(), None, None
# logits for the chosen and rejected samples from model
logits_dict = {
"eval_logits/chosen": metrics["eval_logits/chosen"],
"eval_logits/rejected": metrics["eval_logits/rejected"],
}
logits = tuple(v.unsqueeze(dim=0) for k, v in logits_dict.items() if k not in ignore_keys)
logits = torch.stack(logits).mean(axis=1).to(self.accelerator.device)
labels = torch.zeros(logits.shape[0], device=self.accelerator.device)
return (loss.detach(), logits, labels)
def store_metrics(self, metrics: dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None:
for key, value in metrics.items():
self._stored_metrics[train_eval][key].append(value)
def evaluation_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[list[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
"""
Overriding built-in evaluation loop to store metrics for each batch.
Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.
Works both with or without labels.
"""
# Sample and save to game log if requested (for one batch to save time)
if self.generate_during_eval:
# Generate random indices within the range of the total number of samples
num_samples = len(dataloader.dataset)
random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size)
# Use dataloader.dataset.select to get the random batch without iterating over the DataLoader
random_batch_dataset = dataloader.dataset.select(random_indices)
random_batch = self.data_collator(random_batch_dataset)
random_batch = self._prepare_inputs(random_batch)
policy_output_decoded, ref_output_decoded = self.generate_from_model_and_ref(self.model, random_batch)
table = pd.DataFrame(
columns=["Prompt", "Policy", "Ref Model"],
data=[
[prompt, pol[len(prompt) :], ref[len(prompt) :]]
for prompt, pol, ref in zip(
random_batch_dataset["prompt"], policy_output_decoded, ref_output_decoded
)
],
)
if "wandb" in self.args.report_to:
wandb.log({"game_log": wandb.Table(data=table)})
if "comet_ml" in self.args.report_to:
log_table_to_comet_experiment(
name="game_log.csv",
table=table,
)
# Base evaluation
initial_output = super().evaluation_loop(
dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix
)
return initial_output
def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None:
"""
Log `logs` on the various objects watching training, including stored metrics.
Args:
logs (`dict[str, float]`):
The values to log.
start_time (`float` or `None`, *optional*, defaults to `None`):
Start time of the training.
"""
# logs either has 'loss' or 'eval_loss'
train_eval = "train" if "loss" in logs else "eval"
# Add averaged stored metrics to logs
for key, metrics in self._stored_metrics[train_eval].items():
logs[key] = torch.tensor(metrics).mean().item()
del self._stored_metrics[train_eval]
if version.parse(transformers.__version__) >= version.parse("4.47.0.dev0"):
return super().log(logs, start_time)
else: # transformers<=4.46
return super().log(logs)
def create_model_card(
self,
model_name: Optional[str] = None,
dataset_name: Optional[str] = None,
tags: Union[str, list[str], None] = None,
):
"""
Creates a draft of a model card using the information available to the `Trainer`.
Args:
model_name (`str` or `None`, *optional*, defaults to `None`):
Name of the model.
dataset_name (`str` or `None`, *optional*, defaults to `None`):
Name of the dataset used for training.
tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
Tags to be associated with the model card.
"""
if not self.is_world_process_zero():
return
if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
base_model = self.model.config._name_or_path
else:
base_model = None
tags = tags or []
if isinstance(tags, str):
tags = [tags]
if hasattr(self.model.config, "unsloth_version"):
tags.append("unsloth")
citation = textwrap.dedent(
"""\
@inproceedings{rafailov2023direct,
title = {{Direct Preference Optimization: Your Language Model is Secretly a Reward Model}},
author = {Rafael Rafailov and Archit Sharma and Eric Mitchell and Christopher D. Manning and Stefano Ermon and Chelsea Finn},
year = 2023,
booktitle = {Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023},
url = {http://papers.nips.cc/paper_files/paper/2023/hash/a85b405ed65c6477a4fe8302b5e06ce7-Abstract-Conference.html},
editor = {Alice Oh and Tristan Naumann and Amir Globerson and Kate Saenko and Moritz Hardt and Sergey Levine},
}"""
)
model_card = generate_model_card(
base_model=base_model,
model_name=model_name,
hub_model_id=self.hub_model_id,
dataset_name=dataset_name,
tags=tags,
wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None,
comet_url=get_comet_experiment_url(),
trainer_name="DPO",
trainer_citation=citation,
paper_title="Direct Preference Optimization: Your Language Model is Secretly a Reward Model",
paper_id="2305.18290",
)
model_card.save(os.path.join(self.args.output_dir, "README.md"))
| trl/trl/trainer/dpo_trainer.py/0 | {
"file_path": "trl/trl/trainer/dpo_trainer.py",
"repo_id": "trl",
"token_count": 36273
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from dataclasses import dataclass, field
from typing import Optional
from ..trainer.utils import OnPolicyConfig
@dataclass
class PPOConfig(OnPolicyConfig):
r"""
Configuration class for the [`PPOTrainer`].
Using [`~transformers.HfArgumentParser`] we can turn this class into
[argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
command line.
Parameters:
exp_name (`str`, *optional*, defaults to `os.path.basename(__file__)[:-3]`):
Name of this experiment.
reward_model_path (`str`, *optional*, defaults to `"EleutherAI/pythia-160m"`):
Path to the reward model.
model_adapter_name (`str` or `None`, *optional*, defaults to `None`):
Name of the train target PEFT adapter, when using LoRA with multiple adapters.
ref_adapter_name (`str` or `None`, *optional*, defaults to `None`):
Name of the reference PEFT adapter, when using LoRA with multiple adapters.
num_ppo_epochs (`int`, *optional*, defaults to `4`):
Number of epochs to train.
whiten_rewards (`bool`, *optional*, defaults to `False`):
Whether to whiten the rewards.
kl_coef (`float`, *optional*, defaults to `0.05`):
KL coefficient.
cliprange (`float`, *optional*, defaults to `0.2`):
Clip range.
vf_coef (`float`, *optional*, defaults to `0.1`):
Value function coefficient.
cliprange_value (`float`, *optional*, defaults to `0.2`):
Clip range for the value function.
gamma (`float`, *optional*, defaults to `1.0`):
Discount factor.
lam (`float`, *optional*, defaults to `0.95`):
Lambda value for GAE.
ds3_gather_for_generation (`bool`, *optional*, defaults to `True`):
This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for generation,
improving generation speed. However, disabling this option allows training models that exceed the VRAM
capacity of a single GPU, albeit at the cost of slower generation.
"""
exp_name: str = field(
default=os.path.basename(__file__)[:-3],
metadata={"help": "Name of this experiment."},
)
reward_model_path: str = field(
default="EleutherAI/pythia-160m",
metadata={"help": "Path to the reward model."},
)
model_adapter_name: Optional[str] = field(
default=None,
metadata={"help": "Name of the train target PEFT adapter, when using LoRA with multiple adapters."},
)
ref_adapter_name: Optional[str] = field(
default=None,
metadata={"help": "Name of the reference PEFT adapter, when using LoRA with multiple adapters."},
)
num_ppo_epochs: int = field(
default=4,
metadata={"help": "Number of epochs to train."},
)
whiten_rewards: bool = field(
default=False,
metadata={"help": "Whether to whiten the rewards."},
)
kl_coef: float = field(
default=0.05,
metadata={"help": "KL coefficient."},
)
cliprange: float = field(
default=0.2,
metadata={"help": "Clip range."},
)
vf_coef: float = field(
default=0.1,
metadata={"help": "Value function coefficient."},
)
cliprange_value: float = field(
default=0.2,
metadata={"help": "Clip range for the value function."},
)
gamma: float = field(
default=1.0,
metadata={"help": "Discount factor."},
)
lam: float = field(
default=0.95,
metadata={"help": "Lambda value for GAE."},
)
ds3_gather_for_generation: bool = field(
default=True,
metadata={
"help": "This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for "
"generation, improving generation speed. However, disabling this option allows training models that "
"exceed the VRAM capacity of a single GPU, albeit at the cost of slower generation."
},
)
| trl/trl/trainer/ppo_config.py/0 | {
"file_path": "trl/trl/trainer/ppo_config.py",
"repo_id": "trl",
"token_count": 1824
} |
// File only needed for VSCode users to have proper Docker based interpreters
{
"name": "accelerate_dev_environment",
"build": {
// ACTION NEEDED: comment/uncomment the relevant line depending on whether you are in a CPU/GPU environment
"dockerfile": "../docker/accelerate-cpu/Dockerfile"
// "dockerfile": "../docker/accelerate-gpu/Dockerfile"
},
"runArgs": [
// ACTION NEEDED: uncomment the next line if your local machine has GPUs available
// "--gpus", "all",
// Enable the docker container to access system resources
"--ipc", "host"
],
"remoteEnv": {
"PYTHONPATH": "${containerEnv:PATH}:${containerWorkspaceFolder}"
},
"customizations": {
"vscode": {
"extensions": [
// Ensure we have IntelliSense in VSCode when running inside container
"ms-python.python"
]
}
},
"workspaceFolder": "/workspaces/accelerate",
// Need git for VSCode to color code modifications. Only runs when building environment.
"onCreateCommand": "apt-get update && apt-get install -y git && pip install -e '.[dev]'"
} | accelerate/.devcontainer/devcontainer.json/0 | {
"file_path": "accelerate/.devcontainer/devcontainer.json",
"repo_id": "accelerate",
"token_count": 459
} |
<!---
Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# How to contribute to 🤗 Accelerate?
Everyone is welcome to contribute, and we value everybody's contribution. Code
is thus not the only way to help the community. Answering questions, helping
others, reaching out and improving the documentation are immensely valuable to
the community.
It also helps us if you spread the word: reference the library from blog posts
on the awesome projects it made possible, shout out on Twitter every time it has
helped you, or simply star the repo to say "thank you".
Whichever way you choose to contribute, please be mindful to respect our
[code of conduct](https://github.com/huggingface/accelerate/blob/main/CODE_OF_CONDUCT.md).
## You can contribute in so many ways!
Some of the ways you can contribute to Accelerate:
* Fixing outstanding issues with the existing code;
* Contributing to the examples or to the documentation;
* Submitting issues related to bugs or desired new features.
## Submitting a new issue or feature request
Do your best to follow these guidelines when submitting an issue or a feature
request. It will make it easier for us to come back to you quickly and with good
feedback.
### Did you find a bug?
The 🤗 Accelerate library is robust and reliable thanks to the users who notify us of
the problems they encounter. So thank you for reporting an issue.
First, we would really appreciate it if you could **make sure the bug was not
already reported** (use the search bar on Github under Issues).
Did not find it? :( So we can act quickly on it, please follow these steps:
* Include your **OS type and version**, the versions of **Python** and **PyTorch**.
* A short, self-contained, code snippet that allows us to reproduce the bug in
less than 30s;
* Provide us with your Accelerate configuration (located by default in `~/.cache/huggingface/accelerate/default_config.yaml`)
### Do you want a new feature?
A good feature request addresses the following points:
1. Motivation first:
* Is it related to a problem/frustration with the library? If so, please explain
why. Providing a code snippet that demonstrates the problem is best.
* Is it related to something you would need for a project? We'd love to hear
about it!
* Is it something you worked on and think could benefit the community?
Awesome! Tell us what problem it solved for you.
2. Write a *full paragraph* describing the feature;
3. Provide a **code snippet** that demonstrates its future use;
4. In case this is related to a paper, please attach a link;
5. Attach any additional information (drawings, screenshots, etc.) you think may help.
If your issue is well written we're already 80% of the way there by the time you
post it.
## Submitting a pull request (PR)
Before writing code, we strongly advise you to search through the existing PRs or
issues to make sure that nobody is already working on the same thing. If you are
unsure, it is always a good idea to open an issue to get some feedback.
You will need basic `git` proficiency to be able to contribute to
🤗 Accelerate. `git` is not the easiest tool to use but it has the greatest
manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro
Git](https://git-scm.com/book/en/v2) is a very good reference.
Follow these steps to start contributing:
1. Fork the [repository](https://github.com/huggingface/accelerate) by
clicking on the 'Fork' button on the repository's page. This creates a copy of the code
under your GitHub user account.
2. Clone your fork to your local disk, and add the base repository as a remote. The following command
assumes you have your public SSH key uploaded to GitHub. See the following guide for more
[information](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository).
```bash
$ git clone [email protected]:<your Github handle>/accelerate.git
$ cd accelerate
$ git remote add upstream https://github.com/huggingface/accelerate.git
```
3. Create a new branch to hold your development changes, and do this for every new PR you work on.
 Start by synchronizing your `main` branch with the `upstream/main` branch (more details in the [GitHub Docs](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/syncing-a-fork)):
```bash
$ git checkout main
$ git fetch upstream
$ git merge upstream/main
```
Once your `main` branch is synchronized, create a new branch from it:
```bash
$ git checkout -b a-descriptive-name-for-my-changes
```
**Do not** work on the `main` branch.
4. Set up a development environment by running the following command in a conda or a virtual environment you've created for working on this library:
```bash
$ pip install -e ".[dev]"
```
This will install all testing and linting/code quality dependencies for the library (see `quality`, `test_dev`,
`test_prod` targets in [`setup.py`](./setup.py)).
(If accelerate was already installed in the virtual environment, remove
it with `pip uninstall accelerate` before reinstalling it in editable
mode with the `-e` flag).
Alternatively, if you are using [Visual Studio Code](https://code.visualstudio.com/Download), the fastest way to get set up is by using
the provided Dev Container. Documentation on how to get started with dev containers is available [here](https://code.visualstudio.com/docs/remote/containers).
5. Develop the features on your branch.
As you work on the features, you should make sure that the test suite
passes. You should run the tests impacted by your changes like this (see
below an explanation regarding the environment variable):
```bash
$ pytest tests/<TEST_TO_RUN>.py
```
> For the following commands leveraging the `make` utility, we recommend using the WSL system when running on
> Windows. More information [here](https://docs.microsoft.com/en-us/windows/wsl/about).
You can also run the full suite with the following command.
```bash
$ make test
```
`accelerate` relies on `ruff` to format its source code
consistently. After you make changes, apply automatic style corrections and code verifications
 that can't be automated in one go with the dedicated `make` target defined in the repository's [`Makefile`](./Makefile).
 This target is also optimized to only work with files modified by the PR you're working on.
 If you prefer to run the checks one after the other, the following command applies the
 style corrections:
```bash
$ make style
```
 `accelerate` also uses a few custom scripts to check for coding mistakes. Quality
 control runs in CI; however, you can also run the same checks with:
```bash
$ make quality
```
You can also set up [`pre-commit`](https://pre-commit.com/) to run these checks
automatically as Git commit hooks.
```bash
$ pip install pre-commit
$ pre-commit install
```
Once you're happy with your changes, add changed files using `git add` and
make a commit with `git commit` to record your changes locally:
```bash
$ git add modified_file.py
$ git commit
```
Please write [good commit messages](https://chris.beams.io/posts/git-commit/).
It is a good idea to sync your copy of the code with the original
repository regularly. This way you can quickly account for changes:
```bash
$ git fetch upstream
$ git rebase upstream/main
```
Push the changes to your account using:
```bash
$ git push -u origin a-descriptive-name-for-my-changes
```
6. Once you are satisfied (**and the checklist below is happy too**), go to the
webpage of your fork on GitHub. Click on 'Pull request' to send your changes
to the project maintainers for review.
7. It's ok if maintainers ask you for changes. It happens to core contributors
too! So everyone can see the changes in the Pull request, work in your local
branch and push the changes to your fork. They will automatically appear in
the pull request.
### Checklist
1. The title of your pull request should be a summary of its contribution;
2. If your pull request addresses an issue, please mention the issue number in
the pull request description to make sure they are linked (and people
consulting the issue know you are working on it);
3. To indicate a work in progress please prefix the title with `[WIP]`, or mark
the PR as a draft PR. These are useful to avoid duplicated work, and to differentiate
it from PRs ready to be merged;
4. Make sure existing tests pass;
5. Add high-coverage tests. No quality testing = no merge.
See an example of a good PR here: https://github.com/huggingface/accelerate/pull/255
### Tests
An extensive test suite is included to test the library behavior and several examples. Library tests can be found in
the [tests folder](https://github.com/huggingface/accelerate/tree/main/tests).
We use `pytest` in order to run the tests. From the root of the
repository, here's how to run tests with `pytest` for the library:
```bash
$ python -m pytest -sv ./tests
```
In fact, that's how `make test` is implemented (sans the `pip install` line)!
You can specify a smaller set of tests in order to test only the feature
you're working on.
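For example, to run a single test file, or a single test selected by keyword (the file and test names below are only illustrative):
```bash
$ python -m pytest -sv ./tests/test_utils.py
$ python -m pytest -sv ./tests/test_utils.py -k "name_of_the_test"
```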
| accelerate/CONTRIBUTING.md/0 | {
"file_path": "accelerate/CONTRIBUTING.md",
"repo_id": "accelerate",
"token_count": 2693
} |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script tests to ensure that `accelerate` performs at the same level as raw `TransformersEngine`.
This particular script verifies this for DeepSpeed training.
"""
from unittest.mock import patch
import deepspeed
import evaluate
import torch
import transformer_engine.common.recipe as te_recipe
import transformer_engine.pytorch as te
from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities
from transformer_engine.common.recipe import DelayedScaling
from accelerate import Accelerator, DeepSpeedPlugin
from accelerate.state import AcceleratorState
from accelerate.utils import FP8RecipeKwargs, set_seed
from accelerate.utils.transformer_engine import convert_model
MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")
def train_baseline(zero_stage: int = 1):
# This forces transformers to think Zero-3 Init should be used
with patch("transformers.integrations.deepspeed.is_deepspeed_zero3_enabled") as mock:
mock.return_value = zero_stage == 3
set_seed(42)
accelerator = Accelerator()
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
MODEL_NAME, accelerator=accelerator
)
# Convert the model to TE
old_named_params = get_named_parameters(model)
with torch.no_grad():
convert_model(model)
new_named_params = get_named_parameters(model)
mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
for param_group in optimizer.param_groups:
param_group["params"] = [mapping[p] for p in param_group["params"]]
FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"}
fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS)
import numpy as np
config = {
"train_batch_size": 32,
"train_micro_batch_size_per_gpu": 16,
"gradient_accumulation_steps": 1,
"zero_optimization": {
"stage": zero_stage,
"offload_optimizer": {"device": "none", "nvme_path": None},
"offload_param": {"device": "none", "nvme_path": None},
"stage3_gather_16bit_weights_on_model_save": False,
},
"gradient_clipping": 1.0,
"steps_per_print": np.inf,
"bf16": {"enabled": True},
"fp16": {"enabled": False},
"zero_allow_untested_optimizer": True,
}
(
model,
optimizer,
_,
_,
) = deepspeed.initialize(
model=model,
optimizer=optimizer,
config_params=config,
)
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
model_outputs = []
data = []
for _ in range(2):
for batch in train_dataloader:
with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
outputs = model(**batch)
data.append(batch.to("cpu"))
model_outputs.append(outputs.logits.to("cpu"))
loss = outputs.loss
model.backward(loss)
model.step()
for _ in range(accelerator.num_processes):
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.destroy()
assert (
trained_model_results["accuracy"] > base_model_results["accuracy"]
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
assert (
trained_model_results["f1"] > base_model_results["f1"]
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
return base_model_results, trained_model_results, model_outputs, data
def train_integration(zero_stage: int = 1):
set_seed(42)
FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"}
kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)]
AcceleratorState()._reset_state(True)
deepspeed_plugin = DeepSpeedPlugin(
zero_stage=zero_stage,
zero3_init_flag=zero_stage == 3,
)
accelerator = Accelerator(
mixed_precision="fp8", kwargs_handlers=kwargs_handlers, deepspeed_plugin=deepspeed_plugin
)
accelerator.state.deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] = 16
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
MODEL_NAME, accelerator=accelerator
)
model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
model_outputs = []
data = []
for _ in range(2):
for batch in train_dataloader:
outputs = model(**batch)
data.append(batch.to("cpu"))
model_outputs.append(outputs.logits.to("cpu"))
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.destroy()
assert (
trained_model_results["accuracy"] > base_model_results["accuracy"]
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
assert (
trained_model_results["f1"] > base_model_results["f1"]
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
return base_model_results, trained_model_results, model_outputs, data
if __name__ == "__main__":
# for zero_stage in [1, 2, 3]:
zero_stage = 1
baseline_not_trained, baseline_trained, baseline_outputs, baseline_data = train_baseline(zero_stage)
accelerator_not_trained, accelerator_trained, accelerator_outputs, accelerator_data = train_integration(zero_stage)
assert (
baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
), f'ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
assert (
baseline_not_trained["f1"] == accelerator_not_trained["f1"]
), f'ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
assert (
baseline_trained["accuracy"] == accelerator_trained["accuracy"]
), f'ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
assert (
baseline_trained["f1"] == accelerator_trained["f1"]
), f'ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
torch.distributed.destroy_process_group()
| accelerate/benchmarks/fp8/transformer_engine/distrib_deepspeed.py/0 | {
"file_path": "accelerate/benchmarks/fp8/transformer_engine/distrib_deepspeed.py",
"repo_id": "accelerate",
"token_count": 2964
} |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Overview
Welcome to the Accelerate tutorials! These introductory guides will help bring you up to speed on working with Accelerate.
You'll learn how to modify your code to have it work with the API seamlessly, how to launch your script properly,
and more!
These tutorials assume some basic knowledge of Python and familiarity with the PyTorch framework.
If you have any questions about Accelerate, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/accelerate/18). | accelerate/docs/source/basic_tutorials/overview.md/0 | {
"file_path": "accelerate/docs/source/basic_tutorials/overview.md",
"repo_id": "accelerate",
"token_count": 303
} |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Working with large models
## Dispatch and offload
### init_empty_weights
[[autodoc]] big_modeling.init_empty_weights
### cpu_offload
[[autodoc]] big_modeling.cpu_offload
### cpu_offload_with_hook
[[autodoc]] big_modeling.cpu_offload_with_hook
### disk_offload
[[autodoc]] big_modeling.disk_offload
### dispatch_model
[[autodoc]] big_modeling.dispatch_model
### load_checkpoint_and_dispatch
[[autodoc]] big_modeling.load_checkpoint_and_dispatch
### load_checkpoint_in_model
[[autodoc]] big_modeling.load_checkpoint_in_model
### infer_auto_device_map
[[autodoc]] utils.infer_auto_device_map
## Hooks
### ModelHook
[[autodoc]] hooks.ModelHook
### AlignDevicesHook
[[autodoc]] hooks.AlignDevicesHook
### SequentialHook
[[autodoc]] hooks.SequentialHook
## Adding Hooks
### add_hook_to_module
[[autodoc]] hooks.add_hook_to_module
### attach_execution_device_hook
[[autodoc]] hooks.attach_execution_device_hook
### attach_align_device_hook
[[autodoc]] hooks.attach_align_device_hook
### attach_align_device_hook_on_blocks
[[autodoc]] hooks.attach_align_device_hook_on_blocks
## Removing Hooks
### remove_hook_from_module
[[autodoc]] hooks.remove_hook_from_module
### remove_hook_from_submodules
[[autodoc]] hooks.remove_hook_from_submodules
## Utilities
### has_offloaded_params
[[autodoc]] utils.has_offloaded_params
### align_module_device
[[autodoc]] utils.align_module_device | accelerate/docs/source/package_reference/big_modeling.md/0 | {
"file_path": "accelerate/docs/source/package_reference/big_modeling.md",
"repo_id": "accelerate",
"token_count": 712
} |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Checkpointing
When training a PyTorch model with Accelerate, you may often want to save and continue a state of training. Doing so requires
saving and loading the model, optimizer, RNG generators, and the GradScaler. Inside Accelerate are two convenience functions to achieve this quickly:
- Use [`~Accelerator.save_state`] for saving everything mentioned above to a folder location
- Use [`~Accelerator.load_state`] for loading everything stored from an earlier `save_state`
To further customize where and how states are saved through [`~Accelerator.save_state`], the [`~utils.ProjectConfiguration`] class can be used. For example,
if `automatic_checkpoint_naming` is enabled, each saved checkpoint will then be located at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.
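For instance, a minimal sketch of enabling automatic checkpoint naming (the directory name is just an example):
```python
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

project_config = ProjectConfiguration(project_dir="my/save/path", automatic_checkpoint_naming=True)
accelerator = Accelerator(project_config=project_config)

# Each call now writes to my/save/path/checkpoints/checkpoint_0, checkpoint_1, ...
accelerator.save_state()
```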
It should be noted that the expectation is that those states come from the same training script; they should not come from two separate scripts.
- By using [`~Accelerator.register_for_checkpointing`], you can register custom objects to be automatically stored or loaded from the two prior functions,
so long as the object has a `state_dict` **and** a `load_state_dict` functionality. This could include objects such as a learning rate scheduler.
Below is a brief example using checkpointing to save and reload a state during training:
```python
from accelerate import Accelerator
import torch
accelerator = Accelerator(project_dir="my/save/path")
my_scheduler = torch.optim.lr_scheduler.StepLR(my_optimizer, step_size=1, gamma=0.99)
my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, my_optimizer, my_training_dataloader)
# Register the LR scheduler
accelerator.register_for_checkpointing(my_scheduler)
# Save the starting state
accelerator.save_state()
device = accelerator.device
my_model.to(device)
# Perform training
for epoch in range(num_epochs):
for batch in my_training_dataloader:
my_optimizer.zero_grad()
inputs, targets = batch
inputs = inputs.to(device)
targets = targets.to(device)
outputs = my_model(inputs)
loss = my_loss_function(outputs, targets)
accelerator.backward(loss)
my_optimizer.step()
my_scheduler.step()
# Restore the previous state
accelerator.load_state("my/save/path/checkpointing/checkpoint_0")
```
## Restoring the state of the DataLoader
After resuming from a checkpoint, it may also be desirable to resume from a particular point in the active `DataLoader` if
the state was saved during the middle of an epoch. You can use [`~Accelerator.skip_first_batches`] to do so.
```python
from accelerate import Accelerator
accelerator = Accelerator(project_dir="my/save/path")
train_dataloader = accelerator.prepare(train_dataloader)
accelerator.load_state("my_state")
# Assume the checkpoint was saved 100 steps into the epoch
skipped_dataloader = accelerator.skip_first_batches(train_dataloader, 100)
# After the first iteration, go back to `train_dataloader`
# First epoch
for batch in skipped_dataloader:
# Do something
pass
# Second epoch
for batch in train_dataloader:
# Do something
pass
```
| accelerate/docs/source/usage_guides/checkpoint.md/0 | {
"file_path": "accelerate/docs/source/usage_guides/checkpoint.md",
"repo_id": "accelerate",
"token_count": 1150
} |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Amazon SageMaker
Hugging Face and Amazon introduced new [Hugging Face Deep Learning Containers (DLCs)](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers) to
make it easier than ever to train Hugging Face Transformer models in [Amazon SageMaker](https://aws.amazon.com/sagemaker/).
## Getting Started
### Setup & Installation
Before you can run your Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. If you do not
have an AWS account yet learn more [here](https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html).
After you have your AWS Account you need to install the `sagemaker` sdk for Accelerate with:
```bash
pip install "accelerate[sagemaker]" --upgrade
```
Accelerate currently uses the DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. Accelerate is not in the DLC yet (will soon be added!) so to use it within Amazon SageMaker you need to create a
`requirements.txt` in the same directory where your training script is located and add it as dependency:
```
accelerate
```
You should also add any other dependencies you have to this `requirements.txt`.
### Configure Accelerate
You can configure the launch configuration for Amazon SageMaker the same as you do for non SageMaker training jobs with
the Accelerate CLI:
```bash
accelerate config
# In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 1
```
Accelerate will go through a questionnaire about your Amazon SageMaker setup and create a config file you can edit.
<Tip>
Accelerate is not saving any of your credentials.
</Tip>
### Prepare an Accelerate fine-tuning script
The training script is very similar to a training script you might run outside of SageMaker, but to save your model
after training you need to specify either `/opt/ml/model` or use `os.environ["SM_MODEL_DIR"]` as your save
directory. After training, artifacts in this directory are uploaded to S3:
```diff
- torch.save('/opt/ml/model`)
+ accelerator.save('/opt/ml/model')
```
<Tip warning={true}>
SageMaker doesn’t support argparse actions. If you want to use, for example, boolean hyperparameters, you need to
specify type as bool in your script and provide an explicit True or False value for this hyperparameter. [[REF]](https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html#prepare-a-pytorch-training-script).
</Tip>
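One possible way to handle this in the training script is sketched below; the `--do_eval` argument and the `str_to_bool` helper are illustrative, not part of Accelerate or SageMaker:
```python
import argparse

def str_to_bool(value: str) -> bool:
    # SageMaker passes hyperparameters to the script as strings, so convert explicitly
    return str(value).lower() in ("true", "1", "yes")

parser = argparse.ArgumentParser()
# No `action="store_true"` here; instead pass an explicit value, e.g. `do_eval=True`
parser.add_argument("--do_eval", type=str_to_bool, default=False)
args = parser.parse_args()
```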
### Launch Training
You can launch your training with Accelerate CLI with:
```
accelerate launch path_to_script.py --args_to_the_script
```
This will launch your training script using your configuration. The only thing you have to do is provide all the
arguments needed by your training script as named arguments.
**Examples**
<Tip>
If you run one of the example scripts, don't forget to add `accelerator.save('/opt/ml/model')` to it.
</Tip>
```bash
accelerate launch ./examples/sagemaker_example.py
```
Outputs:
```
Configuring Amazon SageMaker environment
Converting Arguments to Hyperparameters
Creating Estimator
2021-04-08 11:56:50 Starting - Starting the training job...
2021-04-08 11:57:13 Starting - Launching requested ML instancesProfilerReport-1617883008: InProgress
.........
2021-04-08 11:58:54 Starting - Preparing the instances for training.........
2021-04-08 12:00:24 Downloading - Downloading input data
2021-04-08 12:00:24 Training - Downloading the training image..................
2021-04-08 12:03:39 Training - Training image download completed. Training in progress..
........
epoch 0: {'accuracy': 0.7598039215686274, 'f1': 0.8178438661710037}
epoch 1: {'accuracy': 0.8357843137254902, 'f1': 0.882249560632689}
epoch 2: {'accuracy': 0.8406862745098039, 'f1': 0.8869565217391304}
........
2021-04-08 12:05:40 Uploading - Uploading generated training model
2021-04-08 12:05:40 Completed - Training job completed
Training seconds: 331
Billable seconds: 331
You can find your model data at: s3://your-bucket/accelerate-sagemaker-1-2021-04-08-11-56-47-108/output/model.tar.gz
```
## Advanced Features
### Distributed Training: Data Parallelism
Set up the Accelerate config by running `accelerate config` and answering the SageMaker questions.
To use SageMaker DDP, select it when asked
`What is the distributed mode? ([0] No distributed training, [1] data parallelism):`.
Example config below:
```yaml
base_job_name: accelerate-sagemaker-1
compute_environment: AMAZON_SAGEMAKER
distributed_type: DATA_PARALLEL
ec2_instance_type: ml.p3.16xlarge
iam_role_name: xxxxx
image_uri: null
mixed_precision: fp16
num_machines: 1
profile: xxxxx
py_version: py10
pytorch_version: 2.5.0
region: us-east-1
transformers_version: 4.17.0
use_cpu: false
```
### Distributed Training: Model Parallelism
*currently in development, will be supported soon.*
### Python packages and dependencies
Accelerate currently uses the DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. If you
want to use different/other Python packages you can do this by adding them to the `requirements.txt`. These packages
will be installed before your training script is started.
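For example, a hypothetical `requirements.txt` that also installs two additional (illustrative) packages:
```
accelerate
evaluate
scikit-learn
```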
### Local Training: SageMaker Local mode
The local mode in the SageMaker SDK allows you to run your training script locally inside the HuggingFace DLC (Deep Learning container)
or using your custom container image. This is useful for debugging and testing your training script inside the final container environment.
Local mode uses Docker compose (*Note: Docker Compose V2 is not supported yet*). The SDK will handle the authentication against ECR
to pull the DLC to your local environment. You can emulate CPU (single and multi-instance) and GPU (single instance) SageMaker training jobs.
To use local mode, you need to set your `ec2_instance_type` to `local`.
```yaml
ec2_instance_type: local
```
### Advanced configuration
The configuration allows you to override parameters for the [Estimator](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html).
These settings have to be applied in the config file and are not part of `accelerate config`. You can control many additional aspects of the training job, e.g. use Spot instances, enable network isolation and many more.
```yaml
additional_args:
# enable network isolation to restrict internet access for containers
enable_network_isolation: True
```
You can find all available configuration [here](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html).
### Use Spot Instances
You can use Spot Instances, for example (see [Advanced configuration](#advanced-configuration)):
```yaml
additional_args:
use_spot_instances: True
max_wait: 86400
```
*Note: Spot Instances can be terminated at any time, and training then has to be continued from a checkpoint. This is not handled in Accelerate out of the box. Contact us if you would like this feature.*
### Remote scripts: Use scripts located on Github
*undecided if feature is needed. Contact us if you would like this feature.* | accelerate/docs/source/usage_guides/sagemaker.md/0 | {
"file_path": "accelerate/docs/source/usage_guides/sagemaker.md",
"repo_id": "accelerate",
"token_count": 2220
} |
{
"fp16": {
"enabled": true,
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 16,
"hysteresis": 2,
"min_loss_scale": 1
},
"optimizer": {
"type": "AdamW",
"params": {
"lr": "auto",
"weight_decay": "auto",
"torch_adam": true,
"adam_w_mode": true
}
},
"scheduler": {
"type": "WarmupDecayLR",
"params": {
"warmup_min_lr": "auto",
"warmup_max_lr": "auto",
"warmup_num_steps": "auto",
"total_num_steps": "auto"
}
},
"zero_optimization": {
"stage": 1,
"allgather_partitions": true,
"allgather_bucket_size": 2e8,
"overlap_comm": true,
"reduce_scatter": true,
"reduce_bucket_size": "auto",
"contiguous_gradients": true
},
"gradient_accumulation_steps": 1,
"gradient_clipping": "auto",
"steps_per_print": 2000,
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"wall_clock_breakdown": false
} | accelerate/examples/deepspeed_config_templates/zero_stage1_config.json/0 | {
"file_path": "accelerate/examples/deepspeed_config_templates/zero_stage1_config.json",
"repo_id": "accelerate",
"token_count": 614
} |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import torch
from packaging import version
from transformers import AutoModelForSeq2SeqLM
from accelerate import PartialState, prepare_pippy
from accelerate import __version__ as accelerate_version
from accelerate.utils import set_seed
if version.parse(accelerate_version) > version.parse("0.33.0"):
raise RuntimeError(
"Using encoder/decoder models is not supported with the `torch.pipelining` integration or accelerate>=0.34.0. "
"Please use a lower accelerate version and `torchpippy`, which this example uses."
)
# Set the random seed to have reproducible outputs
set_seed(42)
# Create an example model
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
model.eval()
# Input configs
# Create example inputs for the model
input = torch.randint(
low=0,
high=model.config.vocab_size,
size=(2, 1024), # bs x seq_len
device="cpu",
dtype=torch.int64,
requires_grad=False,
)
example_inputs = {"input_ids": input, "decoder_input_ids": input}
# Create a pipeline stage from the model
# Using `auto` is equivalent to letting `device_map="auto"` figure
# out device mapping and will also split the model according to the
# number of total GPUs available if it fits on one GPU
model = prepare_pippy(
model,
no_split_module_classes=["T5Block"],
example_kwargs=example_inputs,
)
# You can pass `gather_output=True` to have the output from the model
# available on all GPUs
# model = prepare_pippy(
# model,
# no_split_module_classes=["T5Block"],
# example_kwargs=example_inputs,
# gather_outputs=True
# )
# The model expects a tuple during real inference
# with the data on the first device
args = (example_inputs["input_ids"].to("cuda:0"), example_inputs["decoder_input_ids"].to("cuda:0"))
# Take an average of 5 times
# Measure first batch
torch.cuda.synchronize()
start_time = time.time()
with torch.no_grad():
output = model(*args)
torch.cuda.synchronize()
end_time = time.time()
first_batch = end_time - start_time
# Now that CUDA is init, measure after
torch.cuda.synchronize()
start_time = time.time()
for i in range(5):
with torch.no_grad():
output = model(*args)
torch.cuda.synchronize()
end_time = time.time()
# The outputs are only on the final process by default
if PartialState().is_last_process:
output = torch.stack(tuple(output[0]))
print(f"Time of first pass: {first_batch}")
print(f"Average time per batch: {(end_time - start_time) / 5}")
PartialState().destroy_process_group()
| accelerate/examples/inference/pippy/t5.py/0 | {
"file_path": "accelerate/examples/inference/pippy/t5.py",
"repo_id": "accelerate",
"token_count": 1023
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from manim import *
class Stage2(Scene):
def construct(self):
# The dataset items
fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0)
columns = [
VGroup(*[Rectangle(height=0.25,width=0.25,color="green") for i in range(8)]).arrange(RIGHT,buff=0)
for j in range(4)
]
dataset_recs = VGroup(*columns).arrange(UP, buff=0)
dataset_text = Text("Dataset", font_size=24)
dataset = Group(dataset_recs,dataset_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
dataset.move_to([-2,0,0])
self.add(dataset)
code = Code(
code="dataloader = DataLoader(...)\nfor batch in dataloader():\n\t...",
tab_width=4,
background="window",
language="Python",
font="Monospace",
font_size=14,
corner_radius=.2,
insert_line_no=False,
line_spacing=.75,
style=Code.styles_list[1],
)
code.move_to([-3.5, 2.5, 0])
self.add(code)
# The dataloader itself
dataloader = Group(
Rectangle(color="red", height=2, width=2),
Text("DataLoader", font_size=24)
).arrange(DOWN, buff=.5, aligned_edge=DOWN)
sampler = Group(
Rectangle(color="blue", height=1, width=1),
Text("Sampler", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN)
dataloader.move_to([1, 0, 0])
sampler.move_to([.75,.25,0])
self.add(dataloader)
self.add(sampler)
gpu_1 = Group(
Rectangle(color="white", height=1, width=1),
Text("GPU 1", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4, 2, 0])
gpu_2 = Group(
Rectangle(color="white", height=1, width=1),
Text("GPU 2", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4, .5, 0])
gpu_3 = Group(
Rectangle(color="white", height=1, width=1),
Text("GPU 3", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4, -1, 0])
gpu_4 = Group(
Rectangle(color="white", height=1, width=1),
Text("GPU 4", font_size=12)
).arrange(DOWN, buff=.25, aligned_edge=DOWN).move_to([4, -2.5, 0])
gpus = [gpu_1[0], gpu_2[0], gpu_3[0], gpu_4[0]]
self.add(gpu_1, gpu_2, gpu_3, gpu_4)
# Animate their existence
self.play(
Create(gpu_1[0], run_time=0.5),
Create(gpu_2[0], run_time=0.5),
Create(gpu_3[0], run_time=0.5),
Create(gpu_4[0], run_time=0.5),
Create(dataset_recs, run_time=1),
Create(sampler[0], run_time=1),
Create(dataloader[0], run_time=1)
)
step_1 = MarkupText(
f"Without any special care, \nthe same data is sent though each sampler, \nand the same samples are spit out on each GPU",
font_size=18
)
step_1.move_to([0, -2.5, 0])
self.play(
Write(step_1, run_time=4),
)
first_animations = []
second_animations = []
colors = ["BLUE_E", "DARK_BROWN", "GOLD_E", "GRAY_A"]
current_color = colors[0]
buff = 0
lr_buff = .25
old_target = None
new_datasets = []
for i,data in enumerate(dataset_recs[-1]):
if i % 2 == 0:
# current_color = colors[i//2]
current_color = "BLUE_E"
dataset_target = Rectangle(height=0.46/2,width=0.46/2).set_stroke(width=0.).set_fill(current_color, opacity=0.7)
dataset_target.move_to(data)
dataset_target.generate_target()
aligned_edge = ORIGIN
if i % 2 == 0:
old_target = dataset_target.target
buff -= .25
aligned_edge = LEFT
dataset_target.target.next_to(
sampler, buff=buff, direction=UP,
aligned_edge=LEFT
)
else:
dataset_target.target.next_to(
old_target, direction=RIGHT, buff=0.01,
)
new_datasets.append(dataset_target)
first_animations.append(data.animate(run_time=0.5).set_stroke(current_color))
second_animations.append(MoveToTarget(dataset_target, run_time=1.5))
self.play(*first_animations)
self.play(*second_animations)
self.wait()
move_animation = []
for j,gpu in enumerate(gpus):
buff = 0
for i,data in enumerate(new_datasets):
if i % 2 == 0:
current_color = colors[i//2]
if j != 3:
data = data.copy()
data.generate_target()
aligned_edge = ORIGIN
if i % 2 == 0:
old_target = data.target
buff -= .25
aligned_edge = LEFT
data.target.next_to(
gpu, buff=buff, direction=UP,
aligned_edge=LEFT
)
else:
data.target.next_to(
old_target, direction=RIGHT, buff=0.01,
)
move_animation.append(MoveToTarget(data, run_time=1.5))
self.play(*move_animation)
self.remove(step_1)
step_2 = MarkupText(
f"This behavior is undesireable, because we want\neach GPU to see different data for efficient training.",
font_size=18
)
step_2.move_to([0, -2.5, 0])
self.play(
Write(step_2, run_time=2.5),
)
self.wait() | accelerate/manim_animations/dataloaders/stage_2.py/0 | {
"file_path": "accelerate/manim_animations/dataloaders/stage_2.py",
"repo_id": "accelerate",
"token_count": 3396
} |
#!/usr/bin/env python
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
compute_environment = _ask_options(
"In which compute environment are you running?",
["This machine", "AWS (Amazon SageMaker)"],
_convert_compute_environment,
)
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
config = get_sagemaker_input()
else:
config = get_cluster_input()
return config
def config_command_parser(subparsers=None):
if subparsers is not None:
parser = subparsers.add_parser("config", description=description)
else:
parser = argparse.ArgumentParser("Accelerate config command", description=description)
parser.add_argument(
"--config_file",
default=None,
help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
),
)
if subparsers is not None:
parser.set_defaults(func=config_command)
return parser
def config_command(args):
config = get_user_input()
if args.config_file is not None:
config_file = args.config_file
else:
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
config_file = default_yaml_config_file
if config_file.endswith(".json"):
config.to_json_file(config_file)
else:
config.to_yaml_file(config_file)
print(f"accelerate configuration saved at {config_file}")
def main():
parser = config_command_parser()
args = parser.parse_args()
config_command(args)
if __name__ == "__main__":
main()
| accelerate/src/accelerate/commands/config/config.py/0 | {
"file_path": "accelerate/src/accelerate/commands/config/config.py",
"repo_id": "accelerate",
"token_count": 1067
} |
#!/usr/bin/env python
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from accelerate.test_utils import execute_subprocess_async, path_in_accelerate_package
def test_command_parser(subparsers=None):
if subparsers is not None:
parser = subparsers.add_parser("test")
else:
parser = argparse.ArgumentParser("Accelerate test command")
parser.add_argument(
"--config_file",
default=None,
help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
),
)
if subparsers is not None:
parser.set_defaults(func=test_command)
return parser
def test_command(args):
script_name = path_in_accelerate_package("test_utils", "scripts", "test_script.py")
if args.config_file is None:
test_args = [script_name]
else:
test_args = f"--config_file={args.config_file} {script_name}".split()
cmd = ["accelerate-launch"] + test_args
result = execute_subprocess_async(cmd)
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!")
def main():
parser = test_command_parser()
args = parser.parse_args()
test_command(args)
if __name__ == "__main__":
main()
| accelerate/src/accelerate/commands/test.py/0 | {
"file_path": "accelerate/src/accelerate/commands/test.py",
"repo_id": "accelerate",
"token_count": 755
} |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import inspect
import io
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from contextlib import contextmanager
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
import accelerate
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_clearml_available,
is_comet_ml_available,
is_cuda_available,
is_datasets_available,
is_deepspeed_available,
is_dvclive_available,
is_import_timer_available,
is_mlu_available,
is_mps_available,
is_musa_available,
is_npu_available,
is_pandas_available,
is_pippy_available,
is_schedulefree_available,
is_tensorboard_available,
is_timm_available,
is_torch_version,
is_torch_xla_available,
is_torchdata_stateful_dataloader_available,
is_torchvision_available,
is_transformer_engine_available,
is_transformers_available,
is_triton_available,
is_wandb_available,
is_xpu_available,
str_to_bool,
)
def get_backend():
if is_torch_xla_available():
return "xla", torch.cuda.device_count(), torch.cuda.memory_allocated
elif is_cuda_available():
return "cuda", torch.cuda.device_count(), torch.cuda.memory_allocated
elif is_mps_available(min_version="2.0"):
return "mps", 1, torch.mps.current_allocated_memory
elif is_mps_available():
return "mps", 1, lambda: 0
elif is_mlu_available():
return "mlu", torch.mlu.device_count(), torch.mlu.memory_allocated
elif is_musa_available():
return "musa", torch.musa.device_count(), torch.musa.memory_allocated
elif is_npu_available():
return "npu", torch.npu.device_count(), torch.npu.memory_allocated
elif is_xpu_available():
return "xpu", torch.xpu.device_count(), torch.xpu.memory_allocated
else:
return "cpu", 1, lambda: 0
torch_device, device_count, memory_allocated_func = get_backend()
def get_launch_command(**kwargs) -> list:
"""
Wraps around `kwargs` to help simplify launching from `subprocess`.
Example:
```python
# returns ['accelerate', 'launch', '--num_processes=2', '--device_count=2']
get_launch_command(num_processes=2, device_count=2)
```
"""
command = ["accelerate", "launch"]
for k, v in kwargs.items():
if isinstance(v, bool) and v:
command.append(f"--{k}")
elif v is not None:
command.append(f"--{k}={v}")
return command
DEFAULT_LAUNCH_COMMAND = get_launch_command(num_processes=device_count, monitor_interval=0.1)
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = str_to_bool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
"Decorator that skips a test unconditionally"
return unittest.skip("Test was skipped")(test_case)
def slow(test_case):
"""
Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a
truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def require_cpu(test_case):
"""
    Decorator marking a test that must only be run on the CPU. These tests are skipped when a GPU is available.
"""
return unittest.skipUnless(torch_device == "cpu", "test requires only a CPU")(test_case)
def require_non_cpu(test_case):
"""
Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when there are no
hardware accelerator available.
"""
return unittest.skipUnless(torch_device != "cpu", "test requires a GPU")(test_case)
def require_cuda(test_case):
"""
    Decorator marking a test that requires CUDA. These tests are skipped when there is no GPU available or when
TorchXLA is available.
"""
return unittest.skipUnless(is_cuda_available() and not is_torch_xla_available(), "test requires a GPU")(test_case)
def require_xpu(test_case):
"""
    Decorator marking a test that requires XPU. These tests are skipped when there is no XPU available.
"""
return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)
def require_cuda_or_xpu(test_case):
"""
    Decorator marking a test that requires CUDA or XPU. These tests are skipped when there is no GPU available or when
TorchXLA is available.
"""
cuda_condition = is_cuda_available() and not is_torch_xla_available()
xpu_condition = is_xpu_available()
return unittest.skipUnless(cuda_condition or xpu_condition, "test requires a CUDA GPU or XPU")(test_case)
def require_non_xpu(test_case):
"""
Decorator marking a test that should be skipped for XPU.
"""
return unittest.skipUnless(torch_device != "xpu", "test requires a non-XPU")(test_case)
def require_mlu(test_case):
"""
    Decorator marking a test that requires MLU. These tests are skipped when there is no MLU available.
    """
    return unittest.skipUnless(is_mlu_available(), "test requires a MLU")(test_case)
def require_musa(test_case):
"""
    Decorator marking a test that requires MUSA. These tests are skipped when there is no MUSA available.
    """
    return unittest.skipUnless(is_musa_available(), "test requires a MUSA")(test_case)
def require_npu(test_case):
"""
    Decorator marking a test that requires NPU. These tests are skipped when there is no NPU available.
    """
    return unittest.skipUnless(is_npu_available(), "test requires an NPU")(test_case)
def require_mps(test_case):
"""
Decorator marking a test that requires MPS backend. These tests are skipped when torch doesn't support `mps`
backend.
"""
return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)
def require_huggingface_suite(test_case):
"""
Decorator marking a test that requires transformers and datasets. These tests are skipped when they are not.
"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available(),
"test requires the Hugging Face suite",
)(test_case)
def require_transformers(test_case):
"""
Decorator marking a test that requires transformers. These tests are skipped when they are not.
"""
return unittest.skipUnless(is_transformers_available(), "test requires the transformers library")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires timm. These tests are skipped when they are not.
"""
return unittest.skipUnless(is_timm_available(), "test requires the timm library")(test_case)
def require_torchvision(test_case):
"""
Decorator marking a test that requires torchvision. These tests are skipped when they are not.
"""
return unittest.skipUnless(is_torchvision_available(), "test requires the torchvision library")(test_case)
def require_triton(test_case):
"""
Decorator marking a test that requires triton. These tests are skipped when they are not.
"""
return unittest.skipUnless(is_triton_available(), "test requires the triton library")(test_case)
def require_schedulefree(test_case):
"""
Decorator marking a test that requires schedulefree. These tests are skipped when they are not.
"""
return unittest.skipUnless(is_schedulefree_available(), "test requires the schedulefree library")(test_case)
def require_bnb(test_case):
"""
Decorator marking a test that requires bitsandbytes. These tests are skipped when they are not.
"""
return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)
def require_tpu(test_case):
"""
Decorator marking a test that requires TPUs. These tests are skipped when there are no TPUs available.
"""
return unittest.skipUnless(is_torch_xla_available(check_is_tpu=True), "test requires TPU")(test_case)
def require_non_torch_xla(test_case):
"""
Decorator marking a test as requiring an environment without TorchXLA. These tests are skipped when TorchXLA is
available.
"""
return unittest.skipUnless(not is_torch_xla_available(), "test requires an env without TorchXLA")(test_case)
def require_single_device(test_case):
"""
Decorator marking a test that requires a single device. These tests are skipped when there is no hardware
accelerator available or number of devices is more than one.
"""
return unittest.skipUnless(torch_device != "cpu" and device_count == 1, "test requires a hardware accelerator")(
test_case
)
def require_single_gpu(test_case):
"""
    Decorator marking a test that requires CUDA on a single GPU. These tests are skipped when there is no GPU
    available or the number of GPUs is more than one.
"""
return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)
def require_single_xpu(test_case):
"""
    Decorator marking a test that requires a single XPU. These tests are skipped when there is no XPU
    available or the number of XPUs is more than one.
"""
return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)
def require_multi_device(test_case):
"""
Decorator marking a test that requires a multi-device setup. These tests are skipped on a machine without multiple
devices.
"""
return unittest.skipUnless(device_count > 1, "test requires multiple hardware accelerators")(test_case)
def require_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup. These tests are skipped on a machine without multiple
GPUs.
"""
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_multi_xpu(test_case):
"""
Decorator marking a test that requires a multi-XPU setup. These tests are skipped on a machine without multiple
XPUs.
"""
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
def require_deepspeed(test_case):
"""
Decorator marking a test that requires DeepSpeed installed. These tests are skipped when DeepSpeed isn't installed
"""
return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)
def require_tp(test_case):
"""
    Decorator marking a test that requires tensor parallelism (TP) support, i.e. torch >= 2.3.0. These tests are skipped on older torch versions.
"""
return unittest.skipUnless(is_torch_version(">=", "2.3.0"), "test requires torch version >= 2.3.0")(test_case)
def require_torch_min_version(test_case=None, version=None):
"""
Decorator marking that a test requires a particular torch version to be tested. These tests are skipped when an
installed torch version is less than the required one.
"""
if test_case is None:
return partial(require_torch_min_version, version=version)
return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)
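# Illustrative sketch (not part of the original module): `require_torch_min_version` is usually
# applied with the `version` keyword, relying on the `partial` branch above. The test class and
# test name below are hypothetical.
class _ExampleMinTorchVersionTest(unittest.TestCase):
    @require_torch_min_version(version="2.3.0")
    def test_runs_only_on_recent_torch(self):
        # Skipped automatically when the installed torch is older than 2.3.0.
        assert is_torch_version(">=", "2.3.0")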
def require_tensorboard(test_case):
"""
Decorator marking a test that requires tensorboard installed. These tests are skipped when tensorboard isn't
installed
"""
return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)
def require_wandb(test_case):
"""
Decorator marking a test that requires wandb installed. These tests are skipped when wandb isn't installed
"""
return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_comet_ml(test_case):
"""
Decorator marking a test that requires comet_ml installed. These tests are skipped when comet_ml isn't installed
"""
return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
def require_clearml(test_case):
"""
Decorator marking a test that requires clearml installed. These tests are skipped when clearml isn't installed
"""
return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
def require_dvclive(test_case):
"""
Decorator marking a test that requires dvclive installed. These tests are skipped when dvclive isn't installed
"""
return unittest.skipUnless(is_dvclive_available(), "test requires dvclive")(test_case)
def require_pandas(test_case):
"""
Decorator marking a test that requires pandas installed. These tests are skipped when pandas isn't installed
"""
return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
def require_pippy(test_case):
"""
Decorator marking a test that requires pippy installed. These tests are skipped when pippy isn't installed
"""
return unittest.skipUnless(is_pippy_available(), "test requires pippy")(test_case)
def require_import_timer(test_case):
"""
Decorator marking a test that requires tuna interpreter installed. These tests are skipped when tuna isn't
installed
"""
return unittest.skipUnless(is_import_timer_available(), "test requires tuna interpreter")(test_case)
def require_transformer_engine(test_case):
"""
Decorator marking a test that requires transformers engine installed. These tests are skipped when transformers
engine isn't installed
"""
return unittest.skipUnless(is_transformer_engine_available(), "test requires transformers engine")(test_case)
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
"""
Decorator marking that a test requires at least one tracking library installed. These tests are skipped when none
are installed
"""
return unittest.skipUnless(
_atleast_one_tracker_available,
"test requires at least one tracker to be available and for `comet_ml` to not be installed",
)(test_case)
def require_torchdata_stateful_dataloader(test_case):
"""
Decorator marking a test that requires torchdata.stateful_dataloader.
These tests are skipped when torchdata with stateful_dataloader module isn't installed.
"""
return unittest.skipUnless(
is_torchdata_stateful_dataloader_available(), "test requires torchdata.stateful_dataloader"
)(test_case)
class TempDirTestCase(unittest.TestCase):
"""
A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class, wipes its
data at the start of a test, and then destroyes it at the end of the TestCase.
Useful for when a class or API requires a single constant folder throughout it's use, such as Weights and Biases
The temporary directory location will be stored in `self.tmpdir`
"""
clear_on_setup = True
@classmethod
def setUpClass(cls):
"Creates a `tempfile.TemporaryDirectory` and stores it in `cls.tmpdir`"
cls.tmpdir = Path(tempfile.mkdtemp())
@classmethod
def tearDownClass(cls):
"Remove `cls.tmpdir` after test suite has finished"
if os.path.exists(cls.tmpdir):
shutil.rmtree(cls.tmpdir)
def setUp(self):
"Destroy all contents in `self.tmpdir`, but not `self.tmpdir`"
if self.clear_on_setup:
for path in self.tmpdir.glob("**/*"):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(path)
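# Illustrative sketch (not part of the original module): a hypothetical TestCase relying on the
# shared `tmpdir` provided by `TempDirTestCase`. The file name is made up; `setUp` wipes the
# directory before each test, so nothing leaks between tests.
class _ExampleTempDirUsage(TempDirTestCase):
    def test_writes_into_shared_tmpdir(self):
        log_file = self.tmpdir / "run.log"
        log_file.write_text("step=1 loss=0.5")
        assert log_file.exists()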
class AccelerateTestCase(unittest.TestCase):
"""
A TestCase class that will reset the accelerator state at the end of every test. Every test that checks or utilizes
the `AcceleratorState` class should inherit from this to avoid silent failures due to state being shared between
tests.
"""
def tearDown(self):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
"""
A TestCase class designed to dynamically add various mockers that should be used in every test, mimicking the
behavior of a class-wide mock when defining one normally will not do.
Useful when a mock requires specific information available only initialized after `TestCase.setUpClass`, such as
setting an environment variable with that information.
    The `add_mocks` function should be run at the end of a `TestCase`'s `setUp` function, after a call to
`super().setUp()` such as:
```python
def setUp(self):
super().setUp()
        mocks = mock.patch.dict(os.environ, {"SOME_ENV_VAR": "SOME_VALUE"})
self.add_mocks(mocks)
```
"""
def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
"""
Add custom mocks for tests that should be repeated on each test. Should be called during
`MockingTestCase.setUp`, after `super().setUp()`.
Args:
mocks (`mock.Mock` or list of `mock.Mock`):
Mocks that should be added to the `TestCase` after `TestCase.setUpClass` has been run
"""
self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
state = AcceleratorState()
tensor = tensor[None].clone().to(state.device)
tensors = gather(tensor).cpu()
tensor = tensor[0].cpu()
for i in range(tensors.shape[0]):
if not torch.equal(tensors[i], tensor):
return False
return True
class _RunOutput:
def __init__(self, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
async def _read_stream(stream, callback):
while True:
line = await stream.readline()
if line:
callback(line)
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
if echo:
print("\nRunning: ", " ".join(cmd))
p = await asyncio.create_subprocess_exec(
cmd[0],
*cmd[1:],
stdin=stdin,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env=env,
)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
out = []
err = []
def tee(line, sink, pipe, label=""):
line = line.decode("utf-8").rstrip()
sink.append(line)
if not quiet:
print(label, line, file=pipe)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
],
timeout=timeout,
)
return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd: list, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
# Cast every path in `cmd` to a string
for i, c in enumerate(cmd):
if isinstance(c, Path):
cmd[i] = str(c)
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
_stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
)
cmd_str = " ".join(cmd)
if result.returncode > 0:
stderr = "\n".join(result.stderr)
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}"
)
return result
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False, env=None):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`
"""
# Cast every path in `command` to a string
for i, c in enumerate(command):
if isinstance(c, Path):
command[i] = str(c)
if env is None:
env = os.environ.copy()
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT, env=env)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
def path_in_accelerate_package(*components: str) -> Path:
"""
Get a path within the `accelerate` package's directory.
Args:
*components: Components of the path to join after the package directory.
Returns:
`Path`: The path to the requested file or directory.
"""
accelerate_package_dir = Path(inspect.getfile(accelerate)).parent
return accelerate_package_dir.joinpath(*components)
@contextmanager
def assert_exception(exception_class: Exception, msg: str = None) -> bool:
"""
Context manager to assert that the right `Exception` class was raised.
If `msg` is provided, will check that the message is contained in the raised exception.
"""
was_ran = False
try:
yield
was_ran = True
except Exception as e:
assert isinstance(e, exception_class), f"Expected exception of type {exception_class} but got {type(e)}"
if msg is not None:
assert msg in str(e), f"Expected message '{msg}' to be in exception but got '{str(e)}'"
if was_ran:
raise AssertionError(f"Expected exception of type {exception_class} but ran without issue.")
def capture_call_output(func, *args, **kwargs):
"""
Takes in a `func` with `args` and `kwargs` and returns the captured stdout as a string
"""
captured_output = io.StringIO()
original_stdout = sys.stdout
try:
sys.stdout = captured_output
func(*args, **kwargs)
except Exception as e:
raise e
finally:
sys.stdout = original_stdout
return captured_output.getvalue()
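# Illustrative sketch (not part of the original module): capturing the stdout of a plain callable.
def _example_capture_call_output():
    output = capture_call_output(print, "hello", "world")
    assert output == "hello world\n"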
| accelerate/src/accelerate/test_utils/testing.py/0 | {
"file_path": "accelerate/src/accelerate/test_utils/testing.py",
"repo_id": "accelerate",
"token_count": 8669
} |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A set of basic tensor ops compatible with tpu, gpu, and multigpu
"""
import pickle
import warnings
from contextlib import contextmanager, nullcontext
from functools import update_wrapper, wraps
from typing import Any, Mapping
import torch
from ..state import AcceleratorState, PartialState
from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES
from .dataclasses import DistributedType, TensorInformation
from .imports import (
is_npu_available,
is_torch_distributed_available,
is_torch_xla_available,
is_xpu_available,
)
if is_torch_xla_available():
import torch_xla.core.xla_model as xm
if is_torch_distributed_available():
from torch.distributed import ReduceOp
def is_torch_tensor(tensor):
return isinstance(tensor, torch.Tensor)
def is_torch_xpu_tensor(tensor):
    # isinstance expects a single tuple of types as its second argument
    return isinstance(
        tensor,
        (
            torch.xpu.FloatTensor,
            torch.xpu.ByteTensor,
            torch.xpu.IntTensor,
            torch.xpu.LongTensor,
            torch.xpu.HalfTensor,
            torch.xpu.DoubleTensor,
            torch.xpu.BFloat16Tensor,
        ),
    )
def is_tensor_information(tensor_info):
return isinstance(tensor_info, TensorInformation)
def is_namedtuple(data):
"""
Checks if `data` is a `namedtuple` or not. Can have false positives, but only if a user is trying to mimic a
`namedtuple` perfectly.
"""
return isinstance(data, tuple) and hasattr(data, "_asdict") and hasattr(data, "_fields")
def honor_type(obj, generator):
"""
Cast a generator to the same type as obj (list, tuple, or namedtuple)
"""
    # Some objects cannot be instantiated directly from a generator
if is_namedtuple(obj):
return type(obj)(*list(generator))
else:
return type(obj)(generator)
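# Illustrative sketch (not part of the original module): `honor_type` rebuilds the original
# container type, including namedtuples, from a generator. The namedtuple below is hypothetical.
def _example_honor_type():
    from collections import namedtuple

    Batch = namedtuple("Batch", ["input_ids", "labels"])
    batch = Batch(input_ids=[1, 2], labels=[0, 1])
    doubled = honor_type(batch, (field * 2 for field in batch))
    assert isinstance(doubled, Batch) and doubled.input_ids == [1, 2, 1, 2]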
def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs):
"""
Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type.
Args:
func (`callable`):
The function to recursively apply.
data (nested list/tuple/dictionary of `main_type`):
The data on which to apply `func`
*args:
Positional arguments that will be passed to `func` when applied on the unpacked data.
        test_type (`callable`, *optional*, defaults to `is_torch_tensor`):
            A function taking an object as input that returns whether `func` should be applied to that object.
        error_on_other_type (`bool`, *optional*, defaults to `False`):
            Whether to raise an error if, after unpacking `data`, we get an object that does not pass `test_type`.
            If `False`, the function will leave objects that do not pass `test_type` unchanged.
**kwargs (additional keyword arguments, *optional*):
Keyword arguments that will be passed to `func` when applied on the unpacked data.
Returns:
        The same data structure as `data` with `func` applied to every object passing `test_type`.
"""
if isinstance(data, (tuple, list)):
return honor_type(
data,
(
recursively_apply(
func, o, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs
)
for o in data
),
)
elif isinstance(data, Mapping):
return type(data)(
{
k: recursively_apply(
func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs
)
for k, v in data.items()
}
)
elif test_type(data):
return func(data, *args, **kwargs)
elif error_on_other_type:
raise TypeError(
f"Unsupported types ({type(data)}) passed to `{func.__name__}`. Only nested list/tuple/dicts of "
f"objects that are valid for `{test_type.__name__}` should be passed."
)
return data
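# Illustrative sketch (not part of the original module): applying a function to every tensor of a
# nested structure while keeping the structure (a dict holding a tensor and a list here) intact.
def _example_recursively_apply():
    data = {"inputs": torch.ones(2, 3), "extras": [torch.zeros(4), torch.ones(1)]}
    doubled = recursively_apply(lambda t: t * 2, data)
    assert torch.equal(doubled["inputs"], torch.full((2, 3), 2.0))
    assert isinstance(doubled["extras"], list) and len(doubled["extras"]) == 2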
def send_to_device(tensor, device, non_blocking=False, skip_keys=None):
"""
Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.
Args:
tensor (nested list/tuple/dictionary of `torch.Tensor`):
The data to send to a given device.
device (`torch.device`):
The device to send the data to.
Returns:
The same data structure as `tensor` with all tensors sent to the proper device.
"""
if is_torch_tensor(tensor) or hasattr(tensor, "to"):
# `torch.Tensor.to("npu")` could not find context when called for the first time (see this [issue](https://gitee.com/ascend/pytorch/issues/I8KECW?from=project-issue)).
if device == "npu":
device = "npu:0"
if device == "xpu":
device = "xpu:0"
try:
return tensor.to(device, non_blocking=non_blocking)
except TypeError: # .to() doesn't accept non_blocking as kwarg
return tensor.to(device)
except AssertionError as error:
# `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
# This call is inside the try-block since is_npu_available is not supported by torch.compile.
if is_npu_available():
if isinstance(device, int):
device = f"npu:{device}"
elif is_xpu_available():
if isinstance(device, int):
device = f"xpu:{device}"
else:
raise error
try:
return tensor.to(device, non_blocking=non_blocking)
except TypeError: # .to() doesn't accept non_blocking as kwarg
return tensor.to(device)
elif isinstance(tensor, (tuple, list)):
return honor_type(
tensor, (send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys) for t in tensor)
)
elif isinstance(tensor, Mapping):
if isinstance(skip_keys, str):
skip_keys = [skip_keys]
elif skip_keys is None:
skip_keys = []
return type(tensor)(
{
k: t if k in skip_keys else send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys)
for k, t in tensor.items()
}
)
else:
return tensor
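# Illustrative sketch (not part of the original module): moving a nested batch to a device while
# leaving skipped keys and non-tensor values untouched. The key names are made up.
def _example_send_to_device():
    batch = {"input_ids": torch.ones(2, 4, dtype=torch.long), "metadata": {"id": 3}}
    moved = send_to_device(batch, torch.device("cpu"), skip_keys="metadata")
    assert moved["input_ids"].device == torch.device("cpu")
    assert moved["metadata"] == {"id": 3}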
def get_data_structure(data):
"""
Recursively gathers the information needed to rebuild a nested list/tuple/dictionary of tensors.
Args:
data (nested list/tuple/dictionary of `torch.Tensor`):
The data to send to analyze.
Returns:
The same data structure as `data` with [`~utils.TensorInformation`] instead of tensors.
"""
def _get_data_structure(tensor):
return TensorInformation(shape=tensor.shape, dtype=tensor.dtype)
return recursively_apply(_get_data_structure, data)
def get_shape(data):
"""
Recursively gathers the shape of a nested list/tuple/dictionary of tensors as a list.
Args:
data (nested list/tuple/dictionary of `torch.Tensor`):
The data to send to analyze.
Returns:
The same data structure as `data` with lists of tensor shapes instead of tensors.
"""
def _get_shape(tensor):
return list(tensor.shape)
return recursively_apply(_get_shape, data)
def initialize_tensors(data_structure):
"""
Recursively initializes tensors from a nested list/tuple/dictionary of [`~utils.TensorInformation`].
Returns:
The same data structure as `data` with tensors instead of [`~utils.TensorInformation`].
"""
def _initialize_tensor(tensor_info):
return torch.empty(*tensor_info.shape, dtype=tensor_info.dtype)
return recursively_apply(_initialize_tensor, data_structure, test_type=is_tensor_information)
def find_batch_size(data):
"""
Recursively finds the batch size in a nested list/tuple/dictionary of lists of tensors.
Args:
data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.
Returns:
`int`: The batch size.
"""
if isinstance(data, (tuple, list, Mapping)) and (len(data) == 0):
raise ValueError(f"Cannot find the batch size from empty {type(data)}.")
if isinstance(data, (tuple, list)):
return find_batch_size(data[0])
elif isinstance(data, Mapping):
for k in data.keys():
return find_batch_size(data[k])
elif not isinstance(data, torch.Tensor):
raise TypeError(f"Can only find the batch size of tensors but got {type(data)}.")
return data.shape[0]
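# Illustrative sketch (not part of the original module): the batch size is read from the first
# tensor found in the (hypothetical) batch structure.
def _example_find_batch_size():
    batch = {"input_ids": torch.zeros(8, 16), "attention_mask": torch.zeros(8, 16)}
    assert find_batch_size(batch) == 8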
def ignorant_find_batch_size(data):
"""
    Same as [`utils.operations.find_batch_size`] except it will ignore `ValueError` and `TypeError` if they are raised
Args:
data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.
Returns:
`int`: The batch size.
"""
try:
return find_batch_size(data)
except (ValueError, TypeError):
pass
return None
def listify(data):
"""
Recursively finds tensors in a nested list/tuple/dictionary and converts them to a list of numbers.
Args:
data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to convert to regular numbers.
Returns:
The same data structure as `data` with lists of numbers instead of `torch.Tensor`.
"""
def _convert_to_list(tensor):
tensor = tensor.detach().cpu()
if tensor.dtype == torch.bfloat16:
# As of Numpy 1.21.4, NumPy does not support bfloat16 (see
# https://github.com/numpy/numpy/blob/a47ecdea856986cd60eabbd53265c2ca5916ad5d/doc/source/user/basics.types.rst ).
        # Until Numpy adds bfloat16, we must convert to float32.
tensor = tensor.to(torch.float32)
return tensor.tolist()
return recursively_apply(_convert_to_list, data)
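# Illustrative sketch (not part of the original module): tensors are detached, moved to CPU and
# converted to plain Python lists, e.g. before JSON serialization.
def _example_listify():
    metrics = {"predictions": torch.tensor([[1, 0], [0, 1]])}
    assert listify(metrics) == {"predictions": [[1, 0], [0, 1]]}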
def _tpu_gather(tensor):
def _tpu_gather_one(tensor):
if tensor.ndim == 0:
tensor = tensor.clone()[None]
# Can only gather contiguous tensors
if not tensor.is_contiguous():
tensor = tensor.contiguous()
return xm.all_gather(tensor)
res = recursively_apply(_tpu_gather_one, tensor, error_on_other_type=True)
xm.mark_step()
return res
def _gpu_gather(tensor):
state = PartialState()
gather_op = torch.distributed.all_gather_into_tensor
def _gpu_gather_one(tensor):
if tensor.ndim == 0:
tensor = tensor.clone()[None]
# Can only gather contiguous tensors
if not tensor.is_contiguous():
tensor = tensor.contiguous()
if state.backend is not None and state.backend != "gloo":
# We use `empty` as `all_gather_into_tensor` slightly
# differs from `all_gather` for better efficiency,
# and we rely on the number of items in the tensor
# rather than its direct shape
output_tensors = torch.empty(
state.num_processes * tensor.numel(),
dtype=tensor.dtype,
device=state.device,
)
gather_op(output_tensors, tensor)
return output_tensors.view(-1, *tensor.size()[1:])
else:
# a backend of `None` is always CPU
# also gloo does not support `all_gather_into_tensor`,
# which will result in a larger memory overhead for the op
output_tensors = [torch.empty_like(tensor) for _ in range(state.num_processes)]
torch.distributed.all_gather(output_tensors, tensor)
return torch.cat(output_tensors, dim=0)
return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)
class DistributedOperationException(Exception):
"""
An exception class for distributed operations. Raised if the operation cannot be performed due to the shape of the
tensors.
"""
pass
def verify_operation(function):
"""
    Verifies that `tensor` is the same shape across all processes. Only run if `PartialState().debug` is `True`.
"""
@wraps(function)
def wrapper(*args, **kwargs):
if PartialState().distributed_type == DistributedType.NO or not PartialState().debug:
return function(*args, **kwargs)
operation = f"{function.__module__}.{function.__name__}"
if "tensor" in kwargs:
tensor = kwargs["tensor"]
else:
tensor = args[0]
if PartialState().device.type != find_device(tensor).type:
raise DistributedOperationException(
f"One or more of the tensors passed to {operation} were not on the {tensor.device.type} while the `Accelerator` is configured for {PartialState().device.type}. "
f"Please move it to the {PartialState().device.type} before calling {operation}."
)
shapes = get_shape(tensor)
output = gather_object([shapes])
if output[0] is not None:
are_same = output.count(output[0]) == len(output)
if not are_same:
process_shape_str = "\n - ".join([f"Process {i}: {shape}" for i, shape in enumerate(output)])
raise DistributedOperationException(
f"Cannot apply desired operation due to shape mismatches. "
"All shapes across devices must be valid."
f"\n\nOperation: `{operation}`\nInput shapes:\n - {process_shape_str}"
)
return function(*args, **kwargs)
return wrapper
def chained_operation(function):
"""
Checks that `verify_operation` failed and if so reports a more helpful error chaining the existing
`DistributedOperationException`.
"""
@wraps(function)
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except DistributedOperationException as e:
operation = f"{function.__module__}.{function.__name__}"
raise DistributedOperationException(
f"Error found while calling `{operation}`. Please see the earlier error for more details."
) from e
return wrapper
@verify_operation
def gather(tensor):
"""
Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices.
Args:
tensor (nested list/tuple/dictionary of `torch.Tensor`):
The data to gather.
Returns:
The same data structure as `tensor` with all tensors sent to the proper device.
"""
if PartialState().distributed_type == DistributedType.XLA:
return _tpu_gather(tensor)
elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
return _gpu_gather(tensor)
else:
return tensor
def _gpu_gather_object(object: Any):
output_objects = [None for _ in range(PartialState().num_processes)]
torch.distributed.all_gather_object(output_objects, object)
# all_gather_object returns a list of lists, so we need to flatten it
return [x for y in output_objects for x in y]
def gather_object(object: Any):
"""
Recursively gather object in a nested list/tuple/dictionary of objects from all devices.
Args:
object (nested list/tuple/dictionary of picklable object):
The data to gather.
Returns:
The same data structure as `object` with all the objects sent to every device.
"""
if PartialState().distributed_type == DistributedType.XLA:
raise NotImplementedError("gather objects in TPU is not supported")
elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
return _gpu_gather_object(object)
else:
return object
def _gpu_broadcast(data, src=0):
def _gpu_broadcast_one(tensor, src=0):
torch.distributed.broadcast(tensor, src=src)
return tensor
return recursively_apply(_gpu_broadcast_one, data, error_on_other_type=True, src=src)
def _tpu_broadcast(tensor, src=0, name="broadcast tensor"):
if isinstance(tensor, (list, tuple)):
return honor_type(tensor, (_tpu_broadcast(t, name=f"{name}_{i}") for i, t in enumerate(tensor)))
elif isinstance(tensor, Mapping):
return type(tensor)({k: _tpu_broadcast(v, name=f"{name}_{k}") for k, v in tensor.items()})
return xm.mesh_reduce(name, tensor, lambda x: x[src])
TENSOR_TYPE_TO_INT = {
torch.float: 1,
torch.double: 2,
torch.half: 3,
torch.bfloat16: 4,
torch.uint8: 5,
torch.int8: 6,
torch.int16: 7,
torch.int32: 8,
torch.int64: 9,
torch.bool: 10,
}
TENSOR_INT_TO_DTYPE = {v: k for k, v in TENSOR_TYPE_TO_INT.items()}
def gather_tensor_shape(tensor):
"""
Grabs the shape of `tensor` only available on one process and returns a tensor of its shape
"""
    # Allocate a fixed-size int buffer (2**20 entries) to store the shape and the encoded dtype
max_tensor_dimension = 2**20
state = PartialState()
base_tensor = torch.empty(max_tensor_dimension, dtype=torch.int, device=state.device)
# Since PyTorch can't just send a tensor to another GPU without
# knowing its size, we store the size of the tensor with data
# in an allocation
if tensor is not None:
shape = tensor.shape
tensor_dtype = TENSOR_TYPE_TO_INT[tensor.dtype]
base_tensor[: len(shape) + 1] = torch.tensor(list(shape) + [tensor_dtype], dtype=int)
# Perform a reduction to copy the size data onto all GPUs
base_tensor = reduce(base_tensor, reduction="sum")
base_tensor = base_tensor[base_tensor.nonzero()]
# The last non-zero data contains the coded dtype the source tensor is
dtype = int(base_tensor[-1:][0])
base_tensor = base_tensor[:-1]
return base_tensor, dtype
def copy_tensor_to_devices(tensor=None) -> torch.Tensor:
"""
    Copies a tensor that only exists on a single device and broadcasts it to other devices. Differs from `broadcast` as
each worker doesn't need to know its shape when used (and tensor can be `None`)
Args:
tensor (`torch.tensor`):
            The tensor that should be sent to all devices. It must be defined on a single device only; on the rest of
            the devices it should be `None`.
"""
state = PartialState()
shape, dtype = gather_tensor_shape(tensor)
if tensor is None:
tensor = torch.zeros(shape, dtype=TENSOR_INT_TO_DTYPE[dtype]).to(state.device)
return reduce(tensor, reduction="sum")
@verify_operation
def broadcast(tensor, from_process: int = 0):
"""
Recursively broadcast tensor in a nested list/tuple/dictionary of tensors to all devices.
Args:
tensor (nested list/tuple/dictionary of `torch.Tensor`):
            The data to broadcast.
from_process (`int`, *optional*, defaults to 0):
The process from which to send the data
Returns:
The same data structure as `tensor` with all tensors broadcasted to the proper device.
"""
if PartialState().distributed_type == DistributedType.XLA:
return _tpu_broadcast(tensor, src=from_process, name="accelerate.utils.broadcast")
elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
return _gpu_broadcast(tensor, src=from_process)
else:
return tensor
def broadcast_object_list(object_list, from_process: int = 0):
"""
    Broadcast a list of picklable objects from one process to the others.
Args:
object_list (list of picklable objects):
The list of objects to broadcast. This list will be modified inplace.
from_process (`int`, *optional*, defaults to 0):
The process from which to send the data.
Returns:
The same list containing the objects from process 0.
"""
if PartialState().distributed_type == DistributedType.XLA:
for i, obj in enumerate(object_list):
object_list[i] = xm.mesh_reduce("accelerate.utils.broadcast_object_list", obj, lambda x: x[from_process])
elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
torch.distributed.broadcast_object_list(object_list, src=from_process)
return object_list
def slice_tensors(data, tensor_slice, process_index=None, num_processes=None):
"""
Recursively takes a slice in a nested list/tuple/dictionary of tensors.
Args:
data (nested list/tuple/dictionary of `torch.Tensor`):
The data to slice.
tensor_slice (`slice`):
The slice to take.
Returns:
        The same data structure as `data` with all the tensors sliced.
"""
def _slice_tensor(tensor, tensor_slice):
return tensor[tensor_slice]
return recursively_apply(_slice_tensor, data, tensor_slice)
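# Illustrative sketch (not part of the original module): taking the same slice over every tensor
# of a nested structure, e.g. to drop padded samples after a gather.
def _example_slice_tensors():
    data = {"input_ids": torch.zeros(6, 4), "labels": torch.zeros(6)}
    sliced = slice_tensors(data, slice(0, 4))
    assert sliced["input_ids"].shape == (4, 4) and sliced["labels"].shape == (4,)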
def concatenate(data, dim=0):
"""
Recursively concatenate the tensors in a nested list/tuple/dictionary of lists of tensors with the same shape.
Args:
data (nested list/tuple/dictionary of lists of tensors `torch.Tensor`):
The data to concatenate.
dim (`int`, *optional*, defaults to 0):
The dimension on which to concatenate.
Returns:
The same data structure as `data` with all the tensors concatenated.
"""
if isinstance(data[0], (tuple, list)):
return honor_type(data[0], (concatenate([d[i] for d in data], dim=dim) for i in range(len(data[0]))))
elif isinstance(data[0], Mapping):
return type(data[0])({k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})
elif not isinstance(data[0], torch.Tensor):
raise TypeError(f"Can only concatenate tensors but got {type(data[0])}")
return torch.cat(data, dim=dim)
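# Illustrative sketch (not part of the original module): concatenating a list of structurally
# identical batches along the batch dimension.
def _example_concatenate():
    batches = [{"logits": torch.ones(2, 5)}, {"logits": torch.ones(3, 5)}]
    merged = concatenate(batches, dim=0)
    assert merged["logits"].shape == (5, 5)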
class CannotPadNestedTensorWarning(UserWarning):
pass
@chained_operation
def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
"""
Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so they
can safely be gathered.
Args:
tensor (nested list/tuple/dictionary of `torch.Tensor`):
The data to gather.
dim (`int`, *optional*, defaults to 0):
The dimension on which to pad.
pad_index (`int`, *optional*, defaults to 0):
The value with which to pad.
pad_first (`bool`, *optional*, defaults to `False`):
Whether to pad at the beginning or the end.
"""
def _pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
if getattr(tensor, "is_nested", False):
warnings.warn(
"Cannot pad nested tensors without more information. Leaving unprocessed.",
CannotPadNestedTensorWarning,
)
return tensor
if dim >= len(tensor.shape) or dim < -len(tensor.shape):
return tensor
# Convert negative dimensions to non-negative
if dim < 0:
dim += len(tensor.shape)
# Gather all sizes
size = torch.tensor(tensor.shape, device=tensor.device)[None]
sizes = gather(size).cpu()
# Then pad to the maximum size
max_size = max(s[dim] for s in sizes)
if max_size == tensor.shape[dim]:
return tensor
old_size = tensor.shape
new_size = list(old_size)
new_size[dim] = max_size
new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
if pad_first:
indices = tuple(
slice(max_size - old_size[dim], max_size) if i == dim else slice(None) for i in range(len(new_size))
)
else:
indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))
new_tensor[indices] = tensor
return new_tensor
return recursively_apply(
_pad_across_processes, tensor, error_on_other_type=True, dim=dim, pad_index=pad_index, pad_first=pad_first
)
def pad_input_tensors(tensor, batch_size, num_processes, dim=0):
"""
Takes a `tensor` of arbitrary size and pads it so that it can work given `num_processes` needed dimensions.
    Rows added by the padding are zero-filled.
E.g.:
Tensor: ([3,4,4]) Num processes: 4 Expected result shape: ([4,4,4])
"""
def _pad_input_tensors(tensor, batch_size, num_processes, dim=0):
remainder = batch_size // num_processes
last_inputs = batch_size - (remainder * num_processes)
if batch_size // num_processes == 0:
to_pad = num_processes - batch_size
else:
to_pad = num_processes - (batch_size // num_processes)
# In the rare case that `to_pad` is negative,
# we need to pad the last inputs - the found `to_pad`
        if last_inputs > to_pad and to_pad < 1:
to_pad = last_inputs - to_pad
old_size = tensor.shape
new_size = list(old_size)
new_size[0] = batch_size + to_pad
new_tensor = tensor.new_zeros(tuple(new_size))
indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))
new_tensor[indices] = tensor
return new_tensor
return recursively_apply(
_pad_input_tensors,
tensor,
error_on_other_type=True,
batch_size=batch_size,
num_processes=num_processes,
dim=dim,
)
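# Illustrative sketch (not part of the original module): the case from the docstring above, where
# a batch of 3 samples is padded so it can be split evenly across 4 processes.
def _example_pad_input_tensors():
    tensor = torch.randn(3, 4, 4)
    padded = pad_input_tensors(tensor, batch_size=3, num_processes=4)
    assert padded.shape == (4, 4, 4)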
@verify_operation
def reduce(tensor, reduction="mean", scale=1.0):
"""
    Recursively reduce the tensors in a nested list/tuple/dictionary of lists of tensors across all processes using
    a given operation.
Args:
tensor (nested list/tuple/dictionary of `torch.Tensor`):
The data to reduce.
reduction (`str`, *optional*, defaults to `"mean"`):
            A reduction method. Can be one of "mean", "sum", or "none".
        scale (`float`, *optional*):
            A default scaling value to be applied after the reduce, only valid on XLA.
Returns:
The same data structure as `data` with all the tensors reduced.
"""
def _reduce_across_processes(tensor, reduction="mean", scale=1.0):
state = PartialState()
cloned_tensor = tensor.clone()
if state.distributed_type == DistributedType.NO:
return cloned_tensor
if state.distributed_type == DistributedType.XLA:
# Some processes may have different HLO graphs than other
# processes, for example in the breakpoint API
# accelerator.set_trigger(). Use mark_step to make HLOs
# the same on all processes.
xm.mark_step()
xm.all_reduce(xm.REDUCE_SUM, [cloned_tensor], scale)
xm.mark_step()
elif state.distributed_type.value in TORCH_DISTRIBUTED_OPERATION_TYPES:
torch.distributed.all_reduce(cloned_tensor, ReduceOp.SUM)
if reduction == "mean":
cloned_tensor /= state.num_processes
return cloned_tensor
return recursively_apply(
_reduce_across_processes, tensor, error_on_other_type=True, reduction=reduction, scale=scale
)
def convert_to_fp32(tensor):
"""
    Recursively converts the elements of a nested list/tuple/dictionary of tensors in FP16/BF16 precision to FP32.
Args:
tensor (nested list/tuple/dictionary of `torch.Tensor`):
The data to convert from FP16/BF16 to FP32.
Returns:
The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32.
"""
def _convert_to_fp32(tensor):
return tensor.float()
def _is_fp16_bf16_tensor(tensor):
return (is_torch_tensor(tensor) or hasattr(tensor, "dtype")) and tensor.dtype in (
torch.float16,
torch.bfloat16,
)
return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)
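# Illustrative sketch (not part of the original module): upcasting mixed-precision outputs while
# leaving non-floating-point tensors untouched.
def _example_convert_to_fp32():
    outputs = {"logits": torch.ones(2, 2, dtype=torch.float16), "ids": torch.ones(2, dtype=torch.long)}
    converted = convert_to_fp32(outputs)
    assert converted["logits"].dtype == torch.float32 and converted["ids"].dtype == torch.long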
class ConvertOutputsToFp32:
"""
    Decorator to apply to a function outputting tensors (like a model forward pass) that ensures the outputs in FP16
    precision will be converted back to FP32.
Args:
model_forward (`Callable`):
            The function whose outputs we want to treat.
Returns:
The same function as `model_forward` but with converted outputs.
"""
def __init__(self, model_forward):
self.model_forward = model_forward
update_wrapper(self, model_forward)
def __call__(self, *args, **kwargs):
return convert_to_fp32(self.model_forward(*args, **kwargs))
def __getstate__(self):
raise pickle.PicklingError(
"Cannot pickle a prepared model with automatic mixed precision, please unwrap the model with `Accelerator.unwrap_model(model)` before pickling it."
)
def convert_outputs_to_fp32(model_forward):
model_forward = ConvertOutputsToFp32(model_forward)
def forward(*args, **kwargs):
return model_forward(*args, **kwargs)
# To act like a decorator so that it can be popped when doing `extract_model_from_parallel`
forward.__wrapped__ = model_forward
return forward
def find_device(data):
"""
Finds the device on which a nested dict/list/tuple of tensors lies (assuming they are all on the same device).
Args:
(nested list/tuple/dictionary of `torch.Tensor`): The data we want to know the device of.
"""
if isinstance(data, Mapping):
for obj in data.values():
device = find_device(obj)
if device is not None:
return device
elif isinstance(data, (tuple, list)):
for obj in data:
device = find_device(obj)
if device is not None:
return device
elif isinstance(data, torch.Tensor):
return data.device
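# Illustrative sketch (not part of the original module): the first tensor found in the nested
# structure determines the reported device.
def _example_find_device():
    batch = {"input_ids": torch.ones(1, 4), "labels": [torch.zeros(1)]}
    assert find_device(batch) == torch.device("cpu")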
@contextmanager
def GatheredParameters(params, modifier_rank=None, fwd_module=None, enabled=True):
"""
Wrapper around `deepspeed.runtime.zero.GatheredParameters`, but if Zero-3 is not enabled, will be a no-op context
manager.
"""
# We need to use the `AcceleratorState` here since it has access to the deepspeed plugin
if AcceleratorState().distributed_type != DistributedType.DEEPSPEED or (
AcceleratorState().deepspeed_plugin is not None
and not AcceleratorState().deepspeed_plugin.is_zero3_init_enabled()
):
gather_param_context = nullcontext()
else:
import deepspeed
gather_param_context = deepspeed.zero.GatheredParameters(
params, modifier_rank=modifier_rank, fwd_module=fwd_module, enabled=enabled
)
with gather_param_context:
yield
| accelerate/src/accelerate/utils/operations.py/0 | {
"file_path": "accelerate/src/accelerate/utils/operations.py",
"repo_id": "accelerate",
"token_count": 12875
} |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import gc
import logging
import os
import unittest
from collections import OrderedDict
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from transformers import AutoModelForCausalLM, AutoTokenizer
from accelerate.big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from accelerate.hooks import remove_hook_from_submodules
from accelerate.test_utils import (
require_bnb,
require_cuda,
require_cuda_or_xpu,
require_multi_device,
require_multi_gpu,
require_non_cpu,
require_non_torch_xla,
slow,
torch_device,
)
from accelerate.utils import offload_state_dict
logger = logging.getLogger(__name__)
torch_device = f"{torch_device}:0" if torch_device != "cpu" else "cpu"
class ModelForTest(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(3, 4)
self.batchnorm = nn.BatchNorm1d(4)
self.linear2 = nn.Linear(4, 5)
def forward(self, x):
return self.linear2(self.batchnorm(self.linear1(x)))
class LinearWithNonPersistentBuffers(nn.Module):
def __init__(self, in_features: int, out_features: int, bias: bool = True, device=None, dtype=None) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.register_buffer("weight", torch.ones((out_features, in_features), **factory_kwargs))
if bias:
self.register_buffer("bias", torch.ones(out_features, **factory_kwargs), persistent=False)
else:
self.register_buffer("bias", None)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.linear(input, self.weight, self.bias)
class ModelForTestNonPersistentBuffers(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = LinearWithNonPersistentBuffers(3, 4)
self.batchnorm = nn.BatchNorm1d(4)
self.linear2 = LinearWithNonPersistentBuffers(4, 5)
def forward(self, x):
return self.linear2(self.batchnorm(self.linear1(x)))
class ModelForTestCopy(nn.Module):
def __init__(self, id: int):
super().__init__()
self.id = id
self.linear1 = nn.Linear(3, 4)
self.batchnorm = nn.BatchNorm1d(4)
self.linear2 = nn.Linear(4, 5)
def forward(self, x):
return self.linear2(self.batchnorm(self.linear1(x))), self.id
class ModelForTestTiedWeights(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(4, 4)
self.batchnorm = nn.BatchNorm1d(4)
self.linear2 = nn.Linear(4, 4)
def forward(self, x):
return self.linear2(self.batchnorm(self.linear1(x)))
class BiggerModelForTest(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(3, 4)
self.linear2 = nn.Linear(4, 5)
self.batchnorm = nn.BatchNorm1d(5)
self.linear3 = nn.Linear(5, 6)
self.linear4 = nn.Linear(6, 5)
def forward(self, x):
return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x)))))
# To test preload_module_classes
class ModuleWithUnusedSubModules(nn.Module):
def __init__(self, input_dim, output_dim):
super().__init__()
self.linear = nn.Linear(input_dim, output_dim)
def forward(self, x):
return x @ self.linear.weight.t() + self.linear.bias
class ModelWithUnusedSubModulesForTest(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = ModuleWithUnusedSubModules(3, 4)
self.linear2 = ModuleWithUnusedSubModules(4, 5)
self.batchnorm = nn.BatchNorm1d(5)
self.linear3 = ModuleWithUnusedSubModules(5, 6)
self.linear4 = ModuleWithUnusedSubModules(6, 5)
def forward(self, x):
return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x)))))
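# Illustrative sketch (added note, not one of the original tests): because
# `ModuleWithUnusedSubModules.forward` reads `self.linear`'s parameters directly
# instead of calling the submodule, offload hooks attached to `linear` would never
# fire. Passing the class name through `preload_module_classes` makes the offload
# utilities load such modules in full before the forward pass, e.g.:
#
#     model = ModelWithUnusedSubModulesForTest()
#     cpu_offload(
#         model,
#         execution_device=torch.device(torch_device),
#         preload_module_classes=["ModuleWithUnusedSubModules"],
#     )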
class BigModelingTester(unittest.TestCase):
def test_init_empty_weights(self):
# base use
with init_empty_weights():
module = nn.Linear(4, 5)
assert module.weight.device == torch.device("meta")
# base use with buffers, they are not touched
with init_empty_weights():
module = nn.BatchNorm1d(4)
assert module.weight.device == torch.device("meta")
assert module.running_mean.device == torch.device("cpu")
# Use with include_buffers=True
register_parameter_func = nn.Module.register_parameter
register_buffer_func = nn.Module.register_buffer
with init_empty_weights(include_buffers=True):
module = nn.BatchNorm1d(4)
# nn.Module.register_parameter/buffer shouldn't be changed with torch >= 2.0
assert register_parameter_func == nn.Module.register_parameter
assert register_buffer_func == nn.Module.register_buffer
assert module.weight.device == torch.device("meta")
assert module.running_mean.device == torch.device("meta")
# Double check we didn't break PyTorch
module = nn.BatchNorm1d(4)
assert module.weight.device == torch.device("cpu")
assert module.running_mean.device == torch.device("cpu")
def test_init_empty_weights_very_large_model(self):
        # This is a 100-billion-parameter model.
with init_empty_weights():
_ = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
@require_non_cpu
def test_init_on_device(self):
device = torch.device(torch_device)
with init_on_device(device):
model = nn.Linear(10, 10)
assert model.weight.device == device
assert model.weight.device == device
def test_cpu_offload(self):
model = ModelForTest()
x = torch.randn(2, 3)
expected = model(x)
device = torch.device(torch_device)
cpu_offload(model, execution_device=device)
output = model(x)
assert torch.allclose(expected, output.cpu(), 1e-4, 1e-5), f"Expected: {expected}, Actual: {output.cpu()}"
# Clean up for next test.
remove_hook_from_submodules(model)
cpu_offload(model, execution_device=device, offload_buffers=True)
output = model(x)
assert torch.allclose(expected, output.cpu(), 1e-4, 1e-5), f"Expected: {expected}, Actual: {output.cpu()}"
def test_cpu_offload_with_unused_submodules(self):
model = ModelWithUnusedSubModulesForTest()
x = torch.randn(2, 3)
expected = model(x)
device = torch.device(torch_device)
cpu_offload(model, execution_device=device, preload_module_classes=["ModuleWithUnusedSubModules"])
output = model(x)
assert torch.allclose(expected, output.cpu(), 1e-4, 1e-5), f"Expected: {expected}, Actual: {output.cpu()}"
# Clean up for next test.
remove_hook_from_submodules(model)
cpu_offload(
model,
execution_device=device,
offload_buffers=True,
preload_module_classes=["ModuleWithUnusedSubModules"],
)
output = model(x)
assert torch.allclose(expected, output.cpu(), 1e-4, 1e-5), f"Expected: {expected}, Actual: {output.cpu()}"
@slow
@require_non_cpu
def test_cpu_offload_gpt2(self):
tokenizer = AutoTokenizer.from_pretrained("gpt2")
inputs = tokenizer("Hello world! My name is", return_tensors="pt").to(torch_device)
gpt2 = AutoModelForCausalLM.from_pretrained("gpt2")
cpu_offload(gpt2, execution_device=0)
outputs = gpt2.generate(inputs["input_ids"], max_new_tokens=10)
assert tokenizer.decode(outputs[0].tolist()) == "Hello world! My name is Kiyoshi, and I'm a student at"
def test_disk_offload(self):
model = ModelForTest()
x = torch.randn(2, 3)
expected = model(x)
device = torch.device(torch_device)
with TemporaryDirectory() as tmp_dir:
disk_offload(model, tmp_dir, execution_device=device)
output = model(x)
assert torch.allclose(expected, output.cpu(), 1e-4, 1e-5), f"Expected: {expected}, Actual: {output.cpu()}"
# Clean up for next test.
remove_hook_from_submodules(model)
with TemporaryDirectory() as tmp_dir:
disk_offload(model, tmp_dir, execution_device=device, offload_buffers=True)
output = model(x)
assert torch.allclose(expected, output.cpu(), 1e-4, 1e-5), f"Expected: {expected}, Actual: {output.cpu()}"
def test_disk_offload_with_unused_submodules(self):
model = ModelWithUnusedSubModulesForTest()
x = torch.randn(2, 3)
expected = model(x)
device = torch.device(torch_device)
with TemporaryDirectory() as tmp_dir:
disk_offload(
model, tmp_dir, execution_device=device, preload_module_classes=["ModuleWithUnusedSubModules"]
)
output = model(x)
assert torch.allclose(expected, output.cpu(), 1e-4, 1e-5), f"Expected: {expected}, Actual: {output.cpu()}"
# Clean up for next test.
remove_hook_from_submodules(model)
with TemporaryDirectory() as tmp_dir:
disk_offload(
model,
tmp_dir,
execution_device=device,
offload_buffers=True,
preload_module_classes=["ModuleWithUnusedSubModules"],
)
output = model(x)
assert torch.allclose(expected, output.cpu(), 1e-4, 1e-5), f"Expected: {expected}, Actual: {output.cpu()}"
@slow
@require_non_cpu
def test_disk_offload_gpt2(self):
tokenizer = AutoTokenizer.from_pretrained("gpt2")
inputs = tokenizer("Hello world! My name is", return_tensors="pt").to(torch_device)
gpt2 = AutoModelForCausalLM.from_pretrained("gpt2")
with TemporaryDirectory() as tmp_dir:
disk_offload(gpt2, tmp_dir, execution_device=0)
outputs = gpt2.generate(inputs["input_ids"], max_new_tokens=10)
assert tokenizer.decode(outputs[0].tolist()) == "Hello world! My name is Kiyoshi, and I'm a student at"
@require_non_cpu
def test_dispatch_model_and_remove_hook(self):
model = ModelForTest()
device_map = {"linear1": "cpu", "batchnorm": "cpu", "linear2": 0}
x = torch.randn(2, 3)
expected = model(x)
with TemporaryDirectory() as tmp_dir:
dispatch_model(model, device_map, offload_dir=tmp_dir)
output = model(x)
remove_hook_from_submodules(model)
# need to check if we get any warning
with self.assertLogs(level="WARNING") as cm:
# We want to assert there are no warnings, but the 'assertLogs' method does not support that.
# Therefore, we are adding a dummy warning, and then we will assert it is the only warning.
model.to(torch_device)
logger.warning("Dummy warning")
self.assertEqual(len(cm.records), 1)
self.assertIn(
"Dummy warning",
cm.records[0].message,
)
output_bis = model(x.to(torch_device))
assert torch.allclose(expected, output.cpu(), atol=1e-5)
assert torch.allclose(expected, output_bis.cpu(), atol=1e-5)
@require_non_cpu
def test_dispatch_model(self):
model = ModelForTest()
device_map = {"linear1": "disk", "batchnorm": "cpu", "linear2": 0}
x = torch.randn(2, 3)
expected = model(x)
with TemporaryDirectory() as tmp_dir:
dispatch_model(model, device_map, offload_dir=tmp_dir)
output = model(x)
assert torch.allclose(expected, output.cpu(), atol=1e-5)
@require_non_cpu
def test_dispatch_model_with_non_persistent_buffers(self):
model = ModelForTestNonPersistentBuffers()
device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": "disk"}
x = torch.randn(2, 3)
expected = model(x)
with TemporaryDirectory() as tmp_dir:
dispatch_model(model, device_map, offload_dir=tmp_dir, offload_buffers=True)
output = model(x)
assert torch.allclose(expected, output.cpu(), atol=1e-5)
@require_non_cpu
def test_dispatch_model_tied_weights(self):
model = ModelForTestTiedWeights()
model.linear1.weight = model.linear2.weight
device_map = {"linear1": 0, "batchnorm": 0, "linear2": 0}
dispatch_model(model, device_map)
assert model.linear2.weight is model.linear1.weight
@require_multi_gpu
def test_dispatch_model_tied_weights_memory(self):
        # Test that we do not duplicate tied weights at any point during the dispatch_model call.
torch.cuda.empty_cache() # Needed in case we run several tests in a row.
model = nn.Sequential(
OrderedDict(
[
("linear0", nn.Linear(5000, 5000, bias=False)),
("linear1", nn.Linear(5000, 5000, bias=False)),
("linear2", nn.Linear(5000, 5000, bias=False)),
("linear3", nn.Linear(5000, 5000, bias=False)),
("linear4", nn.Linear(5000, 5000, bias=False)),
]
)
)
model.linear2.weight = model.linear0.weight
model.linear3.weight = model.linear0.weight
model.linear4.weight = model.linear0.weight
x = torch.randn(5, 5000)
with torch.no_grad():
expected = model(x)
        # We should need only 5000 * 5000 * 32 // 8 * 1e-6 = 100 MB on device 0 for the four tied linear weights, since they share a single tensor.
device_map = {"linear0": 0, "linear1": 1, "linear2": 0, "linear3": 0, "linear4": 0}
        # Just to initialize the CUDA context.
a = torch.rand(5).to("cuda:0") # noqa: F841
free_memory_bytes = torch.cuda.mem_get_info("cuda:0")[0]
required_memory_bytes = 5000 * 5000 * (32 // 8)
# Leaving 50 MB of free memory for possible buffers, etc.
n_vals = (free_memory_bytes - required_memory_bytes - int(50e6)) // (32 // 8)
foo = torch.rand(n_vals, device="cuda:0") # noqa: F841
        # If this OOMs: there is an issue somewhere in dispatch_model and the memory of tied weights is duplicated.
try:
dispatch_model(model, device_map)
except torch.cuda.OutOfMemoryError as e:
raise torch.cuda.OutOfMemoryError(
f"OOM error in dispatch_model. This is a bug and should not happen, see test_dispatch_model_tied_weights_memory. {e}"
)
except Exception as e:
raise e
with torch.no_grad():
output = model(x)
assert torch.allclose(expected, output.cpu(), atol=1e-5)
@require_cuda
def test_dispatch_model_tied_weights_memory_with_nested_offload_cpu(self):
        # Test that we do not duplicate tied weights at any point during the dispatch_model call.
torch.cuda.empty_cache() # Needed in case we run several tests in a row.
class SubModule(torch.nn.Module):
def __init__(self, ref_to_parameter):
super().__init__()
self.parameter = ref_to_parameter
def forward(self, x):
return x + torch.max(self.parameter)
class LinearModuleAndSubModule(torch.nn.Linear):
def __init__(self, in_features, out_features):
super().__init__(in_features, out_features, bias=False)
self.weight_submodule = SubModule(self.weight)
self.weight_submodule2 = SubModule(self.weight)
self.weight_submodule3 = SubModule(self.weight)
self.weight_submodule4 = SubModule(self.weight)
def forward(self, x):
a = torch.nn.functional.linear(self.weight_submodule(x), self.weight)
b = torch.nn.functional.linear(self.weight_submodule2(x), self.weight)
c = torch.nn.functional.linear(self.weight_submodule3(x), self.weight)
d = torch.nn.functional.linear(self.weight_submodule4(x), self.weight)
return a + b + c + d
class ModelWithSubmodules(torch.nn.Module):
def __init__(self):
super().__init__()
self.compute = LinearModuleAndSubModule(5000, 5000)
self.compute1 = LinearModuleAndSubModule(5000, 5000)
def forward(self, x):
a = self.compute(x)
b = self.compute1(x)
return a + b
        # We should need only 2 * 5000 * 5000 * 32 // 8 * 1e-6 = 200 MB on device 0 for the whole model forward, and not 600 MB.
device_map = {"compute": 0, "compute1": "cpu"}
model = ModelWithSubmodules()
x = torch.randn(1, 5000)
with torch.no_grad():
expected = model(x)
        # Just to initialize the CUDA context.
a = torch.rand(5).to("cuda:0") # noqa: F841
free_memory_bytes = torch.cuda.mem_get_info("cuda:0")[0]
required_memory_bytes = 2 * 5000 * 5000 * (32 // 8) # 200 MB
# Leaving 150 MB of free memory for possible buffers, etc.
n_vals = (free_memory_bytes - required_memory_bytes - int(150e6)) // (32 // 8)
foo = torch.rand(n_vals, device="cuda:0") # noqa: F841
free_memory_bytes_before_dispatch = torch.cuda.mem_get_info("cuda:0")[0]
dispatch_model(model, device_map)
free_memory_bytes_after_dispatch = torch.cuda.mem_get_info("cuda:0")[0]
assert (free_memory_bytes_after_dispatch - free_memory_bytes_before_dispatch) * 1e-6 < 130
original_pointer = model.compute1._hf_hook.weights_map["weight"].data_ptr()
with torch.no_grad():
try:
output = model(x)
except torch.cuda.OutOfMemoryError as e:
raise torch.cuda.OutOfMemoryError(
f"OOM error in dispatch_model. This is a bug and should not happen, see test_dispatch_model_tied_weights_memory_with_nested_offload_cpu. {e}"
)
except Exception as e:
raise e
assert torch.allclose(expected, output.cpu(), atol=1e-5)
torch.cuda.empty_cache()
free_memory_bytes_after_infer = torch.cuda.mem_get_info("cuda:0")[0]
# Check that we have no more references on GPU for the offloaded tied weight.
assert len(model.compute1.weight_submodule._hf_hook.tied_params_map[original_pointer]) == 0
assert len(model.compute1._hf_hook.tied_params_map[original_pointer]) == 0
assert (free_memory_bytes_after_infer - free_memory_bytes_after_dispatch) * 1e-6 < 130
        # Test is flaky otherwise.
del model
gc.collect()
    # This test fails because sometimes data_ptr() of compute.weight is the same as data_ptr() of compute1.weight.
    # I checked that the values are not the same, but they report the same address. This does not happen on my local machine.
@require_cuda
@unittest.skip(
"Flaky test, we should have enough coverage with test_dispatch_model_tied_weights_memory_with_nested_offload_cpu test"
)
def test_dispatch_model_tied_weights_memory_with_nested_offload_disk(self):
        # Test that we do not duplicate tied weights at any point during the dispatch_model call.
torch.cuda.empty_cache() # Needed in case we run several tests in a row.
class SubModule(torch.nn.Module):
def __init__(self, ref_to_parameter):
super().__init__()
self.parameter = ref_to_parameter
def forward(self, x):
return x + torch.max(self.parameter)
class LinearModuleAndSubModule(torch.nn.Linear):
def __init__(self, in_features, out_features):
super().__init__(in_features, out_features, bias=False)
self.weight_submodule = SubModule(self.weight)
self.weight_submodule2 = SubModule(self.weight)
self.weight_submodule3 = SubModule(self.weight)
self.weight_submodule4 = SubModule(self.weight)
def forward(self, x):
a = torch.nn.functional.linear(self.weight_submodule(x), self.weight)
b = torch.nn.functional.linear(self.weight_submodule2(x), self.weight)
c = torch.nn.functional.linear(self.weight_submodule3(x), self.weight)
d = torch.nn.functional.linear(self.weight_submodule4(x), self.weight)
return a + b + c + d
class ModelWithSubmodules(torch.nn.Module):
def __init__(self):
super().__init__()
self.compute = LinearModuleAndSubModule(5000, 5000)
self.compute1 = LinearModuleAndSubModule(5000, 5000)
def forward(self, x):
a = self.compute(x)
b = self.compute1(x)
return a + b
        # We should need only 2 * 5000 * 5000 * 32 // 8 * 1e-6 = 200 MB on device 0 for the whole model forward, and not 600 MB.
device_map = {"compute": 0, "compute1": "disk"}
model = ModelWithSubmodules()
x = torch.randn(1, 5000)
with torch.no_grad():
expected = model(x)
        # Just to initialize the CUDA context.
a = torch.rand(5).to("cuda:0") # noqa: F841
free_memory_bytes = torch.cuda.mem_get_info("cuda:0")[0]
required_memory_bytes = 2 * 5000 * 5000 * (32 // 8) # 200 MB
        # Leaving 200 MB of free memory for possible buffers, etc.
n_vals = (free_memory_bytes - required_memory_bytes - int(200e6)) // (32 // 8)
foo = torch.rand(n_vals, device="cuda:0") # noqa: F841
free_memory_bytes_before_dispatch = torch.cuda.mem_get_info("cuda:0")[0]
with TemporaryDirectory() as tmp_dir:
dispatch_model(model, device_map, offload_dir=tmp_dir)
free_memory_bytes_after_dispatch = torch.cuda.mem_get_info("cuda:0")[0]
assert (free_memory_bytes_after_dispatch - free_memory_bytes_before_dispatch) * 1e-6 < 130
with torch.no_grad():
try:
output = model(x)
except torch.cuda.OutOfMemoryError as e:
raise torch.cuda.OutOfMemoryError(
f"OOM error in dispatch_model. This is a bug and should not happen, see test_dispatch_model_tied_weights_memory_with_nested_offload_disk. {e}"
)
except Exception as e:
raise e
assert torch.allclose(expected, output.cpu(), atol=1e-5)
torch.cuda.empty_cache()
free_memory_bytes_after_infer = torch.cuda.mem_get_info("cuda:0")[0]
# Check that we have no more references on GPU for the offloaded tied weight.
n_non_empty = 0
for pointer, pointer_dict in model.compute1.weight_submodule._hf_hook.tied_params_map.items():
if len(pointer_dict) > 0:
n_non_empty += 1
            assert n_non_empty == 1  # the one remaining entry, from the `compute` layer.
n_non_empty = 0
for pointer, pointer_dict in model.compute1._hf_hook.tied_params_map.items():
if len(pointer_dict) > 0:
n_non_empty += 1
            assert n_non_empty == 1  # the one remaining entry, from the `compute` layer.
assert (free_memory_bytes_after_infer - free_memory_bytes_after_dispatch) * 1e-6 < 130
@require_multi_device
def test_dispatch_model_multi_devices(self):
model = BiggerModelForTest()
device_map = {"linear1": "cpu", "linear2": "disk", "batchnorm": "cpu", "linear3": 0, "linear4": 1}
x = torch.randn(2, 3)
expected = model(x)
with TemporaryDirectory() as tmp_dir:
dispatch_model(model, device_map, offload_dir=tmp_dir)
output = model(x)
assert torch.allclose(expected, output.cpu(), atol=1e-5)
@require_non_cpu
def test_dispatch_model_copy(self):
original_model = ModelForTestCopy(id=1)
device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": 0}
x = torch.randn(2, 3)
expected, original_output_id = original_model(x)
dispatch_model(original_model, device_map)
copied_model = copy.deepcopy(original_model)
copied_model.id = 2
output, copied_output_id = copied_model(x)
assert original_model.id == original_output_id
assert copied_model.id == copied_output_id
assert copied_model.linear1.forward is not original_model.linear1.forward
assert torch.allclose(expected, output.cpu(), atol=1e-5)
@require_non_cpu
def test_dispatch_model_move_offloaded_model(self):
model = ModelForTest()
device_map = {"linear1": "disk", "batchnorm": "cpu", "linear2": 0}
with TemporaryDirectory() as tmp_dir:
dispatch_model(model, device_map, offload_dir=tmp_dir)
with self.assertRaises(RuntimeError):
model.to(0)
@require_multi_device
def test_dispatch_model_move_model_warning(self):
model = ModelForTest()
device_map = {"linear1": 0, "batchnorm": 0, "linear2": 1}
with TemporaryDirectory() as tmp_dir:
dispatch_model(model, device_map, offload_dir=tmp_dir)
with self.assertLogs("accelerate.big_modeling", level="WARNING"):
model.to("cpu")
with self.assertLogs("accelerate.big_modeling", level="WARNING"):
model.to(torch_device)
with self.assertRaises(RuntimeError):
x = torch.randn(2, 3)
model(x)
@slow
@require_multi_device
def test_dispatch_model_gpt2_on_two_devices(self):
tokenizer = AutoTokenizer.from_pretrained("gpt2")
inputs = tokenizer("Hello world! My name is", return_tensors="pt").to(torch_device)
gpt2 = AutoModelForCausalLM.from_pretrained("gpt2")
# Dispatch on GPUs 0 and 1
device_map = {
"transformer.wte": 0,
"transformer.wpe": 0,
"transformer.ln_f": 1,
"lm_head": 0,
}
for i in range(12):
device_map[f"transformer.h.{i}"] = 0 if i <= 5 else 1
gpt2 = dispatch_model(gpt2, device_map)
outputs = gpt2.generate(inputs["input_ids"], max_new_tokens=10)
assert tokenizer.decode(outputs[0].tolist()) == "Hello world! My name is Kiyoshi, and I'm a student at"
# Dispatch with a bit of CPU offload
gpt2 = AutoModelForCausalLM.from_pretrained("gpt2")
for i in range(4):
device_map[f"transformer.h.{i}"] = "cpu"
gpt2 = dispatch_model(gpt2, device_map)
outputs = gpt2.generate(inputs["input_ids"], max_new_tokens=10)
assert tokenizer.decode(outputs[0].tolist()) == "Hello world! My name is Kiyoshi, and I'm a student at"
# Dispatch with a bit of CPU and disk offload
gpt2 = AutoModelForCausalLM.from_pretrained("gpt2")
for i in range(2):
device_map[f"transformer.h.{i}"] = "disk"
with TemporaryDirectory() as tmp_dir:
state_dict = {
k: p for k, p in gpt2.state_dict().items() if "transformer.h.0" in k or "transformer.h.1" in k
}
offload_state_dict(tmp_dir, state_dict)
gpt2 = dispatch_model(gpt2, device_map, offload_dir=tmp_dir)
outputs = gpt2.generate(inputs["input_ids"], max_new_tokens=10)
assert tokenizer.decode(outputs[0].tolist()) == "Hello world! My name is Kiyoshi, and I'm a student at"
@require_non_cpu
def test_dispatch_model_with_unused_submodules(self):
model = ModelWithUnusedSubModulesForTest()
device_map = {"linear1": "cpu", "linear2": "disk", "batchnorm": "cpu", "linear3": 0, "linear4": 0}
x = torch.randn(2, 3)
expected = model(x)
with TemporaryDirectory() as tmp_dir:
dispatch_model(
model, device_map, offload_dir=tmp_dir, preload_module_classes=["ModuleWithUnusedSubModules"]
)
output = model(x)
assert torch.allclose(expected, output.cpu(), atol=1e-5)
@require_multi_device
def test_dispatch_model_with_unused_submodules_multi_device(self):
model = ModelWithUnusedSubModulesForTest()
device_map = {"linear1": "cpu", "linear2": "disk", "batchnorm": "cpu", "linear3": 0, "linear4": 1}
x = torch.randn(2, 3)
expected = model(x)
with TemporaryDirectory() as tmp_dir:
dispatch_model(
model, device_map, offload_dir=tmp_dir, preload_module_classes=["ModuleWithUnusedSubModules"]
)
output = model(x)
assert torch.allclose(expected, output.cpu(), atol=1e-5)
@require_non_cpu
def test_dispatch_model_force_hooks(self):
model = ModelForTest()
device_map = {"": 0}
x = torch.randn(2, 3)
expected = model(x)
dispatch_model(model, device_map, force_hooks=True)
output = model(x)
assert torch.allclose(expected, output.cpu(), atol=1e-5)
@require_non_cpu
def test_load_checkpoint_and_dispatch(self):
model = ModelForTest()
device_map = {"linear1": "cpu", "batchnorm": "cpu", "linear2": 0}
x = torch.randn(2, 3)
expected = model(x)
with TemporaryDirectory() as tmp_dir:
checkpoint = os.path.join(tmp_dir, "pt_model.bin")
torch.save(model.state_dict(), checkpoint)
new_model = ModelForTest()
new_model = load_checkpoint_and_dispatch(new_model, checkpoint, device_map=device_map)
# CPU-offloaded weights are on the meta device while waiting for the forward pass.
assert new_model.linear1.weight.device == torch.device("meta")
assert new_model.linear2.weight.device == torch.device(torch_device)
output = new_model(x)
assert torch.allclose(expected, output.cpu(), atol=1e-5)
@require_multi_device
def test_load_checkpoint_and_dispatch_multi_device(self):
model = BiggerModelForTest()
device_map = {"linear1": "cpu", "linear2": "cpu", "batchnorm": 0, "linear3": 0, "linear4": 1}
x = torch.randn(2, 3)
expected = model(x)
with TemporaryDirectory() as tmp_dir:
checkpoint = os.path.join(tmp_dir, "pt_model.bin")
torch.save(model.state_dict(), checkpoint)
new_model = BiggerModelForTest()
new_model = load_checkpoint_and_dispatch(new_model, checkpoint, device_map=device_map)
# CPU-offloaded weights are on the meta device while waiting for the forward pass.
assert new_model.linear1.weight.device == torch.device("meta")
assert new_model.linear2.weight.device == torch.device("meta")
assert new_model.linear3.weight.device == torch.device(torch_device)
assert new_model.linear4.weight.device == torch.device(torch_device.replace(":0", ":1"))
output = new_model(x)
assert torch.allclose(expected, output.cpu(), atol=1e-5)
@require_non_cpu
def test_load_checkpoint_and_dispatch_with_unused_submodules(self):
model = ModelWithUnusedSubModulesForTest()
device_map = {"linear1": "cpu", "linear2": "cpu", "batchnorm": 0, "linear3": 0, "linear4": 0}
x = torch.randn(2, 3)
expected = model(x)
with TemporaryDirectory() as tmp_dir:
checkpoint = os.path.join(tmp_dir, "pt_model.bin")
torch.save(model.state_dict(), checkpoint)
new_model = ModelWithUnusedSubModulesForTest()
new_model = load_checkpoint_and_dispatch(
new_model, checkpoint, device_map=device_map, preload_module_classes=["ModuleWithUnusedSubModules"]
)
# CPU-offloaded weights are on the meta device while waiting for the forward pass.
assert new_model.linear1.linear.weight.device == torch.device("meta")
assert new_model.linear2.linear.weight.device == torch.device("meta")
assert new_model.linear3.linear.weight.device == torch.device(torch_device)
assert new_model.linear4.linear.weight.device == torch.device(torch_device)
output = new_model(x)
assert torch.allclose(expected, output.cpu(), atol=1e-5)
@require_multi_device
def test_load_checkpoint_and_dispatch_multi_device_with_unused_submodules(self):
model = ModelWithUnusedSubModulesForTest()
device_map = {"linear1": "cpu", "linear2": "cpu", "batchnorm": 0, "linear3": 0, "linear4": 1}
x = torch.randn(2, 3)
expected = model(x)
with TemporaryDirectory() as tmp_dir:
checkpoint = os.path.join(tmp_dir, "pt_model.bin")
torch.save(model.state_dict(), checkpoint)
new_model = ModelWithUnusedSubModulesForTest()
new_model = load_checkpoint_and_dispatch(
new_model, checkpoint, device_map=device_map, preload_module_classes=["ModuleWithUnusedSubModules"]
)
# CPU-offloaded weights are on the meta device while waiting for the forward pass.
assert new_model.linear1.linear.weight.device == torch.device("meta")
assert new_model.linear2.linear.weight.device == torch.device("meta")
assert new_model.linear3.linear.weight.device == torch.device(torch_device)
assert new_model.linear4.linear.weight.device == torch.device(torch_device.replace(":0", ":1"))
output = new_model(x)
assert torch.allclose(expected, output.cpu(), atol=1e-5)
@require_non_cpu
def test_cpu_offload_with_hook(self):
model1 = torch.nn.Linear(4, 5)
model1, hook1 = cpu_offload_with_hook(model1)
assert model1.weight.device == torch.device("cpu")
inputs = torch.randn(3, 4)
outputs = model1(inputs)
assert outputs.device == torch.device(torch_device)
assert model1.weight.device == torch.device(torch_device)
hook1.offload()
assert model1.weight.device == torch.device("cpu")
model2 = torch.nn.Linear(5, 5)
model2, hook2 = cpu_offload_with_hook(model2, prev_module_hook=hook1)
assert model2.weight.device == torch.device("cpu")
outputs = model1(inputs)
assert outputs.device == torch.device(torch_device)
assert model1.weight.device == torch.device(torch_device)
outputs = model2(outputs)
assert outputs.device == torch.device(torch_device)
assert model1.weight.device == torch.device("cpu")
assert model2.weight.device == torch.device(torch_device)
hook2.offload()
assert model2.weight.device == torch.device("cpu")
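    # Illustrative sketch (added note, not part of the original test): the
    # hook-chaining pattern above is how multi-stage pipelines keep only one
    # sub-model on the execution device at a time. `encoder` and `decoder` below
    # are hypothetical placeholder modules.
    #
    #     encoder, hook = cpu_offload_with_hook(encoder)
    #     decoder, hook = cpu_offload_with_hook(decoder, prev_module_hook=hook)
    #     latents = encoder(inputs)   # moves encoder onto the device
    #     outputs = decoder(latents)  # offloads encoder, loads decoder
    #     hook.offload()              # finally offload the decoder as well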
@require_non_torch_xla
@slow
@require_bnb
@require_multi_device
def test_dispatch_model_bnb(self):
"""Tests that `dispatch_model` quantizes int8 layers"""
from huggingface_hub import hf_hub_download
from transformers import AutoConfig, AutoModel, BitsAndBytesConfig
from transformers.utils.bitsandbytes import replace_with_bnb_linear
with init_empty_weights():
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
model = replace_with_bnb_linear(
model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
)
model_path = hf_hub_download("bigscience/bloom-560m", "pytorch_model.bin")
model = load_checkpoint_and_dispatch(
model,
checkpoint=model_path,
device_map="balanced",
)
assert model.h[0].self_attention.query_key_value.weight.dtype == torch.int8
assert model.h[0].self_attention.query_key_value.weight.device.index == 0
assert model.h[(-1)].self_attention.query_key_value.weight.dtype == torch.int8
assert model.h[(-1)].self_attention.query_key_value.weight.device.index == 1
@require_cuda_or_xpu
@slow
@require_bnb
def test_dispatch_model_int8_simple(self):
"""Tests that `dispatch_model` quantizes int8 layers"""
from huggingface_hub import hf_hub_download
from transformers import AutoConfig, AutoModel, BitsAndBytesConfig
from transformers.utils.bitsandbytes import replace_with_bnb_linear
with init_empty_weights():
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
model = replace_with_bnb_linear(
model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
)
model_path = hf_hub_download("bigscience/bloom-560m", "pytorch_model.bin")
# test with auto
model = load_checkpoint_and_dispatch(
model,
checkpoint=model_path,
device_map="auto",
)
assert model.h[0].self_attention.query_key_value.weight.dtype == torch.int8
assert model.h[0].self_attention.query_key_value.weight.device.index == 0
with init_empty_weights():
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
model = replace_with_bnb_linear(
model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
)
# test with str device map
model = load_checkpoint_and_dispatch(
model,
checkpoint=model_path,
device_map={"": torch_device},
)
assert model.h[0].self_attention.query_key_value.weight.dtype == torch.int8
assert model.h[0].self_attention.query_key_value.weight.device.index == 0
with init_empty_weights():
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
model = replace_with_bnb_linear(
model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
)
# test with torch.device device map
model = load_checkpoint_and_dispatch(
model,
checkpoint=model_path,
device_map={"": torch_device},
)
assert model.h[0].self_attention.query_key_value.weight.dtype == torch.int8
assert model.h[0].self_attention.query_key_value.weight.device.index == 0
@require_cuda_or_xpu
@slow
@require_bnb
    def test_dispatch_model_fp4_simple(self):
"""Tests that `dispatch_model` quantizes fp4 layers"""
from huggingface_hub import hf_hub_download
from transformers import AutoConfig, AutoModel, BitsAndBytesConfig
from transformers.utils.bitsandbytes import replace_with_bnb_linear
with init_empty_weights():
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
quantization_config = BitsAndBytesConfig(load_in_4bit=True)
model = replace_with_bnb_linear(
model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
)
model_path = hf_hub_download("bigscience/bloom-560m", "pytorch_model.bin")
# test with auto
model = load_checkpoint_and_dispatch(
model,
checkpoint=model_path,
device_map="auto",
)
assert model.h[0].self_attention.query_key_value.weight.dtype == torch.uint8
assert model.h[0].self_attention.query_key_value.weight.device.index == 0
with init_empty_weights():
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
model = replace_with_bnb_linear(
model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
)
# test with str device map
model = load_checkpoint_and_dispatch(
model,
checkpoint=model_path,
device_map={"": torch_device},
)
assert model.h[0].self_attention.query_key_value.weight.dtype == torch.uint8
assert model.h[0].self_attention.query_key_value.weight.device.index == 0
with init_empty_weights():
model = AutoModel.from_config(AutoConfig.from_pretrained("bigscience/bloom-560m"))
model = replace_with_bnb_linear(
model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
)
# test with torch.device device map
model = load_checkpoint_and_dispatch(
model,
checkpoint=model_path,
device_map={"": torch_device},
)
assert model.h[0].self_attention.query_key_value.weight.dtype == torch.uint8
assert model.h[0].self_attention.query_key_value.weight.device.index == 0
| accelerate/tests/test_big_modeling.py/0 | {
"file_path": "accelerate/tests/test_big_modeling.py",
"repo_id": "accelerate",
"token_count": 18483
} |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import (
DEFAULT_LAUNCH_COMMAND,
execute_subprocess_async,
path_in_accelerate_package,
require_multi_device,
require_non_cpu,
)
from accelerate.test_utils.testing import slow
from accelerate.utils import AutocastKwargs, KwargsHandler, ProfileKwargs, TorchDynamoPlugin, clear_environment
from accelerate.utils.dataclasses import DistributedType
@dataclass
class MockClass(KwargsHandler):
a: int = 0
b: bool = False
c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
def test_kwargs_handler(self):
# If no defaults are changed, `to_kwargs` returns an empty dict.
assert MockClass().to_kwargs() == {}
assert MockClass(a=2).to_kwargs() == {"a": 2}
assert MockClass(a=2, b=True).to_kwargs() == {"a": 2, "b": True}
assert MockClass(a=2, c=2.25).to_kwargs() == {"a": 2, "c": 2.25}
@require_non_cpu
def test_grad_scaler_kwargs(self):
        # Pass non-default values to the GradScaler through `GradScalerKwargs`.
scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
AcceleratorState._reset_state()
accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
assert accelerator.mixed_precision == "fp16"
scaler = accelerator.scaler
# Check the kwargs have been applied
assert scaler._init_scale == 1024.0
assert scaler._growth_factor == 2.0
# Check the other values are at the default
assert scaler._backoff_factor == 0.5
assert scaler._growth_interval == 2000
assert scaler._enabled is True
@require_multi_device
def test_ddp_kwargs(self):
cmd = DEFAULT_LAUNCH_COMMAND + [inspect.getfile(self.__class__)]
execute_subprocess_async(cmd)
@require_non_cpu
def test_autocast_kwargs(self):
kwargs = AutocastKwargs(enabled=False)
AcceleratorState._reset_state()
accelerator = Accelerator(mixed_precision="fp16")
a_float32 = torch.rand((8, 8), device=accelerator.device)
b_float32 = torch.rand((8, 8), device=accelerator.device)
c_float32 = torch.rand((8, 8), device=accelerator.device)
d_float32 = torch.rand((8, 8), device=accelerator.device)
with accelerator.autocast():
e_float16 = torch.mm(a_float32, b_float32)
assert e_float16.dtype == torch.float16
with accelerator.autocast(autocast_handler=kwargs):
# Convert e_float16 to float32
f_float32 = torch.mm(c_float32, e_float16.float())
assert f_float32.dtype == torch.float32
g_float16 = torch.mm(d_float32, f_float32)
# We should be back in fp16
assert g_float16.dtype == torch.float16
@slow
def test_profile_kwargs(self):
# Arrange
schedule_options = [
dict(wait=1, warmup=1, active=2, repeat=1),
dict(wait=2, warmup=2, active=2, repeat=2),
dict(wait=0, warmup=1, active=3, repeat=3, skip_first=1),
dict(wait=3, warmup=2, active=1, repeat=1, skip_first=2),
dict(wait=1, warmup=0, active=1, repeat=5),
]
total_steps = 100
for option in schedule_options:
count = 0
table_outputs = []
steps_per_cycle = option["wait"] + option["warmup"] + option["active"]
effective_steps = max(0, total_steps - option.get("skip_first", 0))
cycles = effective_steps // steps_per_cycle
if option["repeat"] > 0:
expected_count = min(cycles, option["repeat"])
else:
expected_count = cycles
def on_trace_ready(prof):
nonlocal count
nonlocal table_outputs
count += 1
table_outputs.append(prof.key_averages().table(sort_by="cpu_time_total", row_limit=-1))
kwargs = ProfileKwargs(activities=["cpu"], on_trace_ready=on_trace_ready, schedule_option=option)
accelerator = Accelerator(kwargs_handlers=[kwargs])
# Act
with accelerator.profile() as prof:
for _ in range(total_steps):
prof.step()
torch.tensor([1, 2, 3, 4, 5], device=accelerator.device)
# Assert
assert isinstance(prof, torch.profiler.profile)
assert count == expected_count, f"Option: {option}, Expected count: {expected_count}, but got {count}"
for output in table_outputs:
self.assertIn("CPU time total:", output)
def test_torch_dynamo_plugin(self):
with clear_environment():
prefix = "ACCELERATE_DYNAMO_"
# nvfuser's dynamo backend name is "nvprims_nvfuser"
# use "nvfuser" here to cause exception if this test causes os.environ changed permanently
os.environ[prefix + "BACKEND"] = "aot_ts_nvfuser"
os.environ[prefix + "MODE"] = "reduce-overhead"
dynamo_plugin_kwargs = TorchDynamoPlugin().to_kwargs()
assert dynamo_plugin_kwargs == {"backend": "aot_ts_nvfuser", "mode": "reduce-overhead"}
assert os.environ.get(prefix + "BACKEND") != "aot_ts_nvfuser"
@require_multi_device
def test_ddp_comm_hook(self):
cmd = DEFAULT_LAUNCH_COMMAND + [path_in_accelerate_package("test_utils", "scripts", "test_ddp_comm_hook.py")]
execute_subprocess_async(cmd)
def main():
ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
# Skip this test due to TorchXLA not using torch.nn.parallel.DistributedDataParallel for model wrapping.
if accelerator.distributed_type == DistributedType.XLA:
return
model = torch.nn.Linear(100, 200)
model = accelerator.prepare(model)
# Check the values changed in kwargs
error_msg = ""
observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
if __name__ == "__main__":
main()
| accelerate/tests/test_kwargs_handlers.py/0 | {
"file_path": "accelerate/tests/test_kwargs_handlers.py",
"repo_id": "accelerate",
"token_count": 3261
} |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import logging
import os
import re
import subprocess
import tempfile
import unittest
import zipfile
from pathlib import Path
from typing import Optional
from unittest import mock
import numpy as np
import torch
from packaging import version
# We use TF to parse the logs
from accelerate import Accelerator
from accelerate.test_utils.testing import (
MockingTestCase,
TempDirTestCase,
require_clearml,
require_comet_ml,
require_dvclive,
require_pandas,
require_tensorboard,
require_wandb,
skip,
)
from accelerate.tracking import CometMLTracker, GeneralTracker
from accelerate.utils import (
ProjectConfiguration,
is_comet_ml_available,
is_dvclive_available,
is_tensorboard_available,
)
if is_comet_ml_available():
from comet_ml import OfflineExperiment
if is_tensorboard_available():
import struct
import tensorboard.compat.proto.event_pb2 as event_pb2
if is_dvclive_available():
from dvclive.plots.metric import Metric
from dvclive.serialize import load_yaml
from dvclive.utils import parse_metrics
logger = logging.getLogger(__name__)
@require_tensorboard
class TensorBoardTrackingTest(unittest.TestCase):
@unittest.skipIf(version.parse(np.__version__) >= version.parse("2.0"), "TB doesn't support numpy 2.0")
def test_init_trackers(self):
project_name = "test_project_with_config"
with tempfile.TemporaryDirectory() as dirpath:
accelerator = Accelerator(log_with="tensorboard", project_dir=dirpath)
config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"}
accelerator.init_trackers(project_name, config)
accelerator.end_training()
for child in Path(f"{dirpath}/{project_name}").glob("*/**"):
log = list(filter(lambda x: x.is_file(), child.iterdir()))[0]
assert str(log) != ""
def test_log(self):
project_name = "test_project_with_log"
with tempfile.TemporaryDirectory() as dirpath:
accelerator = Accelerator(log_with="tensorboard", project_dir=dirpath)
accelerator.init_trackers(project_name)
values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"}
accelerator.log(values, step=0)
accelerator.end_training()
# Logged values are stored in the outermost-tfevents file and can be read in as a TFRecord
# Names are randomly generated each time
log = list(filter(lambda x: x.is_file(), Path(f"{dirpath}/{project_name}").iterdir()))[0]
assert str(log) != ""
def test_log_with_tensor(self):
project_name = "test_project_with_log"
with tempfile.TemporaryDirectory() as dirpath:
accelerator = Accelerator(log_with="tensorboard", project_dir=dirpath)
accelerator.init_trackers(project_name)
values = {"tensor": torch.tensor(1)}
accelerator.log(values, step=0)
accelerator.end_training()
# Logged values are stored in the outermost-tfevents file and can be read in as a TFRecord
# Names are randomly generated each time
log = list(filter(lambda x: x.is_file(), Path(f"{dirpath}/{project_name}").iterdir()))[0]
# Reading implementation based on https://github.com/pytorch/pytorch/issues/45327#issuecomment-703757685
with open(log, "rb") as f:
data = f.read()
found_tensor = False
while data:
header = struct.unpack("Q", data[:8])
event_str = data[12 : 12 + int(header[0])] # 8+4
data = data[12 + int(header[0]) + 4 :]
event = event_pb2.Event()
event.ParseFromString(event_str)
if event.HasField("summary"):
for value in event.summary.value:
if value.simple_value == 1.0 and value.tag == "tensor":
found_tensor = True
assert found_tensor, "Converted tensor was not found in the log file!"
def test_project_dir(self):
with self.assertRaisesRegex(ValueError, "Logging with `tensorboard` requires a `logging_dir`"):
_ = Accelerator(log_with="tensorboard")
with tempfile.TemporaryDirectory() as dirpath:
_ = Accelerator(log_with="tensorboard", project_dir=dirpath)
def test_project_dir_with_config(self):
config = ProjectConfiguration(total_limit=30)
with tempfile.TemporaryDirectory() as dirpath:
_ = Accelerator(log_with="tensorboard", project_dir=dirpath, project_config=config)
@require_wandb
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
class WandBTrackingTest(TempDirTestCase, MockingTestCase):
def setUp(self):
super().setUp()
        # wandb lets us override where logs are stored via the WANDB_DIR env var
self.add_mocks(mock.patch.dict(os.environ, {"WANDB_DIR": self.tmpdir}))
@staticmethod
def parse_log(log: str, section: str, record: bool = True):
"""
Parses wandb log for `section` and returns a dictionary of
all items in that section. Section names are based on the
        output of `wandb sync --view --verbose` and on items starting
        with "Record" in that output.
"""
# Big thanks to the W&B team for helping us parse their logs
pattern = rf"{section} ([\S\s]*?)\n\n"
if record:
pattern = rf"Record: {pattern}"
cleaned_record = re.findall(pattern, log)[0]
        # Key/value sections ("config" and "history")
if section == "config" or section == "history":
cleaned_record = re.findall(r'"([a-zA-Z0-9_.,]+)', cleaned_record)
return {key: val for key, val in zip(cleaned_record[0::2], cleaned_record[1::2])}
# Everything else
else:
return dict(re.findall(r'(\w+): "([^\s]+)"', cleaned_record))
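    # Hypothetical usage sketch (mirrors the test below): given the text produced by
    # `wandb sync --view --verbose` on an offline run file, sections can be pulled
    # out one at a time, e.g.:
    #
    #     logged_config = WandBTrackingTest.parse_log(content, "config")
    #     run_info = WandBTrackingTest.parse_log(content, "run", record=False)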
@skip
def test_wandb(self):
project_name = "test_project_with_config"
accelerator = Accelerator(log_with="wandb")
config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"}
kwargs = {"wandb": {"tags": ["my_tag"]}}
accelerator.init_trackers(project_name, config, kwargs)
values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"}
accelerator.log(values, step=0)
accelerator.end_training()
# The latest offline log is stored at wandb/latest-run/*.wandb
for child in Path(f"{self.tmpdir}/wandb/latest-run").glob("*"):
if child.is_file() and child.suffix == ".wandb":
cmd = ["wandb", "sync", "--view", "--verbose", str(child)]
content = subprocess.check_output(cmd, encoding="utf8", errors="ignore")
break
# Check HPS through careful parsing and cleaning
logged_items = self.parse_log(content, "config")
assert logged_items["num_iterations"] == "12"
assert logged_items["learning_rate"] == "0.01"
assert logged_items["some_boolean"] == "false"
assert logged_items["some_string"] == "some_value"
assert logged_items["some_string"] == "some_value"
# Run tags
logged_items = self.parse_log(content, "run", False)
assert logged_items["tags"] == "my_tag"
# Actual logging
logged_items = self.parse_log(content, "history")
assert logged_items["total_loss"] == "0.1"
assert logged_items["iteration"] == "1"
assert logged_items["my_text"] == "some_value"
assert logged_items["_step"] == "0"
# Comet has a special `OfflineExperiment` we need to use for testing
def offline_init(self, run_name: str, tmpdir: str):
self.run_name = run_name
self.writer = OfflineExperiment(project_name=run_name, offline_directory=tmpdir)
logger.info(f"Initialized offline CometML project {self.run_name}")
logger.info("Make sure to log any initial configurations with `self.store_init_configuration` before training!")
@require_comet_ml
@mock.patch.object(CometMLTracker, "__init__", offline_init)
class CometMLTest(unittest.TestCase):
@staticmethod
def get_value_from_key(log_list, key: str, is_param: bool = False):
"Extracts `key` from Comet `log`"
for log in log_list:
j = json.loads(log)["payload"]
if is_param and "param" in j.keys():
if j["param"]["paramName"] == key:
return j["param"]["paramValue"]
if "log_other" in j.keys():
if j["log_other"]["key"] == key:
return j["log_other"]["val"]
if "metric" in j.keys():
if j["metric"]["metricName"] == key:
return j["metric"]["metricValue"]
if j.get("key", None) == key:
return j["value"]
def test_init_trackers(self):
with tempfile.TemporaryDirectory() as d:
tracker = CometMLTracker("test_project_with_config", d)
accelerator = Accelerator(log_with=tracker)
config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"}
accelerator.init_trackers(None, config)
accelerator.end_training()
log = os.listdir(d)[0] # Comet is nice, it's just a zip file here
# We parse the raw logs
p = os.path.join(d, log)
archive = zipfile.ZipFile(p, "r")
log = archive.open("messages.json").read().decode("utf-8")
list_of_json = log.split("\n")[:-1]
assert self.get_value_from_key(list_of_json, "num_iterations", True) == 12
assert self.get_value_from_key(list_of_json, "learning_rate", True) == 0.01
assert self.get_value_from_key(list_of_json, "some_boolean", True) is False
assert self.get_value_from_key(list_of_json, "some_string", True) == "some_value"
def test_log(self):
with tempfile.TemporaryDirectory() as d:
tracker = CometMLTracker("test_project_with_config", d)
accelerator = Accelerator(log_with=tracker)
accelerator.init_trackers(None)
values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"}
accelerator.log(values, step=0)
accelerator.end_training()
log = os.listdir(d)[0] # Comet is nice, it's just a zip file here
# We parse the raw logs
p = os.path.join(d, log)
archive = zipfile.ZipFile(p, "r")
log = archive.open("messages.json").read().decode("utf-8")
list_of_json = log.split("\n")[:-1]
assert self.get_value_from_key(list_of_json, "curr_step", True) == 0
assert self.get_value_from_key(list_of_json, "total_loss") == 0.1
assert self.get_value_from_key(list_of_json, "iteration") == 1
assert self.get_value_from_key(list_of_json, "my_text") == "some_value"
@require_clearml
class ClearMLTest(TempDirTestCase, MockingTestCase):
def setUp(self):
super().setUp()
# ClearML offline session location is stored in CLEARML_CACHE_DIR
self.add_mocks(mock.patch.dict(os.environ, {"CLEARML_CACHE_DIR": str(self.tmpdir)}))
@staticmethod
def _get_offline_dir(accelerator):
from clearml.config import get_offline_dir
return get_offline_dir(task_id=accelerator.get_tracker("clearml", unwrap=True).id)
@staticmethod
def _get_metrics(offline_dir):
metrics = []
with open(os.path.join(offline_dir, "metrics.jsonl")) as f:
json_lines = f.readlines()
for json_line in json_lines:
metrics.extend(json.loads(json_line))
return metrics
def test_init_trackers(self):
from clearml import Task
from clearml.utilities.config import text_to_config_dict
Task.set_offline(True)
accelerator = Accelerator(log_with="clearml")
config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"}
accelerator.init_trackers("test_project_with_config", config)
offline_dir = ClearMLTest._get_offline_dir(accelerator)
accelerator.end_training()
with open(os.path.join(offline_dir, "task.json")) as f:
offline_session = json.load(f)
clearml_offline_config = text_to_config_dict(offline_session["configuration"]["General"]["value"])
assert config == clearml_offline_config
def test_log(self):
from clearml import Task
Task.set_offline(True)
accelerator = Accelerator(log_with="clearml")
accelerator.init_trackers("test_project_with_log")
values_with_iteration = {"should_be_under_train": 1, "eval_value": 2, "test_value": 3.1, "train_value": 4.1}
accelerator.log(values_with_iteration, step=1)
single_values = {"single_value_1": 1.1, "single_value_2": 2.2}
accelerator.log(single_values)
offline_dir = ClearMLTest._get_offline_dir(accelerator)
accelerator.end_training()
metrics = ClearMLTest._get_metrics(offline_dir)
assert (len(values_with_iteration) + len(single_values)) == len(metrics)
for metric in metrics:
if metric["metric"] == "Summary":
assert metric["variant"] in single_values
assert metric["value"] == single_values[metric["variant"]]
elif metric["metric"] == "should_be_under_train":
assert metric["variant"] == "train"
assert metric["iter"] == 1
assert metric["value"] == values_with_iteration["should_be_under_train"]
else:
values_with_iteration_key = metric["variant"] + "_" + metric["metric"]
assert values_with_iteration_key in values_with_iteration
assert metric["iter"] == 1
assert metric["value"] == values_with_iteration[values_with_iteration_key]
def test_log_images(self):
from clearml import Task
Task.set_offline(True)
accelerator = Accelerator(log_with="clearml")
accelerator.init_trackers("test_project_with_log_images")
base_image = np.eye(256, 256, dtype=np.uint8) * 255
base_image_3d = np.concatenate((np.atleast_3d(base_image), np.zeros((256, 256, 2), dtype=np.uint8)), axis=2)
images = {
"base_image": base_image,
"base_image_3d": base_image_3d,
}
accelerator.get_tracker("clearml").log_images(images, step=1)
offline_dir = ClearMLTest._get_offline_dir(accelerator)
accelerator.end_training()
images_saved = Path(os.path.join(offline_dir, "data")).rglob("*.jpeg")
assert len(list(images_saved)) == len(images)
def test_log_table(self):
from clearml import Task
Task.set_offline(True)
accelerator = Accelerator(log_with="clearml")
accelerator.init_trackers("test_project_with_log_table")
accelerator.get_tracker("clearml").log_table(
"from lists with columns", columns=["A", "B", "C"], data=[[1, 3, 5], [2, 4, 6]]
)
accelerator.get_tracker("clearml").log_table("from lists", data=[["A2", "B2", "C2"], [7, 9, 11], [8, 10, 12]])
offline_dir = ClearMLTest._get_offline_dir(accelerator)
accelerator.end_training()
metrics = ClearMLTest._get_metrics(offline_dir)
assert len(metrics) == 2
for metric in metrics:
assert metric["metric"] in ("from lists", "from lists with columns")
plot = json.loads(metric["plot_str"])
if metric["metric"] == "from lists with columns":
print(plot["data"][0])
self.assertCountEqual(plot["data"][0]["header"]["values"], ["A", "B", "C"])
self.assertCountEqual(plot["data"][0]["cells"]["values"], [[1, 2], [3, 4], [5, 6]])
else:
self.assertCountEqual(plot["data"][0]["header"]["values"], ["A2", "B2", "C2"])
self.assertCountEqual(plot["data"][0]["cells"]["values"], [[7, 8], [9, 10], [11, 12]])
@require_pandas
def test_log_table_pandas(self):
import pandas as pd
from clearml import Task
Task.set_offline(True)
accelerator = Accelerator(log_with="clearml")
accelerator.init_trackers("test_project_with_log_table_pandas")
accelerator.get_tracker("clearml").log_table(
"from df", dataframe=pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}), step=1
)
offline_dir = ClearMLTest._get_offline_dir(accelerator)
accelerator.end_training()
metrics = ClearMLTest._get_metrics(offline_dir)
assert len(metrics) == 1
assert metrics[0]["metric"] == "from df"
plot = json.loads(metrics[0]["plot_str"])
self.assertCountEqual(plot["data"][0]["header"]["values"], [["A"], ["B"], ["C"]])
self.assertCountEqual(plot["data"][0]["cells"]["values"], [[1, 2], [3, 4], [5, 6]])
class MyCustomTracker(GeneralTracker):
"Basic tracker that writes to a csv for testing"
_col_names = [
"total_loss",
"iteration",
"my_text",
"learning_rate",
"num_iterations",
"some_boolean",
"some_string",
]
name = "my_custom_tracker"
requires_logging_directory = False
def __init__(self, dir: str):
self.f = open(f"{dir}/log.csv", "w+")
self.writer = csv.DictWriter(self.f, fieldnames=self._col_names)
self.writer.writeheader()
@property
def tracker(self):
return self.writer
def store_init_configuration(self, values: dict):
logger.info("Call init")
self.writer.writerow(values)
def log(self, values: dict, step: Optional[int]):
logger.info("Call log")
self.writer.writerow(values)
def finish(self):
self.f.close()
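# Hypothetical usage sketch (the test case below exercises the same pattern with a
# single tracker): a `GeneralTracker` subclass can be handed straight to
# `Accelerator`, and `log_with` also accepts a list so it can be combined with
# built-in trackers. The directory below is a placeholder.
#
#     tracker = MyCustomTracker("/tmp/logs")
#     accelerator = Accelerator(log_with=[tracker, "tensorboard"], project_dir="/tmp/logs")
#     accelerator.init_trackers("my_project", config={"learning_rate": 1e-3})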
class CustomTrackerTestCase(unittest.TestCase):
def test_init_trackers(self):
with tempfile.TemporaryDirectory() as d:
tracker = MyCustomTracker(d)
accelerator = Accelerator(log_with=tracker)
config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"}
accelerator.init_trackers("Some name", config)
accelerator.end_training()
with open(f"{d}/log.csv") as f:
data = csv.DictReader(f)
data = next(data)
truth = {
"total_loss": "",
"iteration": "",
"my_text": "",
"learning_rate": "0.01",
"num_iterations": "12",
"some_boolean": "False",
"some_string": "some_value",
}
assert data == truth
def test_log(self):
with tempfile.TemporaryDirectory() as d:
tracker = MyCustomTracker(d)
accelerator = Accelerator(log_with=tracker)
accelerator.init_trackers("Some name")
values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"}
accelerator.log(values, step=0)
accelerator.end_training()
with open(f"{d}/log.csv") as f:
data = csv.DictReader(f)
data = next(data)
truth = {
"total_loss": "0.1",
"iteration": "1",
"my_text": "some_value",
"learning_rate": "",
"num_iterations": "",
"some_boolean": "",
"some_string": "",
}
assert data == truth
@require_dvclive
@mock.patch("dvclive.live.get_dvc_repo", return_value=None)
class DVCLiveTrackingTest(unittest.TestCase):
def test_init_trackers(self, mock_repo):
project_name = "test_project_with_config"
with tempfile.TemporaryDirectory() as dirpath:
accelerator = Accelerator(log_with="dvclive")
config = {
"num_iterations": 12,
"learning_rate": 1e-2,
"some_boolean": False,
"some_string": "some_value",
}
init_kwargs = {"dvclive": {"dir": dirpath, "save_dvc_exp": False, "dvcyaml": None}}
accelerator.init_trackers(project_name, config, init_kwargs)
accelerator.end_training()
live = accelerator.trackers[0].live
params = load_yaml(live.params_file)
assert params == config
def test_log(self, mock_repo):
project_name = "test_project_with_log"
with tempfile.TemporaryDirectory() as dirpath:
accelerator = Accelerator(log_with="dvclive", project_dir=dirpath)
init_kwargs = {"dvclive": {"dir": dirpath, "save_dvc_exp": False, "dvcyaml": None}}
accelerator.init_trackers(project_name, init_kwargs=init_kwargs)
values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"}
# Log step 0
accelerator.log(values)
# Log step 1
accelerator.log(values)
# Log step 3 (skip step 2)
accelerator.log(values, step=3)
accelerator.end_training()
live = accelerator.trackers[0].live
logs, latest = parse_metrics(live)
assert latest.pop("step") == 3
assert latest == values
scalars = os.path.join(live.plots_dir, Metric.subfolder)
for val in values.keys():
val_path = os.path.join(scalars, f"{val}.tsv")
steps = [int(row["step"]) for row in logs[val_path]]
assert steps == [0, 1, 3]
| accelerate/tests/test_tracking.py/0 | {
"file_path": "accelerate/tests/test_tracking.py",
"repo_id": "accelerate",
"token_count": 10128
} |
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| candle/LICENSE-APACHE/0 | {
"file_path": "candle/LICENSE-APACHE",
"repo_id": "candle",
"token_count": 3168
} |
# Simplified
## How it works
This program implements a neural network that predicts the winner of the second round of an election from the results of the first round.
Key points:
1. A multilayer perceptron with two hidden layers is used. The first hidden layer has 4 neurons, the second has 2 neurons.
2. The input is a vector of 2 numbers: the percentage of votes for the first and second candidates in the first round.
3. The output is the number 0 or 1, where 1 means the first candidate wins the second round and 0 means they lose.
4. Training uses samples built from real first- and second-round results of several past elections.
5. The model is trained by backpropagation, using gradient descent and the cross-entropy loss function.
6. The model parameters (the neuron weights) are initialized randomly and then optimized during training.
7. After training, the model is evaluated on a held-out test set to measure its accuracy.
8. If the accuracy on the test set is below 100%, the model is considered underfit and training is repeated.
Thus, the neural network learns hidden relationships between first- and second-round voting results in order to make predictions for new data.
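As a quick orientation before the included source, here is a minimal, self-contained sketch of the architecture described above, written against `candle_nn`'s `linear`/`VarBuilder` API. The struct and layer names (`MultiLevelPerceptron`, `ln1`..`ln3`) and the single output neuron are illustrative assumptions; the actual, complete code is pulled in by the includes below.
```rust,ignore
// Sketch only (not the included source): a 2 -> 4 -> 2 -> 1 perceptron.
use candle_core::{Result, Tensor};
use candle_nn::{linear, Linear, Module, VarBuilder};

struct MultiLevelPerceptron {
    ln1: Linear, // 2 inputs -> 4 neurons (first hidden layer)
    ln2: Linear, // 4 -> 2 neurons (second hidden layer)
    ln3: Linear, // 2 -> 1 output score (win/lose)
}

impl MultiLevelPerceptron {
    fn new(vs: VarBuilder) -> Result<Self> {
        Ok(Self {
            ln1: linear(2, 4, vs.pp("ln1"))?,
            ln2: linear(4, 2, vs.pp("ln2"))?,
            ln3: linear(2, 1, vs.pp("ln3"))?,
        })
    }

    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let xs = self.ln1.forward(xs)?.relu()?;
        let xs = self.ln2.forward(&xs)?.relu()?;
        self.ln3.forward(&xs)
    }
}
```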
```rust,ignore
{{#include ../simplified.rs:book_training_simplified1}}
```
```rust,ignore
{{#include ../simplified.rs:book_training_simplified2}}
```
```rust,ignore
{{#include ../simplified.rs:book_training_simplified3}}
```
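Before looking at the output, here is a rough, hedged sketch of how a prediction for a fresh first-round result could be produced. The helper name `predict`, the generic `Module` bound, and the rounding of the single output neuron are assumptions rather than a copy of the included code.
```rust,ignore
use candle_core::{Device, Result, Tensor};
use candle_nn::Module;

// Sketch only: turn one first-round result into a 0.0/1.0-style prediction,
// as in the `neural_network_prediction_result` line shown below.
fn predict<M: Module>(model: &M, dev: &Device, votes: [u32; 2]) -> Result<f32> {
    let input: Vec<f32> = votes.iter().map(|v| *v as f32).collect();
    let xs = Tensor::from_vec(input, (1, 2), dev)?;
    // Flatten the single-element output and round it to 0.0 or 1.0.
    let out = model.forward(&xs)?.flatten_all()?.to_vec1::<f32>()?[0];
    Ok(out.round())
}
```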
## Example output
```bash
Trying to train neural network.
Epoch: 1 Train loss: 4.42555 Test accuracy: 0.00%
Epoch: 2 Train loss: 0.84677 Test accuracy: 33.33%
Epoch: 3 Train loss: 2.54335 Test accuracy: 33.33%
Epoch: 4 Train loss: 0.37806 Test accuracy: 33.33%
Epoch: 5 Train loss: 0.36647 Test accuracy: 100.00%
real_life_votes: [13, 22]
neural_network_prediction_result: 0.0
```
| candle/candle-book/src/training/simplified.md/0 | {
"file_path": "candle/candle-book/src/training/simplified.md",
"repo_id": "candle",
"token_count": 530
} |
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
use anyhow::Result;
use candle_core::{Device, Tensor};
fn main() -> Result<()> {
let device = Device::new_cuda(0)?;
let x = Tensor::randn(0f32, 1.0, (8 * 4096, 8 * 4096), &device)?
.to_dtype(candle_core::DType::BF16)?;
candle_core::cuda::set_gemm_reduced_precision_f32(false);
candle_core::cuda::set_gemm_reduced_precision_bf16(false);
let _x1 = x.matmul(&x)?;
drop(_x1);
let start_time = std::time::Instant::now();
let _x1 = x.matmul(&x)?;
device.synchronize()?;
println!("fp32: {:?}", start_time.elapsed());
drop(_x1);
candle_core::cuda::set_gemm_reduced_precision_f32(true);
candle_core::cuda::set_gemm_reduced_precision_bf16(true);
let _x1 = x.matmul(&x)?;
drop(_x1);
let start_time = std::time::Instant::now();
let _x1 = x.matmul(&x)?;
device.synchronize()?;
println!("tf32: {:?}", start_time.elapsed());
drop(_x1);
Ok(())
}
| candle/candle-core/examples/cuda_basics.rs/0 | {
"file_path": "candle/candle-core/examples/cuda_basics.rs",
"repo_id": "candle",
"token_count": 477
} |
use crate::WithDType;
use cudarc;
use cudarc::cudnn::safe::{ConvForward, Cudnn};
use cudarc::driver::{CudaSlice, CudaView, DeviceRepr, ValidAsZeroBits};
use std::cell::RefCell;
use std::collections::HashMap;
use std::sync::Arc;
// The cudnn handles are stored per thread here rather than on the CudaDevice as they are neither
// Send nor Sync.
thread_local! {
static CUDNN: RefCell<HashMap<crate::cuda_backend::DeviceId, Arc<Cudnn>>> = HashMap::new().into();
}
impl From<cudarc::cudnn::CudnnError> for crate::Error {
fn from(err: cudarc::cudnn::CudnnError) -> Self {
crate::Error::wrap(err)
}
}
impl From<cudarc::driver::DriverError> for crate::Error {
fn from(err: cudarc::driver::DriverError) -> Self {
crate::Error::wrap(err)
}
}
pub(crate) fn launch_conv2d<
T: DeviceRepr + WithDType + ValidAsZeroBits + cudarc::cudnn::CudnnDataType,
Y: cudarc::cudnn::CudnnDataType,
>(
src: &CudaView<T>,
src_l: &crate::Layout,
filter: &CudaView<T>,
dst: &mut CudaSlice<T>,
params: &crate::conv::ParamsConv2D,
dev: &crate::cuda_backend::CudaDevice,
) -> crate::Result<()> {
use crate::conv::CudnnFwdAlgo as CandleAlgo;
use cudarc::cudnn::sys::cudnnConvolutionFwdAlgo_t as A;
let device_id = dev.id();
let cudnn = CUDNN.with(|cudnn| {
if let Some(cudnn) = cudnn.borrow().get(&device_id) {
return Ok(cudnn.clone());
}
let c = Cudnn::new(dev.cuda_device());
if let Ok(c) = &c {
cudnn.borrow_mut().insert(device_id, c.clone());
}
c
})?;
let conv = cudnn.create_conv2d::<Y>(
/* pad */ [params.padding as i32, params.padding as i32],
/* stride */ [params.stride as i32, params.stride as i32],
/* dilation */ [params.dilation as i32, params.dilation as i32],
cudarc::cudnn::sys::cudnnConvolutionMode_t::CUDNN_CROSS_CORRELATION,
)?;
let x_shape = [
params.b_size as i32,
params.c_in as i32,
params.i_h as i32,
params.i_w as i32,
];
// Note that `src` already starts at the proper offset.
let x = if src_l.is_contiguous() {
cudnn.create_4d_tensor::<T>(
cudarc::cudnn::sys::cudnnTensorFormat_t::CUDNN_TENSOR_NCHW,
x_shape,
)?
} else {
let s = src_l.stride();
cudnn.create_4d_tensor_ex::<T>(
x_shape,
[s[0] as i32, s[1] as i32, s[2] as i32, s[3] as i32],
)?
};
let w = cudnn.create_4d_filter::<T>(
cudarc::cudnn::sys::cudnnTensorFormat_t::CUDNN_TENSOR_NCHW,
[
params.c_out as i32,
params.c_in as i32,
params.k_h as i32,
params.k_w as i32,
],
)?;
let (w_out, h_out) = (params.out_w() as i32, params.out_h() as i32);
let y = cudnn.create_4d_tensor::<T>(
cudarc::cudnn::sys::cudnnTensorFormat_t::CUDNN_TENSOR_NCHW,
[params.b_size as i32, params.c_out as i32, h_out, w_out],
)?;
let conv2d = ConvForward {
conv: &conv,
x: &x,
w: &w,
y: &y,
};
let alg = match params.cudnn_fwd_algo {
None => conv2d.pick_algorithm()?,
Some(CandleAlgo::ImplicitGemm) => A::CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
Some(CandleAlgo::ImplicitPrecompGemm) => {
A::CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM
}
Some(CandleAlgo::Gemm) => A::CUDNN_CONVOLUTION_FWD_ALGO_GEMM,
Some(CandleAlgo::Direct) => A::CUDNN_CONVOLUTION_FWD_ALGO_DIRECT,
Some(CandleAlgo::Fft) => A::CUDNN_CONVOLUTION_FWD_ALGO_FFT,
Some(CandleAlgo::FftTiling) => A::CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING,
Some(CandleAlgo::Winograd) => A::CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD,
Some(CandleAlgo::WinogradNonFused) => A::CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
Some(CandleAlgo::Count) => A::CUDNN_CONVOLUTION_FWD_ALGO_COUNT,
};
let workspace_size = conv2d.get_workspace_size(alg)?;
let mut workspace = dev.cuda_device().alloc_zeros::<u8>(workspace_size)?;
unsafe {
conv2d.launch::<CudaSlice<u8>, _, _, _>(
alg,
Some(&mut workspace),
(T::one(), T::zero()),
src,
filter,
dst,
)?;
}
Ok(())
}
| candle/candle-core/src/cuda_backend/cudnn.rs/0 | {
"file_path": "candle/candle-core/src/cuda_backend/cudnn.rs",
"repo_id": "candle",
"token_count": 2242
} |
//! Implementation of Backend traits for Metal
//!
use crate::backend::{BackendDevice, BackendStorage};
use crate::conv::{ParamsConv1D, ParamsConv2D, ParamsConvTranspose1D, ParamsConvTranspose2D};
use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT};
use crate::{CpuStorage, CpuStorageRef, DType, Layout, Result, Shape};
use candle_metal_kernels::{BufferOffset, CallConvTranspose2dCfg, Kernels};
use metal::{Buffer, MTLResourceOptions, NSUInteger};
use std::collections::HashMap;
use std::ffi::c_void;
use std::sync::{Arc, Mutex, PoisonError, RwLock, TryLockError};
mod device;
pub use device::{DeviceId, MetalDevice};
pub fn buffer_o<'a>(buffer: &'a Buffer, l: &Layout, dtype: DType) -> BufferOffset<'a> {
BufferOffset {
buffer,
offset_in_bytes: l.start_offset() * dtype.size_in_bytes(),
}
}
/// A simple way to catch lock errors without
/// depending on `T`.
#[derive(thiserror::Error, Debug)]
pub enum LockError {
#[error("{0}")]
Poisoned(String),
#[error("Would block")]
WouldBlock,
}
impl<T> From<TryLockError<T>> for MetalError {
fn from(value: TryLockError<T>) -> Self {
match value {
TryLockError::Poisoned(p) => MetalError::LockError(LockError::Poisoned(p.to_string())),
TryLockError::WouldBlock => MetalError::LockError(LockError::WouldBlock),
}
}
}
impl<T> From<PoisonError<T>> for MetalError {
fn from(p: PoisonError<T>) -> Self {
MetalError::LockError(LockError::Poisoned(p.to_string()))
}
}
/// Metal related errors
#[derive(thiserror::Error, Debug)]
pub enum MetalError {
#[error("{0}")]
Message(String),
#[error(transparent)]
KernelError(#[from] candle_metal_kernels::MetalKernelError),
#[error("{0:?}")]
LockError(LockError),
#[error("{msg}, expected: {expected:?}, got: {got:?}")]
UnexpectedDType {
msg: &'static str,
expected: DType,
got: DType,
},
}
impl From<String> for MetalError {
fn from(e: String) -> Self {
MetalError::Message(e)
}
}
#[derive(Debug, Clone)]
pub struct MetalStorage {
/// The actual buffer containing the data.
buffer: Arc<metal::Buffer>,
/// a reference to the device owning this buffer
device: MetalDevice,
/// The count of allocated elements in the buffer
count: usize,
/// The dtype is kept since buffers are untyped.
dtype: DType,
}
impl BackendStorage for MetalStorage {
type Device = MetalDevice;
fn try_clone(&self, _: &Layout) -> Result<Self> {
Ok(self.clone())
}
fn dtype(&self) -> DType {
self.dtype
}
fn device(&self) -> &Self::Device {
&self.device
}
fn to_cpu_storage(&self) -> Result<CpuStorage> {
match self.dtype {
DType::U8 => Ok(CpuStorage::U8(self.to_cpu()?)),
DType::U32 => Ok(CpuStorage::U32(self.to_cpu()?)),
DType::I64 => Ok(CpuStorage::I64(self.to_cpu()?)),
DType::F16 => Ok(CpuStorage::F16(self.to_cpu()?)),
DType::BF16 => Ok(CpuStorage::BF16(self.to_cpu()?)),
DType::F32 => Ok(CpuStorage::F32(self.to_cpu()?)),
DType::F64 => Ok(CpuStorage::F64(self.to_cpu()?)),
}
}
fn affine(&self, layout: &Layout, mul: f64, add: f64) -> Result<Self> {
let device = self.device().clone();
let shape = layout.shape();
let el = shape.elem_count();
let dtype = self.dtype;
let buffer = device.new_buffer(el, self.dtype, "affine")?;
let command_buffer = self.device.command_buffer()?;
let src = buffer_o(&self.buffer, layout, dtype);
if layout.is_contiguous() {
let name = match self.dtype {
DType::F32 => "affine_f32",
DType::F16 => "affine_f16",
DType::BF16 => "affine_bf16",
DType::U8 => "affine_u8",
DType::U32 => "affine_u32",
dtype => crate::bail!("Metal contiguous affine {dtype:?} not implemented"),
};
candle_metal_kernels::call_affine(
&device.device,
&command_buffer,
&device.kernels,
name,
el,
src,
&buffer,
mul as f32,
add as f32,
)
.map_err(MetalError::from)?;
} else {
let name = match self.dtype {
DType::F32 => "affine_f32_strided",
DType::F16 => "affine_f16_strided",
DType::BF16 => "affine_bf16_strided",
dtype => crate::bail!("Metal strided affine {dtype:?} not implemented"),
};
candle_metal_kernels::call_affine_strided(
&device.device,
&command_buffer,
&device.kernels,
name,
layout.dims(),
src,
layout.stride(),
&buffer,
mul as f32,
add as f32,
)
.map_err(MetalError::from)?;
}
Ok(Self::new(buffer, device.clone(), el, dtype))
}
fn powf(&self, layout: &Layout, pow: f64) -> Result<Self> {
let device = self.device().clone();
let shape = layout.shape();
let el = shape.elem_count();
let dtype = self.dtype;
let buffer = device.new_buffer(el, self.dtype, "powf")?;
let command_buffer = self.device.command_buffer()?;
let src = buffer_o(&self.buffer, layout, dtype);
if layout.is_contiguous() {
let name = match self.dtype {
DType::F32 => "powf_f32",
DType::F16 => "powf_f16",
DType::BF16 => "powf_bf16",
dtype => crate::bail!("Metal contiguous powf {dtype:?} not implemented"),
};
candle_metal_kernels::call_powf(
&device.device,
&command_buffer,
&device.kernels,
name,
el,
src,
&buffer,
pow as f32,
)
.map_err(MetalError::from)?;
} else {
let name = match self.dtype {
DType::F32 => "powf_f32_strided",
DType::F16 => "powf_f16_strided",
DType::BF16 => "powf_bf16_strided",
dtype => crate::bail!("Metal strided powf {dtype:?} not implemented"),
};
candle_metal_kernels::call_powf_strided(
&device.device,
&command_buffer,
&device.kernels,
name,
layout.dims(),
src,
layout.stride(),
&buffer,
pow as f32,
)
.map_err(MetalError::from)?;
}
Ok(Self::new(buffer, device.clone(), el, dtype))
}
fn elu(&self, layout: &Layout, alpha: f64) -> Result<Self> {
let device = self.device().clone();
let shape = layout.shape();
let el = shape.elem_count();
let dtype = self.dtype;
let buffer = device.new_buffer(el, self.dtype, "elu")?;
let command_buffer = self.device.command_buffer()?;
let src = buffer_o(&self.buffer, layout, self.dtype);
if layout.is_contiguous() {
let name = match self.dtype {
DType::F32 => "elu_f32",
DType::F16 => "elu_f16",
DType::BF16 => "elu_bf16",
dtype => crate::bail!("Metal contiguous elu {dtype:?} not implemented"),
};
candle_metal_kernels::call_elu(
&device.device,
&command_buffer,
&device.kernels,
name,
el,
src,
&buffer,
alpha as f32,
)
.map_err(MetalError::from)?;
} else {
let name = match self.dtype {
DType::F32 => "elu_f32_strided",
DType::F16 => "elu_f16_strided",
DType::BF16 => "elu_bf16_strided",
dtype => crate::bail!("Metal strided elu {dtype:?} not implemented"),
};
candle_metal_kernels::call_elu_strided(
&device.device,
&command_buffer,
&device.kernels,
name,
layout.dims(),
src,
layout.stride(),
&buffer,
alpha as f32,
)
.map_err(MetalError::from)?;
}
Ok(Self::new(buffer, device.clone(), el, dtype))
}
fn reduce_op(&self, op: ReduceOp, layout: &Layout, sum_dims: &[usize]) -> Result<Self> {
let device = self.device.clone();
let src_stride = layout.stride();
let src_dims = layout.shape().dims();
// Source dims and strides with the sum dims at the end.
let mut dims = vec![];
let mut stride = vec![];
let mut dst_el: usize = 1;
for (dim_idx, &d) in src_dims.iter().enumerate() {
if !sum_dims.contains(&dim_idx) {
dst_el *= d;
dims.push(d);
stride.push(src_stride[dim_idx]);
}
}
for &dim_idx in sum_dims.iter() {
dims.push(src_dims[dim_idx]);
stride.push(src_stride[dim_idx]);
}
let reduction_shape = Shape::from(dims.clone());
if layout.is_contiguous() && reduction_shape.is_contiguous(&stride) {
let (name, check_empty, return_index) = match (op, self.dtype) {
(ReduceOp::Sum, DType::F32) => ("fast_sum_f32", false, false),
(ReduceOp::Min, DType::F32) => ("fast_min_f32", true, false),
(ReduceOp::Max, DType::F32) => ("fast_max_f32", true, false),
(ReduceOp::ArgMin, DType::F32) => ("fast_argmin_f32", true, true),
(ReduceOp::ArgMax, DType::F32) => ("fast_argmax_f32", true, true),
(ReduceOp::Sum, DType::U32) => ("fast_sum_u32", false, false),
(ReduceOp::Min, DType::U32) => ("fast_min_u32", true, false),
(ReduceOp::Max, DType::U32) => ("fast_max_u32", true, false),
(ReduceOp::ArgMin, DType::U32) => ("fast_argmin_u32", true, true),
(ReduceOp::ArgMax, DType::U32) => ("fast_argmax_u32", true, true),
(ReduceOp::Sum, DType::F16) => ("fast_sum_f16", false, false),
(ReduceOp::Min, DType::F16) => ("fast_min_f16", true, false),
(ReduceOp::Max, DType::F16) => ("fast_max_f16", true, false),
(ReduceOp::ArgMin, DType::F16) => ("fast_argmin_f16", true, true),
(ReduceOp::ArgMax, DType::F16) => ("fast_argmax_f16", true, true),
(ReduceOp::Sum, DType::BF16) => ("fast_sum_bf16", false, false),
(ReduceOp::Min, DType::BF16) => ("fast_min_bf16", true, false),
(ReduceOp::Max, DType::BF16) => ("fast_max_bf16", true, false),
(ReduceOp::ArgMin, DType::BF16) => ("fast_argmin_bf16", true, true),
(ReduceOp::ArgMax, DType::BF16) => ("fast_argmax_bf16", true, true),
(ReduceOp::Sum, DType::I64) => ("fast_sum_i64", false, false),
(ReduceOp::Min, DType::I64) => ("fast_min_i64", true, false),
(ReduceOp::Max, DType::I64) => ("fast_max_i64", true, false),
(ReduceOp::ArgMin, DType::I64) => ("fast_argmin_i64", true, true),
(ReduceOp::ArgMax, DType::I64) => ("fast_argmax_i64", true, true),
(ReduceOp::Sum, DType::U8) => ("fast_sum_u8", false, false),
(ReduceOp::Min, DType::U8) => ("fast_min_u8", true, false),
(ReduceOp::Max, DType::U8) => ("fast_max_u8", true, false),
(ReduceOp::ArgMin, DType::U8) => ("fast_argmin_u8", true, true),
(ReduceOp::ArgMax, DType::U8) => ("fast_argmax_u8", true, true),
(k, dtype) => {
crate::bail!("Metal contiguous reduce op {k:?} {dtype:?} not implemented")
}
};
if check_empty && layout.shape().elem_count() == 0 {
Err(crate::Error::EmptyTensor { op: "reduce" }.bt())?
}
let dtype = if return_index { DType::U32 } else { self.dtype };
let buffer = device.new_buffer(dst_el, dtype, "reduce")?;
let command_buffer = self.device.command_buffer()?;
let src = buffer_o(&self.buffer, layout, self.dtype);
candle_metal_kernels::call_reduce_contiguous(
&device.device,
&command_buffer,
&device.kernels,
name,
src_dims,
dst_el,
src,
&buffer,
)
.map_err(MetalError::from)?;
return Ok(Self::new(buffer, device, dst_el, dtype));
}
let (name, check_empty, return_index) = match (op, self.dtype) {
(ReduceOp::Sum, DType::F32) => ("fast_sum_f32_strided", false, false),
(ReduceOp::Min, DType::F32) => ("fast_min_f32_strided", true, false),
(ReduceOp::Max, DType::F32) => ("fast_max_f32_strided", true, false),
(ReduceOp::ArgMin, DType::F32) => ("fast_argmin_f32_strided", true, true),
(ReduceOp::ArgMax, DType::F32) => ("fast_argmax_f32_strided", true, true),
(ReduceOp::Sum, DType::U32) => ("fast_sum_u32_strided", false, false),
(ReduceOp::Min, DType::U32) => ("fast_min_u32_strided", true, false),
(ReduceOp::Max, DType::U32) => ("fast_max_u32_strided", true, false),
(ReduceOp::ArgMin, DType::U32) => ("fast_argmin_u32_strided", true, true),
(ReduceOp::ArgMax, DType::U32) => ("fast_argmax_u32_strided", true, true),
(ReduceOp::Sum, DType::F16) => ("fast_sum_f16_strided", false, false),
(ReduceOp::Min, DType::F16) => ("fast_min_f16_strided", true, false),
(ReduceOp::Max, DType::F16) => ("fast_max_f16_strided", true, false),
(ReduceOp::ArgMin, DType::F16) => ("fast_argmin_f16_strided", true, true),
(ReduceOp::ArgMax, DType::F16) => ("fast_argmax_f16_strided", true, true),
(ReduceOp::Sum, DType::BF16) => ("fast_sum_bf16_strided", false, false),
(ReduceOp::Min, DType::BF16) => ("fast_min_bf16_strided", true, false),
(ReduceOp::Max, DType::BF16) => ("fast_max_bf16_strided", true, false),
(ReduceOp::ArgMin, DType::BF16) => ("fast_argmin_bf16_strided", true, true),
(ReduceOp::ArgMax, DType::BF16) => ("fast_argmax_bf16_strided", true, true),
(ReduceOp::Sum, DType::I64) => ("fast_sum_i64_strided", false, false),
(ReduceOp::Min, DType::I64) => ("fast_min_i64_strided", true, false),
(ReduceOp::Max, DType::I64) => ("fast_max_i64_strided", true, false),
(ReduceOp::ArgMin, DType::I64) => ("fast_argmin_i64_strided", true, true),
(ReduceOp::ArgMax, DType::I64) => ("fast_argmax_i64_strided", true, true),
(ReduceOp::Sum, DType::U8) => ("fast_sum_u8_strided", false, false),
(ReduceOp::Min, DType::U8) => ("fast_min_u8_strided", true, false),
(ReduceOp::Max, DType::U8) => ("fast_max_u8_strided", true, false),
(ReduceOp::ArgMin, DType::U8) => ("fast_argmin_u8_strided", true, true),
(ReduceOp::ArgMax, DType::U8) => ("fast_argmax_u8_strided", true, true),
(k, dtype) => crate::bail!("Metal strided reduce op {k:?} {dtype:?} not implemented"),
};
if check_empty && layout.shape().elem_count() == 0 {
Err(crate::Error::EmptyTensor { op: "reduce" }.bt())?
}
let dtype = if return_index { DType::U32 } else { self.dtype };
let buffer = device.new_buffer(dst_el, dtype, "reduce")?;
let command_buffer = self.device.command_buffer()?;
let src = buffer_o(&self.buffer, layout, self.dtype);
candle_metal_kernels::call_reduce_strided(
&device.device,
&command_buffer,
&device.kernels,
name,
&dims,
&stride,
dst_el,
src,
&buffer,
)
.map_err(MetalError::from)?;
Ok(Self::new(buffer, device, dst_el, dtype))
}
fn cmp(&self, op: CmpOp, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout) -> Result<Self> {
let name = match op {
CmpOp::Eq => "eq",
CmpOp::Ne => "ne",
CmpOp::Le => "le",
CmpOp::Ge => "ge",
CmpOp::Lt => "lt",
CmpOp::Gt => "gt",
};
self.binary(name, rhs, lhs_l, rhs_l)
}
fn to_dtype(&self, layout: &Layout, dtype: DType) -> Result<Self> {
let device = self.device();
let shape = layout.shape();
let el_count = shape.elem_count();
let buffer = device.new_buffer(el_count, dtype, "todtype")?;
let command_buffer = device.command_buffer()?;
let src = buffer_o(&self.buffer, layout, self.dtype);
if layout.is_contiguous() {
let kernel_name = match (self.dtype, dtype) {
(DType::U32, DType::BF16) => "cast_u32_bf16",
(DType::U32, DType::F16) => "cast_u32_f16",
(DType::U32, DType::F32) => "cast_u32_f32",
(DType::U32, DType::I64) => "cast_u32_i64",
(DType::U32, DType::U8) => "cast_u32_u8",
(DType::U8, DType::BF16) => "cast_u8_bf16",
(DType::U8, DType::F16) => "cast_u8_f16",
(DType::U8, DType::F32) => "cast_u8_f32",
(DType::U8, DType::I64) => "cast_u8_i64",
(DType::U8, DType::U32) => "cast_u8_u32",
(DType::F32, DType::BF16) => "cast_f32_bf16",
(DType::F32, DType::F16) => "cast_f32_f16",
(DType::F32, DType::I64) => "cast_f32_i64",
(DType::F32, DType::U32) => "cast_f32_u32",
(DType::F32, DType::U8) => "cast_f32_u8",
(DType::I64, DType::BF16) => "cast_i64_bf16",
(DType::I64, DType::F16) => "cast_i64_f16",
(DType::I64, DType::F32) => "cast_i64_f32",
(DType::I64, DType::U32) => "cast_i64_u32",
(DType::I64, DType::U8) => "cast_i64_u8",
(DType::F16, DType::BF16) => "cast_f16_bf16",
(DType::F16, DType::F32) => "cast_f16_f32",
(DType::F16, DType::I64) => "cast_f16_i64",
(DType::F16, DType::U32) => "cast_f16_u32",
(DType::F16, DType::U8) => "cast_f16_u8",
(DType::BF16, DType::F16) => "cast_bf16_f16",
(DType::BF16, DType::F32) => "cast_bf16_f32",
(DType::BF16, DType::I64) => "cast_bf16_i64",
(DType::BF16, DType::U32) => "cast_bf16_u32",
(DType::BF16, DType::U8) => "cast_bf16_u8",
(left, right) => {
crate::bail!("Metal contiguous to_dtype {left:?} {right:?} not implemented")
}
};
candle_metal_kernels::call_cast_contiguous(
&device.device,
&command_buffer,
&device.kernels,
kernel_name,
el_count,
src,
&buffer,
)
.map_err(MetalError::from)?;
} else {
let kernel_name = match (self.dtype, dtype) {
(DType::BF16, DType::F16) => "cast_bf16_f16_strided",
(DType::BF16, DType::F32) => "cast_bf16_f32_strided",
(DType::BF16, DType::I64) => "cast_bf16_i64_strided",
(DType::BF16, DType::U32) => "cast_bf16_u32_strided",
(DType::BF16, DType::U8) => "cast_bf16_u8_strided",
(DType::F16, DType::BF16) => "cast_f16_bf16_strided",
(DType::F16, DType::F32) => "cast_f16_f32_strided",
(DType::F16, DType::I64) => "cast_f16_i64_strided",
(DType::F16, DType::U32) => "cast_f16_u32_strided",
(DType::F16, DType::U8) => "cast_f16_u8_strided",
(DType::F32, DType::BF16) => "cast_f32_bf16_strided",
(DType::F32, DType::F16) => "cast_f32_f16_strided",
(DType::F32, DType::I64) => "cast_f32_i64_strided",
(DType::F32, DType::U32) => "cast_f32_u32_strided",
(DType::F32, DType::U8) => "cast_f32_u8_strided",
(DType::I64, DType::F32) => "cast_i64_f32_strided",
(DType::I64, DType::BF16) => "cast_i64_bf16_strided",
(DType::I64, DType::F16) => "cast_i64_f16_strided",
(DType::I64, DType::U32) => "cast_i64_u32_strided",
(DType::I64, DType::U8) => "cast_i64_u8_strided",
(DType::U32, DType::BF16) => "cast_u32_bf16_strided",
(DType::U32, DType::F16) => "cast_u32_f16_strided",
(DType::U32, DType::F32) => "cast_u32_f32_strided",
(DType::U32, DType::I64) => "cast_u32_i64_strided",
(DType::U32, DType::U8) => "cast_u32_u8_strided",
(DType::U8, DType::BF16) => "cast_u8_bf16_strided",
(DType::U8, DType::F16) => "cast_u8_f16_strided",
(DType::U8, DType::F32) => "cast_u8_f32_strided",
(DType::U8, DType::I64) => "cast_u8_i64_strided",
(DType::U8, DType::U32) => "cast_u8_u32_strided",
(left, right) => {
crate::bail!("Metal strided to_dtype {left:?} {right:?} not implemented")
}
};
candle_metal_kernels::call_cast_strided(
&device.device,
&command_buffer,
&device.kernels,
kernel_name,
layout.dims(),
src,
layout.stride(),
&buffer,
)
.map_err(MetalError::from)?;
}
command_buffer.set_label("to_dtype");
Ok(Self::new(buffer, device.clone(), el_count, dtype))
}
fn unary_impl<B: UnaryOpT>(&self, layout: &Layout) -> Result<Self> {
let device = self.device();
let dtype = self.dtype;
let shape = layout.shape();
let el_count = shape.elem_count();
let buffer = device.new_buffer(el_count, dtype, B::KERNEL)?;
let command_buffer = device.command_buffer()?;
command_buffer.set_label(B::KERNEL);
let src = buffer_o(&self.buffer, layout, self.dtype);
match (el_count % 2, dtype, layout.is_contiguous()) {
(0, DType::BF16 | DType::F16, true) => {
use candle_metal_kernels::unary::contiguous_tiled;
let kernel_name = match (B::KERNEL, dtype) {
("uabs", DType::F16) => contiguous_tiled::abs::HALF,
("uabs", DType::F32) => contiguous_tiled::abs::FLOAT,
("uabs", DType::BF16) => contiguous_tiled::abs::BFLOAT,
("uceil", DType::F16) => contiguous_tiled::ceil::HALF,
("uceil", DType::F32) => contiguous_tiled::ceil::FLOAT,
("uceil", DType::BF16) => contiguous_tiled::ceil::BFLOAT,
("ucos", DType::F16) => contiguous_tiled::cos::HALF,
("ucos", DType::F32) => contiguous_tiled::cos::FLOAT,
("ucos", DType::BF16) => contiguous_tiled::cos::BFLOAT,
("uerf", DType::F16) => contiguous_tiled::erf::HALF,
("uerf", DType::F32) => contiguous_tiled::erf::FLOAT,
("uerf", DType::BF16) => contiguous_tiled::erf::BFLOAT,
("uexp", DType::F16) => contiguous_tiled::exp::HALF,
("uexp", DType::F32) => contiguous_tiled::exp::FLOAT,
("uexp", DType::BF16) => contiguous_tiled::exp::BFLOAT,
("ufloor", DType::F16) => contiguous_tiled::floor::HALF,
("ufloor", DType::F32) => contiguous_tiled::floor::FLOAT,
("ufloor", DType::BF16) => contiguous_tiled::floor::BFLOAT,
("ugelu_erf", DType::F16) => contiguous_tiled::gelu_erf::HALF,
("ugelu_erf", DType::F32) => contiguous_tiled::gelu_erf::FLOAT,
("ugelu_erf", DType::BF16) => contiguous_tiled::gelu_erf::BFLOAT,
("ugelu", DType::F16) => contiguous_tiled::gelu::HALF,
("ugelu", DType::F32) => contiguous_tiled::gelu::FLOAT,
("ugelu", DType::BF16) => contiguous_tiled::gelu::BFLOAT,
("ulog", DType::F16) => contiguous_tiled::log::HALF,
("ulog", DType::F32) => contiguous_tiled::log::FLOAT,
("ulog", DType::BF16) => contiguous_tiled::log::BFLOAT,
("uneg", DType::F16) => contiguous_tiled::neg::HALF,
("uneg", DType::F32) => contiguous_tiled::neg::FLOAT,
("uneg", DType::BF16) => contiguous_tiled::neg::BFLOAT,
("urecip", DType::F16) => contiguous_tiled::recip::HALF,
("urecip", DType::F32) => contiguous_tiled::recip::FLOAT,
("urecip", DType::BF16) => contiguous_tiled::recip::BFLOAT,
("urelu", DType::F16) => contiguous_tiled::relu::HALF,
("urelu", DType::F32) => contiguous_tiled::relu::FLOAT,
("urelu", DType::BF16) => contiguous_tiled::relu::BFLOAT,
("uround", DType::F16) => contiguous_tiled::round::HALF,
("uround", DType::F32) => contiguous_tiled::round::FLOAT,
("uround", DType::BF16) => contiguous_tiled::round::BFLOAT,
("usilu", DType::F16) => contiguous_tiled::silu::HALF,
("usilu", DType::F32) => contiguous_tiled::silu::FLOAT,
("usilu", DType::BF16) => contiguous_tiled::silu::BFLOAT,
("usin", DType::F16) => contiguous_tiled::sin::HALF,
("usin", DType::F32) => contiguous_tiled::sin::FLOAT,
("usin", DType::BF16) => contiguous_tiled::sin::BFLOAT,
("usqr", DType::F16) => contiguous_tiled::sqr::HALF,
("usqr", DType::F32) => contiguous_tiled::sqr::FLOAT,
("usqr", DType::BF16) => contiguous_tiled::sqr::BFLOAT,
("usqrt", DType::F16) => contiguous_tiled::sqrt::HALF,
("usqrt", DType::F32) => contiguous_tiled::sqrt::FLOAT,
("usqrt", DType::BF16) => contiguous_tiled::sqrt::BFLOAT,
("utanh", DType::F16) => contiguous_tiled::tanh::HALF,
("utanh", DType::F32) => contiguous_tiled::tanh::FLOAT,
("utanh", DType::BF16) => contiguous_tiled::tanh::BFLOAT,
("usign", DType::F16) => contiguous_tiled::sign::HALF,
("usign", DType::F32) => contiguous_tiled::sign::FLOAT,
("usign", DType::BF16) => contiguous_tiled::sign::BFLOAT,
("usign", DType::I64) => contiguous_tiled::sign::I64,
(name, dtype) => {
crate::bail!(
"Metal contiguous_tiled unary {name} {dtype:?} not implemented"
)
}
};
candle_metal_kernels::call_unary_contiguous_tiled(
&device.device,
&command_buffer,
&device.kernels,
kernel_name,
el_count,
src,
&buffer,
)
.map_err(MetalError::from)?;
}
(_, _, true) => {
use candle_metal_kernels::unary::contiguous;
let kernel_name = match (B::KERNEL, dtype) {
("uabs", DType::F16) => contiguous::abs::HALF,
("uabs", DType::F32) => contiguous::abs::FLOAT,
("uabs", DType::BF16) => contiguous::abs::BFLOAT,
("uceil", DType::F16) => contiguous::ceil::HALF,
("uceil", DType::F32) => contiguous::ceil::FLOAT,
("uceil", DType::BF16) => contiguous::ceil::BFLOAT,
("ucos", DType::F16) => contiguous::cos::HALF,
("ucos", DType::F32) => contiguous::cos::FLOAT,
("ucos", DType::BF16) => contiguous::cos::BFLOAT,
("uerf", DType::F16) => contiguous::erf::HALF,
("uerf", DType::F32) => contiguous::erf::FLOAT,
("uerf", DType::BF16) => contiguous::erf::BFLOAT,
("uexp", DType::F16) => contiguous::exp::HALF,
("uexp", DType::F32) => contiguous::exp::FLOAT,
("uexp", DType::BF16) => contiguous::exp::BFLOAT,
("ufloor", DType::F16) => contiguous::floor::HALF,
("ufloor", DType::F32) => contiguous::floor::FLOAT,
("ufloor", DType::BF16) => contiguous::floor::BFLOAT,
("ugelu_erf", DType::F16) => contiguous::gelu_erf::HALF,
("ugelu_erf", DType::F32) => contiguous::gelu_erf::FLOAT,
("ugelu_erf", DType::BF16) => contiguous::gelu_erf::BFLOAT,
("ugelu", DType::F16) => contiguous::gelu::HALF,
("ugelu", DType::F32) => contiguous::gelu::FLOAT,
("ugelu", DType::BF16) => contiguous::gelu::BFLOAT,
("ulog", DType::F16) => contiguous::log::HALF,
("ulog", DType::F32) => contiguous::log::FLOAT,
("ulog", DType::BF16) => contiguous::log::BFLOAT,
("uneg", DType::F16) => contiguous::neg::HALF,
("uneg", DType::F32) => contiguous::neg::FLOAT,
("uneg", DType::BF16) => contiguous::neg::BFLOAT,
("urecip", DType::F16) => contiguous::recip::HALF,
("urecip", DType::F32) => contiguous::recip::FLOAT,
("urecip", DType::BF16) => contiguous::recip::BFLOAT,
("urelu", DType::F16) => contiguous::relu::HALF,
("urelu", DType::F32) => contiguous::relu::FLOAT,
("urelu", DType::BF16) => contiguous::relu::BFLOAT,
("uround", DType::F16) => contiguous::round::HALF,
("uround", DType::F32) => contiguous::round::FLOAT,
("uround", DType::BF16) => contiguous::round::BFLOAT,
("usilu", DType::F16) => contiguous::silu::HALF,
("usilu", DType::F32) => contiguous::silu::FLOAT,
("usilu", DType::BF16) => contiguous::silu::BFLOAT,
("usin", DType::F16) => contiguous::sin::HALF,
("usin", DType::F32) => contiguous::sin::FLOAT,
("usin", DType::BF16) => contiguous::sin::BFLOAT,
("usqr", DType::F16) => contiguous::sqr::HALF,
("usqr", DType::F32) => contiguous::sqr::FLOAT,
("usqr", DType::BF16) => contiguous::sqr::BFLOAT,
("usqrt", DType::F16) => contiguous::sqrt::HALF,
("usqrt", DType::F32) => contiguous::sqrt::FLOAT,
("usqrt", DType::BF16) => contiguous::sqrt::BFLOAT,
("utanh", DType::F16) => contiguous::tanh::HALF,
("utanh", DType::F32) => contiguous::tanh::FLOAT,
("utanh", DType::BF16) => contiguous::tanh::BFLOAT,
("usign", DType::F16) => contiguous::sign::HALF,
("usign", DType::F32) => contiguous::sign::FLOAT,
("usign", DType::BF16) => contiguous::sign::BFLOAT,
("usign", DType::I64) => contiguous::sign::I64,
(name, dtype) => {
crate::bail!("Metal contiguous unary {name} {dtype:?} not implemented")
}
};
candle_metal_kernels::call_unary_contiguous(
&device.device,
&command_buffer,
&device.kernels,
kernel_name,
el_count,
src,
&buffer,
)
.map_err(MetalError::from)?;
}
(_, _, false) => {
use candle_metal_kernels::unary::strided;
let kernel_name = match (B::KERNEL, dtype) {
("ucos", DType::F32) => strided::cos::FLOAT,
("usin", DType::F32) => strided::sin::FLOAT,
("usqr", DType::F32) => strided::sqr::FLOAT,
("usqrt", DType::F32) => strided::sqrt::FLOAT,
("uneg", DType::F32) => strided::neg::FLOAT,
("uexp", DType::F32) => strided::exp::FLOAT,
("ulog", DType::F32) => strided::log::FLOAT,
("ugelu", DType::F32) => strided::gelu::FLOAT,
("ugelu_erf", DType::F32) => strided::gelu_erf::FLOAT,
("uerf", DType::F32) => strided::erf::FLOAT,
("usilu", DType::F32) => strided::silu::FLOAT,
("uabs", DType::F32) => strided::abs::FLOAT,
("uceil", DType::F32) => strided::ceil::FLOAT,
("ufloor", DType::F32) => strided::floor::FLOAT,
("urelu", DType::F32) => strided::relu::FLOAT,
("uround", DType::F32) => strided::round::FLOAT,
("utanh", DType::F32) => strided::tanh::FLOAT,
("ucos", DType::F16) => strided::cos::HALF,
("usin", DType::F16) => strided::sin::HALF,
("usqr", DType::F16) => strided::sqr::HALF,
("usqrt", DType::F16) => strided::sqrt::HALF,
("uneg", DType::F16) => strided::neg::HALF,
("uexp", DType::F16) => strided::exp::HALF,
("ulog", DType::F16) => strided::log::HALF,
("ugelu", DType::F16) => strided::gelu::HALF,
("ugelu_erf", DType::F16) => strided::gelu_erf::HALF,
("uerf", DType::F16) => strided::erf::HALF,
("usilu", DType::F16) => strided::silu::HALF,
("uabs", DType::F16) => strided::abs::HALF,
("uceil", DType::F16) => strided::ceil::HALF,
("ufloor", DType::F16) => strided::floor::HALF,
("urelu", DType::F16) => strided::relu::HALF,
("uround", DType::F16) => strided::round::HALF,
("utanh", DType::F16) => strided::tanh::HALF,
("ucos", DType::BF16) => strided::cos::BFLOAT,
("usin", DType::BF16) => strided::sin::BFLOAT,
("usqr", DType::BF16) => strided::sqr::BFLOAT,
("usqrt", DType::BF16) => strided::sqrt::BFLOAT,
("uneg", DType::BF16) => strided::neg::BFLOAT,
("uexp", DType::BF16) => strided::exp::BFLOAT,
("ulog", DType::BF16) => strided::log::BFLOAT,
("ugelu", DType::BF16) => strided::gelu::BFLOAT,
("ugelu_erf", DType::BF16) => strided::gelu_erf::BFLOAT,
("uerf", DType::BF16) => strided::erf::BFLOAT,
("usilu", DType::BF16) => strided::silu::BFLOAT,
("uabs", DType::BF16) => strided::abs::BFLOAT,
("uceil", DType::BF16) => strided::ceil::BFLOAT,
("ufloor", DType::BF16) => strided::floor::BFLOAT,
("urelu", DType::BF16) => strided::relu::BFLOAT,
("uround", DType::BF16) => strided::round::BFLOAT,
("utanh", DType::BF16) => strided::tanh::BFLOAT,
(name, dtype) => {
crate::bail!("Metal strided unary {name} {dtype:?} not implemented")
}
};
let dst = BufferOffset::zero_offset(&buffer);
candle_metal_kernels::call_unary_strided(
&device.device,
&command_buffer,
&device.kernels,
kernel_name,
layout.dims(),
src,
layout.stride(),
dst,
)
.map_err(MetalError::from)?;
}
}
Ok(Self::new(buffer, device.clone(), el_count, dtype))
}
fn binary_impl<B: BinaryOpT>(
&self,
rhs: &Self,
lhs_l: &Layout,
rhs_l: &Layout,
) -> Result<Self> {
self.binary(B::KERNEL, rhs, lhs_l, rhs_l)
}
fn where_cond(
&self,
layout: &Layout,
t: &Self,
t_l: &Layout,
f: &Self,
f_l: &Layout,
) -> Result<Self> {
let device = self.device.clone();
let shape = t_l.shape();
let dims = shape.dims();
let el = shape.elem_count();
let dtype = t.dtype;
let buffer = self.device.new_buffer(el, dtype, "where")?;
let command_buffer = self.device.command_buffer()?;
if t.dtype() != f.dtype() {
crate::bail!(
"Invalid where: different dtypes for values {:?} != {:?}",
t.dtype(),
f.dtype()
);
}
let name = match (self.dtype, t.dtype()) {
(DType::U8, DType::F32) => "where_u8_f32",
(DType::U32, DType::F32) => "where_u32_f32",
(DType::U8, DType::BF16) => "where_u8_bf16",
(DType::U8, DType::F16) => "where_u8_f16",
(DType::U8, DType::I64) => "where_u8_i64",
(DType::U8, DType::U32) => "where_u8_u32",
(DType::U8, DType::U8) => "where_u8_u8",
(left, right) => crate::bail!("Metal where_cond {left:?} {right:?} not implemented"),
};
let src = buffer_o(&self.buffer, layout, self.dtype);
let t = buffer_o(&t.buffer, t_l, t.dtype);
let f = buffer_o(&f.buffer, f_l, f.dtype);
candle_metal_kernels::call_where_cond_strided(
&device.device,
&command_buffer,
&device.kernels,
name,
dims,
src,
layout.stride(),
t,
t_l.stride(),
f,
f_l.stride(),
&buffer,
)
.map_err(MetalError::from)?;
Ok(Self::new(buffer, device, el, dtype))
}
fn conv1d(
&self,
layout: &Layout,
kernel: &Self,
kernel_l: &Layout,
params: &ParamsConv1D,
) -> Result<Self> {
let device = self.device().clone();
let shape = layout.shape();
let dims = shape.dims();
let strides = layout.stride();
let stride = params.stride;
let dilation = params.dilation;
let padding = params.padding;
let k_size = params.k_size;
let l_out = (dims[2] + 2 * padding - dilation * (k_size - 1) - 1) / stride + 1;
let dst_el = dims[0] * l_out * dims[1] * k_size;
let dst = self
.device
.new_buffer(dst_el, self.dtype, "conv1d_im2col")?;
let command_buffer = self.device.command_buffer()?;
let name = match self.dtype {
DType::F32 => "im2col1d_f32",
dtype => crate::bail!("Metal conv1d {dtype:?} not implemented"),
};
let src = buffer_o(&self.buffer, layout, self.dtype);
candle_metal_kernels::call_im2col1d_strided(
&self.device.device,
&command_buffer,
&self.device.kernels,
name,
layout.shape().dims(),
strides,
(k_size, stride, padding, dilation),
src,
&dst,
)
.map_err(MetalError::from)?;
let col = Self {
buffer: dst,
device,
count: dst_el,
dtype: self.dtype,
};
let l_out = params.l_out();
let b = params.b_size;
let n = params.c_out;
let k = params.k_size * params.c_in;
let m = l_out;
let col_l = Layout::contiguous((b, m, k));
let res = if kernel_l.is_contiguous() {
let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset())
.transpose(1, 2)?
.broadcast_as((b, k, n))?;
col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)?
} else {
// Make the kernel contiguous if not already the case.
let mut kernel_c = self.device().zeros_impl(kernel_l.shape(), kernel.dtype())?;
kernel.copy_strided_src(&mut kernel_c, 0, kernel_l)?;
let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset())
.transpose(1, 2)?
.broadcast_as((b, k, n))?;
col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)?
};
let res_l = Layout::contiguous((b, l_out, n)).transpose(1, 2)?;
let mut res_t = self.device().zeros_impl(res_l.shape(), res.dtype())?;
res.copy_strided_src(&mut res_t, 0, &res_l)?;
Ok(res_t)
}
fn conv_transpose1d(
&self,
layout: &Layout,
k: &Self,
k_layout: &Layout,
params: &ParamsConvTranspose1D,
) -> Result<Self> {
const USE_COL2IM_CONV1D_TR: bool = true;
let can_use_col2im = k_layout.is_contiguous()
&& params.dilation == 1
&& params.padding == 0
&& params.output_padding == 0;
let l_out = params.l_out();
let dst_el = params.c_out * l_out * params.b_size;
let buffer = if USE_COL2IM_CONV1D_TR && can_use_col2im {
let (b_size, c_in, l_in) = layout.shape().dims3()?;
let (c_in2, c_out, k_size) = k_layout.shape().dims3()?;
if c_in != c_in2 {
crate::bail!(
"convtr1d: shape mismatch on c_in {:?} {:?}",
layout.shape(),
k_layout.shape()
)
}
let buffer = self
.device
.new_buffer(dst_el, self.dtype, "conv_transpose1d")?;
let name = match self.dtype {
DType::F32 => "col2im1d_f32",
DType::U32 => "col2im1d_u32",
DType::U8 => "col2im1d_u8",
dtype => crate::bail!("metal col2im1d {dtype:?} not implemented"),
};
let col = {
// This merges the last two dimensions of the kernel together.
let kernel_l_mm = Layout::new(
(b_size, c_in, k_size * c_out).into(),
vec![0, k_size * c_out, 1],
k_layout.start_offset(),
);
self.matmul(
k,
(b_size, l_in, c_out * k_size, c_in),
&layout.transpose(1, 2)?,
&kernel_l_mm,
)?
};
// It is important for the command buffer to be obtained *after* the matmul
        // kernel has run, otherwise we might use a command buffer that has already been
        // committed, resulting in the following error:
// _status < MTLCommandBufferStatusCommitted >
// -[IOGPUMetalCommandBuffer setCurrentCommandEncoder:]
let command_buffer = self.device.command_buffer()?;
candle_metal_kernels::call_col2im1d(
&self.device.device,
&command_buffer,
&self.device.kernels,
name,
&[b_size, l_in, c_out, k_size],
params.k_size,
params.stride,
BufferOffset::zero_offset(&col.buffer),
&buffer,
)
.map_err(MetalError::from)?;
buffer
} else {
let buffer = self
.device
.new_buffer(dst_el, self.dtype, "conv_transpose1d")?;
let command_buffer = self.device.command_buffer()?;
let name = match self.dtype {
DType::F32 => "conv_transpose1d_f32",
DType::F16 => "conv_transpose1d_f16",
DType::BF16 => "conv_transpose1d_bf16",
DType::U32 => "conv_transpose1d_u32",
DType::U8 => "conv_transpose1d_u8",
dtype => crate::bail!("Metal conv_transpose1d {dtype:?} not implemented"),
};
candle_metal_kernels::call_conv_transpose1d(
&self.device.device,
&command_buffer,
&self.device.kernels,
name,
params.dilation,
params.stride,
params.padding,
params.output_padding,
params.c_out,
l_out,
params.b_size,
layout.dims(),
layout.stride(),
k_layout.dims(),
k_layout.stride(),
&self.buffer,
layout.start_offset() * self.dtype.size_in_bytes(),
&k.buffer,
k_layout.start_offset() * k.dtype.size_in_bytes(),
&buffer,
)
.map_err(MetalError::from)?;
buffer
};
Ok(Self::new(buffer, self.device.clone(), dst_el, self.dtype))
}
fn conv2d(
&self,
layout: &Layout,
kernel: &Self,
kernel_l: &Layout,
params: &ParamsConv2D,
) -> Result<Self> {
let device = self.device().clone();
let shape = layout.shape();
let dims = shape.dims();
let stride = params.stride;
let dilation = params.dilation;
let padding = params.padding;
let h_k = params.k_h;
let w_k = params.k_w;
let h = dims[2];
let w = dims[3];
let h_out = (h + 2 * padding - dilation * (h_k - 1) - 1) / stride + 1;
let w_out = (w + 2 * padding - dilation * (w_k - 1) - 1) / stride + 1;
let dst_el = dims[0] * h_out * w_out * dims[1] * h_k * w_k;
let dst = self
.device
.new_buffer(dst_el, self.dtype, "conv2d_im2col")?;
let command_buffer = self.device.command_buffer()?;
let name = match self.dtype {
DType::F32 => "im2col_f32",
DType::F16 => "im2col_f16",
DType::BF16 => "im2col_bf16",
DType::U8 => "im2col_u8",
DType::U32 => "im2col_u32",
dtype => crate::bail!("Metal conv2d {dtype:?} not implemented"),
};
let src = buffer_o(&self.buffer, layout, self.dtype);
candle_metal_kernels::call_im2col_strided(
&self.device.device,
&command_buffer,
&self.device.kernels,
name,
layout.shape().dims(),
layout.stride(),
(h_k, w_k, stride, padding, dilation),
src,
&dst,
)
.map_err(MetalError::from)?;
let col = Self {
buffer: dst,
device,
count: dst_el,
dtype: self.dtype,
};
let h_out = params.out_h();
let w_out = params.out_w();
let b = params.b_size;
let n = params.c_out;
let k = params.k_h * params.k_w * params.c_in;
let m = h_out * w_out;
let col_l = Layout::contiguous((b, m, k));
let res = if kernel_l.is_contiguous() {
let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset())
.transpose(1, 2)?
.broadcast_as((b, k, n))?;
col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)?
} else {
// Make the kernel contiguous if not already the case.
let mut kernel_c = self.device().zeros_impl(kernel_l.shape(), kernel.dtype())?;
kernel.copy_strided_src(&mut kernel_c, 0, kernel_l)?;
let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset())
.transpose(1, 2)?
.broadcast_as((b, k, n))?;
col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)?
};
let res_l = Layout::contiguous((b, h_out, w_out, n))
.transpose(1, 2)?
.transpose(1, 3)?;
let mut res_t = self.device().zeros_impl(res_l.shape(), res.dtype())?;
res.copy_strided_src(&mut res_t, 0, &res_l)?;
Ok(res_t)
}
fn conv_transpose2d(
&self,
l: &Layout,
kernel: &Self,
kernel_l: &Layout,
params: &ParamsConvTranspose2D,
) -> Result<Self> {
// Kernel shape: (c_in_k, c_out, h_k, w_k)
// Input shape: (b_size, c_in, h_in, w_in)
let (out_w, out_h) = (params.out_w(), params.out_h());
let dst_el = params.c_out * out_w * out_h * params.b_size;
let dims = l.dims();
if dims.len() != 4 {
crate::bail!("unexpected input shape for conv_transpose2d {dims:?}, expected 4")
}
let k_dims = kernel_l.dims();
if k_dims.len() != 4 {
crate::bail!("unexpected kernel shape for conv_transpose2d {k_dims:?}, expected 4")
}
let buffer = self
.device
.new_buffer(dst_el, self.dtype, "conv_transpose2d")?;
let command_buffer = self.device.command_buffer()?;
let name = match self.dtype {
DType::F32 => "conv_transpose2d_f32",
DType::F16 => "conv_transpose2d_f16",
DType::BF16 => "conv_transpose2d_bf16",
dtype => crate::bail!("Metal conv_transpose2d {dtype:?} not implemented"),
};
candle_metal_kernels::call_conv_transpose2d(
&self.device.device,
&command_buffer,
&self.device.kernels,
name,
CallConvTranspose2dCfg {
dilation: params.dilation,
stride: params.stride,
padding: params.padding,
output_padding: params.output_padding,
c_out: params.c_out,
out_h,
out_w,
b_size: params.b_size,
input_dims: l.dims(),
input_stride: l.stride(),
kernel_dims: kernel_l.dims(),
kernel_stride: kernel_l.stride(),
input_offset: l.start_offset() * self.dtype.size_in_bytes(),
kernel_offset: kernel_l.start_offset() * kernel.dtype.size_in_bytes(),
},
&self.buffer,
&kernel.buffer,
&buffer,
)
.map_err(MetalError::from)?;
Ok(Self::new(buffer, self.device.clone(), dst_el, self.dtype))
}
fn avg_pool2d(
&self,
inp_l: &Layout,
(w_k, h_k): (usize, usize),
(w_stride, h_stride): (usize, usize),
) -> Result<Self> {
let shape = inp_l.shape();
let (b_size, channels, width, height) = shape.dims4()?;
let strides = inp_l.stride();
let name = match self.dtype {
DType::F32 => "avg_pool2d_f32",
DType::F16 => "avg_pool2d_f16",
DType::BF16 => "avg_pool2d_bf16",
DType::U8 => "avg_pool2d_u8",
DType::U32 => "avg_pool2d_u32",
dtype => crate::bail!("Metal avg_pool2d {dtype:?} not implemented"),
};
let out_w = (width - w_k) / w_stride + 1;
let out_h = (height - h_k) / h_stride + 1;
let dst_el = out_w * out_h * b_size * channels;
let buffer = self.device.new_buffer(dst_el, self.dtype, "avg_pool2d")?;
let command_buffers = self.device.command_buffer()?;
candle_metal_kernels::call_pool2d(
&self.device.device,
&command_buffers,
&self.device.kernels,
name,
inp_l.dims(),
strides,
out_w,
out_h,
w_k,
h_k,
w_stride,
h_stride,
&self.buffer,
&buffer,
)
.map_err(MetalError::from)?;
Ok(Self::new(buffer, self.device.clone(), dst_el, self.dtype))
}
fn max_pool2d(
&self,
inp_l: &Layout,
(w_k, h_k): (usize, usize),
(w_stride, h_stride): (usize, usize),
) -> Result<Self> {
let shape = inp_l.shape();
let (b_size, channels, width, height) = shape.dims4()?;
let strides = inp_l.stride();
let name = match self.dtype {
DType::F32 => "max_pool2d_f32",
DType::F16 => "max_pool2d_f16",
DType::BF16 => "max_pool2d_bf16",
DType::U8 => "max_pool2d_u8",
DType::U32 => "max_pool2d_u32",
dtype => crate::bail!("Metal max_pool2d {dtype:?} not implemented"),
};
let out_w = (width - w_k) / w_stride + 1;
let out_h = (height - h_k) / h_stride + 1;
let dst_el = out_w * out_h * b_size * channels;
let buffer = self.device.new_buffer(dst_el, self.dtype, "max_pool2d")?;
let command_buffers = self.device.command_buffer()?;
candle_metal_kernels::call_pool2d(
&self.device.device,
&command_buffers,
&self.device.kernels,
name,
inp_l.dims(),
strides,
out_w,
out_h,
w_k,
h_k,
w_stride,
h_stride,
&self.buffer,
&buffer,
)
.map_err(MetalError::from)?;
Ok(Self::new(buffer, self.device.clone(), dst_el, self.dtype))
}
fn upsample_nearest1d(&self, _: &Layout, _: usize) -> Result<Self> {
crate::bail!("Metal upsample_nearest1d not implemented")
}
fn upsample_nearest2d(&self, inp_l: &Layout, out_w: usize, out_h: usize) -> Result<Self> {
// let inp = &inp.slice(inp_l.start_offset()..);
let shape = inp_l.shape();
let dims = shape.dims();
let strides = inp_l.stride();
if dims.len() != 4 {
crate::bail!("unexpected input shape for upsample {dims:?}")
}
let name = match self.dtype {
DType::F32 => "upsample_nearest2d_f32",
DType::F16 => "upsample_nearest2d_f16",
DType::BF16 => "upsample_nearest2d_bf16",
DType::U8 => "upsample_nearest2d_u8",
DType::U32 => "upsample_nearest2d_u32",
dtype => crate::bail!("Metal upsample_nearest2d {dtype:?} not implemented"),
};
let dst_el = out_w * out_h * dims[0] * dims[1];
let buffer = self
.device
.new_buffer(dst_el, self.dtype, "upsample_nearest2d")?;
let command_buffer = self.device.command_buffer()?;
let src = buffer_o(&self.buffer, inp_l, self.dtype);
candle_metal_kernels::call_upsample_nearest_2d(
&self.device.device,
&command_buffer,
&self.device.kernels,
name,
dims,
strides,
out_w,
out_h,
src,
&buffer,
)
.map_err(MetalError::from)?;
Ok(Self::new(buffer, self.device.clone(), dst_el, self.dtype))
}
fn gather(&self, src_l: &Layout, ids: &Self, ids_l: &Layout, dim: usize) -> Result<Self> {
if !ids_l.is_contiguous() {
return Err(crate::Error::RequiresContiguous { op: "gather" }.bt());
};
let ids_el = ids_l.dims()[dim];
let dst_el = ids_l.shape().elem_count();
let dtype = self.dtype;
let device = self.device();
let buffer = device.new_buffer(dst_el, dtype, "gather")?;
let name = match (ids.dtype, self.dtype) {
(DType::U32, DType::F32) => "gather_u32_f32",
(DType::U32, DType::F16) => "gather_u32_f16",
(DType::U32, DType::BF16) => "gather_u32_bf16",
(DType::U32, DType::U32) => "gather_u32_u32",
(DType::U32, DType::I64) => "gather_u32_i64",
(DType::I64, DType::F32) => "gather_i64_f32",
(DType::I64, DType::F16) => "gather_i64_f16",
(DType::I64, DType::BF16) => "gather_i64_bf16",
(DType::I64, DType::U32) => "gather_i64_u32",
(DType::I64, DType::I64) => "gather_i64_i64",
(left, right) => crate::bail!("Metal gather {left:?} {right:?} not implemented"),
};
let command_buffer = self.device.command_buffer()?;
let src = buffer_o(&self.buffer, src_l, dtype);
let ids = buffer_o(&ids.buffer, ids_l, ids.dtype);
candle_metal_kernels::call_gather(
&device.device,
&command_buffer,
&self.device.kernels,
name,
src_l.dims(),
ids_el,
dim,
src,
ids,
&buffer,
)
.map_err(MetalError::from)?;
Ok(Self::new(buffer, device.clone(), dst_el, dtype))
}
fn scatter_add(
&self,
l: &Layout,
ids: &Self,
ids_l: &Layout,
src: &Self,
src_l: &Layout,
dim: usize,
) -> Result<Self> {
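        // Start from a copy of `self` so the scatter-add accumulates on top of the existing values.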
let mut acc = self.device.zeros_impl(l.shape(), self.dtype())?;
self.copy_strided_src(&mut acc, 0, l)?;
if !ids_l.is_contiguous() || !src_l.is_contiguous() {
return Err(crate::Error::RequiresContiguous { op: "scatter-add" }.bt());
};
let name = match (ids.dtype, self.dtype) {
(DType::U8, DType::F32) => "sa_u8_f32",
(DType::U8, DType::F16) => "sa_u8_f16",
(DType::U8, DType::BF16) => "sa_u8_bf16",
(DType::U32, DType::U32) => "sa_u32_u32",
(DType::U32, DType::F32) => "sa_u32_f32",
(DType::U32, DType::F16) => "sa_u32_f16",
(DType::U32, DType::BF16) => "sa_u32_bf16",
(DType::I64, DType::F32) => "sa_i64_f32",
(DType::I64, DType::F16) => "sa_i64_f16",
(DType::I64, DType::BF16) => "sa_i64_bf16",
_ => Err(MetalError::UnexpectedDType {
msg: "scatter-add ids should be u8/u32/i64",
expected: DType::U32,
got: ids.dtype(),
})?,
};
let command_buffer = self.device.command_buffer()?;
let src = buffer_o(&src.buffer, src_l, src.dtype);
let ids = buffer_o(&ids.buffer, ids_l, ids.dtype);
candle_metal_kernels::call_scatter_add(
&self.device.device,
&command_buffer,
&self.device.kernels,
name,
src_l.dims(),
l.dims(),
dim,
src,
ids,
&acc.buffer,
)
.map_err(MetalError::from)?;
Ok(acc)
}
fn index_select(&self, ids: &Self, src_l: &Layout, ids_l: &Layout, dim: usize) -> Result<Self> {
if !ids_l.is_contiguous() {
crate::bail!("Metal index_select requires contiguous ids")
}
let left_size: usize = src_l.dims()[..dim].iter().product();
let right_size: usize = src_l.dims()[dim + 1..].iter().product();
let ids_el = ids_l.shape().elem_count();
let dst_el = ids_el * left_size * right_size;
let dtype = self.dtype;
let device = self.device();
let buffer = device.new_buffer(dst_el, dtype, "index_select")?;
let name = match (ids.dtype, self.dtype) {
(DType::U8, DType::U8) => "is_u8_u8",
(DType::U8, DType::U32) => "is_u8_u32",
(DType::U8, DType::I64) => "is_u8_i64",
(DType::U8, DType::BF16) => "is_u8_bf16",
(DType::U8, DType::F32) => "is_u8_f32",
(DType::U8, DType::F16) => "is_u8_f16",
(DType::U32, DType::U8) => "is_u32_u8",
(DType::U32, DType::U32) => "is_u32_u32",
(DType::U32, DType::I64) => "is_u32_i64",
(DType::U32, DType::F32) => "is_u32_f32",
(DType::U32, DType::F16) => "is_u32_f16",
(DType::U32, DType::BF16) => "is_u32_bf16",
(DType::I64, DType::U8) => "is_i64_u8",
(DType::I64, DType::U32) => "is_i64_u32",
(DType::I64, DType::I64) => "is_i64_i64",
(DType::I64, DType::F32) => "is_i64_f32",
(DType::I64, DType::F16) => "is_i64_f16",
(DType::I64, DType::BF16) => "is_i64_bf16",
(left, right) => {
crate::bail!("Metal contiguous index_select {left:?} {right:?} not implemented")
}
};
let command_buffer = self.device.command_buffer()?;
let src = buffer_o(&self.buffer, src_l, dtype);
let ids = buffer_o(&ids.buffer, ids_l, ids.dtype);
candle_metal_kernels::call_index_select(
&device.device,
&command_buffer,
&self.device.kernels,
name,
src_l.dims(),
ids_el,
dim,
src_l.is_contiguous(),
src_l.dims(),
src_l.stride(),
src,
ids,
&buffer,
)
.map_err(MetalError::from)?;
Ok(Self::new(buffer, device.clone(), dst_el, dtype))
}
fn index_add(
&self,
l: &Layout,
ids: &Self,
ids_l: &Layout,
src: &Self,
src_l: &Layout,
dim: usize,
) -> Result<Self> {
let mut acc = self.device.zeros_impl(l.shape(), self.dtype())?;
self.copy_strided_src(&mut acc, 0, l)?;
if !ids_l.is_contiguous() || !src_l.is_contiguous() {
return Err(crate::Error::RequiresContiguous { op: "index-add" }.bt());
};
let name = match (ids.dtype, self.dtype) {
(DType::I64, DType::BF16) => "ia_i64_bf16",
(DType::I64, DType::F16) => "ia_i64_f16",
(DType::I64, DType::F32) => "ia_i64_f32",
(DType::I64, DType::I64) => "ia_i64_i64",
(DType::I64, DType::U32) => "ia_i64_u32",
(DType::I64, DType::U8) => "ia_i64_u8",
(DType::U32, DType::BF16) => "ia_u32_bf16",
(DType::U32, DType::F16) => "ia_u32_f16",
(DType::U32, DType::F32) => "ia_u32_f32",
(DType::U32, DType::I64) => "ia_u32_i64",
(DType::U32, DType::U32) => "ia_u32_u32",
(DType::U32, DType::U8) => "ia_u32_u8",
(DType::U8, DType::BF16) => "ia_u8_bf16",
(DType::U8, DType::F16) => "ia_u8_f16",
(DType::U8, DType::F32) => "ia_u8_f32",
(DType::U8, DType::I64) => "ia_u8_i64",
(DType::U8, DType::U32) => "ia_u8_u32",
(DType::U8, DType::U8) => "ia_u8_u8",
_ => Err(MetalError::UnexpectedDType {
msg: "index-add ids should be u8/u32/i64",
expected: DType::U32,
got: ids.dtype(),
})?,
};
let command_buffer = self.device.command_buffer()?;
let src = buffer_o(&src.buffer, src_l, src.dtype);
let ids = buffer_o(&ids.buffer, ids_l, ids.dtype);
candle_metal_kernels::call_index_add(
&self.device.device,
&command_buffer,
&self.device.kernels,
name,
src_l.dims(),
l.dims(),
ids_l.dims(),
dim,
src,
ids,
&acc.buffer,
)
.map_err(MetalError::from)?;
Ok(acc)
}
fn matmul(
&self,
rhs: &Self,
(b, m, n, k): (usize, usize, usize, usize),
lhs_l: &Layout,
rhs_l: &Layout,
) -> Result<Self> {
let buffer = self.device.new_buffer(b * m * n, self.dtype, "matmul")?;
let command_buffer = self.device.command_buffer()?;
command_buffer.set_label("matmul");
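        // Both branches dispatch to the mlx gemm kernels; bf16 is handled explicitly
        // first, and the generic dtype mapping below covers the remaining dtypes.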
if self.dtype == DType::BF16 {
candle_metal_kernels::call_mlx_gemm(
&self.device.device,
&command_buffer,
&self.device.kernels,
candle_metal_kernels::GemmDType::BF16,
(b, m, n, k),
lhs_l.stride(),
lhs_l.start_offset() * self.dtype.size_in_bytes(),
&self.buffer,
rhs_l.stride(),
rhs_l.start_offset() * rhs.dtype.size_in_bytes(),
&rhs.buffer,
&buffer,
)
.map_err(MetalError::from)?;
} else {
let dtype = match self.dtype {
DType::F32 => candle_metal_kernels::GemmDType::F32,
DType::F16 => candle_metal_kernels::GemmDType::F16,
DType::BF16 => candle_metal_kernels::GemmDType::BF16,
dtype => {
return Err(MetalError::Message(format!(
"mlx matmul doesn't support {dtype:?}"
))
.into())
}
};
candle_metal_kernels::call_mlx_gemm(
&self.device.device,
&command_buffer,
&self.device.kernels,
dtype,
(b, m, n, k),
lhs_l.stride(),
lhs_l.start_offset() * self.dtype.size_in_bytes(),
&self.buffer,
rhs_l.stride(),
rhs_l.start_offset() * rhs.dtype.size_in_bytes(),
&rhs.buffer,
&buffer,
)
.map_err(MetalError::from)?;
}
Ok(Self::new(
buffer,
self.device.clone(),
b * m * n,
self.dtype(),
))
}
fn copy2d(
&self,
dst: &mut Self,
d1: usize,
d2: usize,
src_s: usize,
dst_s: usize,
src_o: usize,
dst_o: usize,
) -> Result<()> {
if self.dtype() != dst.dtype() {
crate::bail!(
"copy2d with inconsistent dtypes {:?} {:?}",
self.dtype(),
dst.dtype()
)
}
let command_buffer = self.device.command_buffer()?;
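        // When both row strides equal the row length, the 2d copy is one contiguous
        // block and can go through the blit encoder instead of a compute kernel.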
if src_s == d2 && dst_s == d2 {
command_buffer.set_label("copy2d_contiguous");
let blit = command_buffer.new_blit_command_encoder();
blit.set_label("copy2d_contiguous");
let src_offset = (src_o * self.dtype.size_in_bytes()) as NSUInteger;
let length = (d1 * d2 * self.dtype.size_in_bytes()) as NSUInteger;
let dst_offset = (dst_o * dst.dtype().size_in_bytes()) as NSUInteger;
blit.copy_from_buffer(&self.buffer, src_offset, dst.buffer(), dst_offset, length);
blit.end_encoding();
} else {
let el_count = d1 * d2;
if el_count == 0 {
return Ok(());
}
let kernel_name = match self.dtype {
DType::F32 => candle_metal_kernels::copy2d::FLOAT,
DType::F16 => candle_metal_kernels::copy2d::HALF,
DType::BF16 => candle_metal_kernels::copy2d::BFLOAT,
DType::I64 => candle_metal_kernels::copy2d::I64,
DType::U32 => candle_metal_kernels::copy2d::U32,
DType::U8 => candle_metal_kernels::copy2d::U8,
dtype => crate::bail!("Metal copy2d {dtype:?} not implemented"),
};
candle_metal_kernels::call_copy2d(
&self.device.device,
&command_buffer,
&self.device.kernels,
kernel_name,
&self.buffer,
&dst.buffer,
d1,
d2,
src_s,
dst_s,
src_o * self.dtype.size_in_bytes(),
dst_o * self.dtype.size_in_bytes(),
)
.map_err(MetalError::from)?;
command_buffer.set_label("copy2d");
}
Ok(())
}
fn copy_strided_src(&self, dst: &mut Self, dst_offset: usize, src_l: &Layout) -> Result<()> {
let command_buffer = self.device.command_buffer()?;
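        // Contiguous same-dtype copies use the blit encoder; everything else falls
        // back to the strided unary copy kernel below.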
if src_l.is_contiguous() && self.dtype == dst.dtype() {
command_buffer.set_label("copy_contiguous");
let blit = command_buffer.new_blit_command_encoder();
blit.set_label("copy_contiguous");
let src_offset = (src_l.start_offset() * self.dtype.size_in_bytes()) as NSUInteger;
let length = (src_l.shape().elem_count() * self.dtype.size_in_bytes()) as NSUInteger;
let dst_offset = (dst_offset * dst.dtype().size_in_bytes()) as NSUInteger;
blit.copy_from_buffer(&self.buffer, src_offset, dst.buffer(), dst_offset, length);
blit.end_encoding();
} else {
let src_shape = src_l.shape();
let el_count = src_shape.elem_count();
if el_count == 0 {
return Ok(());
}
let kernel_name = match self.dtype {
DType::F32 => candle_metal_kernels::unary::strided::copy::FLOAT,
DType::F16 => candle_metal_kernels::unary::strided::copy::HALF,
DType::BF16 => candle_metal_kernels::unary::strided::copy::BFLOAT,
DType::I64 => candle_metal_kernels::unary::strided::copy::I64,
DType::U32 => candle_metal_kernels::unary::strided::copy::U32,
DType::U8 => candle_metal_kernels::unary::strided::copy::U8,
dtype => crate::bail!("Metal copy_strided {dtype:?} not implemented"),
};
let src = buffer_o(&self.buffer, src_l, self.dtype);
let dst = BufferOffset {
buffer: &dst.buffer,
offset_in_bytes: dst_offset * dst.dtype.size_in_bytes(),
};
candle_metal_kernels::call_unary_strided(
&self.device.device,
&command_buffer,
&self.device.kernels,
kernel_name,
src_l.dims(),
src,
src_l.stride(),
dst,
)
.map_err(MetalError::from)?;
command_buffer.set_label("copy_strided");
}
Ok(())
}
}
impl MetalStorage {
pub fn new(buffer: Arc<Buffer>, device: MetalDevice, count: usize, dtype: DType) -> Self {
Self {
buffer,
device,
count,
dtype,
}
}
pub fn buffer(&self) -> &Buffer {
&self.buffer
}
pub fn binary(
&self,
op: &'static str,
rhs: &Self,
lhs_l: &Layout,
rhs_l: &Layout,
) -> Result<Self> {
let device = self.device();
let shape = lhs_l.shape();
let el_count = shape.elem_count();
let command_buffer = device.command_buffer()?;
let lhs = buffer_o(&self.buffer, lhs_l, self.dtype);
let rhs = buffer_o(&rhs.buffer, rhs_l, rhs.dtype);
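        // Ops prefixed with "b" (badd, bsub, ...) always take the strided/broadcast
        // path, even when both operands are contiguous.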
let (buffer, dtype) = if lhs_l.is_contiguous() && rhs_l.is_contiguous() && &op[..1] != "b" {
use candle_metal_kernels::binary::contiguous;
let (kernel_name, dtype) = match (op, self.dtype) {
("add", DType::F32) => (contiguous::add::FLOAT, self.dtype),
("sub", DType::F32) => (contiguous::sub::FLOAT, self.dtype),
("mul", DType::F32) => (contiguous::mul::FLOAT, self.dtype),
("div", DType::F32) => (contiguous::div::FLOAT, self.dtype),
("eq", DType::F32) => (contiguous::eq::FLOAT, DType::U8),
("ne", DType::F32) => (contiguous::ne::FLOAT, DType::U8),
("le", DType::F32) => (contiguous::le::FLOAT, DType::U8),
("lt", DType::F32) => (contiguous::lt::FLOAT, DType::U8),
("ge", DType::F32) => (contiguous::ge::FLOAT, DType::U8),
("gt", DType::F32) => (contiguous::gt::FLOAT, DType::U8),
("add", DType::F16) => (contiguous::add::HALF, self.dtype),
("sub", DType::F16) => (contiguous::sub::HALF, self.dtype),
("mul", DType::F16) => (contiguous::mul::HALF, self.dtype),
("div", DType::F16) => (contiguous::div::HALF, self.dtype),
("eq", DType::F16) => (contiguous::eq::HALF, DType::U8),
("ne", DType::F16) => (contiguous::ne::HALF, DType::U8),
("le", DType::F16) => (contiguous::le::HALF, DType::U8),
("lt", DType::F16) => (contiguous::lt::HALF, DType::U8),
("ge", DType::F16) => (contiguous::ge::HALF, DType::U8),
("gt", DType::F16) => (contiguous::gt::HALF, DType::U8),
("add", DType::BF16) => (contiguous::add::BFLOAT, self.dtype),
("sub", DType::BF16) => (contiguous::sub::BFLOAT, self.dtype),
("mul", DType::BF16) => (contiguous::mul::BFLOAT, self.dtype),
("div", DType::BF16) => (contiguous::div::BFLOAT, self.dtype),
("eq", DType::BF16) => (contiguous::eq::BFLOAT, DType::U8),
("ne", DType::BF16) => (contiguous::ne::BFLOAT, DType::U8),
("le", DType::BF16) => (contiguous::le::BFLOAT, DType::U8),
("lt", DType::BF16) => (contiguous::lt::BFLOAT, DType::U8),
("ge", DType::BF16) => (contiguous::ge::BFLOAT, DType::U8),
("gt", DType::BF16) => (contiguous::gt::BFLOAT, DType::U8),
("add", DType::I64) => (contiguous::add::I64, self.dtype),
("sub", DType::I64) => (contiguous::sub::I64, self.dtype),
("mul", DType::I64) => (contiguous::mul::I64, self.dtype),
("div", DType::I64) => (contiguous::div::I64, self.dtype),
("eq", DType::I64) => (contiguous::eq::I64, DType::U8),
("ne", DType::I64) => (contiguous::ne::I64, DType::U8),
("le", DType::I64) => (contiguous::le::I64, DType::U8),
("lt", DType::I64) => (contiguous::lt::I64, DType::U8),
("ge", DType::I64) => (contiguous::ge::I64, DType::U8),
("gt", DType::I64) => (contiguous::gt::I64, DType::U8),
("add", DType::U32) => (contiguous::add::U32, self.dtype),
("sub", DType::U32) => (contiguous::sub::U32, self.dtype),
("mul", DType::U32) => (contiguous::mul::U32, self.dtype),
("div", DType::U32) => (contiguous::div::U32, self.dtype),
("eq", DType::U32) => (contiguous::eq::U32, DType::U8),
("ne", DType::U32) => (contiguous::ne::U32, DType::U8),
("le", DType::U32) => (contiguous::le::U32, DType::U8),
("lt", DType::U32) => (contiguous::lt::U32, DType::U8),
("ge", DType::U32) => (contiguous::ge::U32, DType::U8),
("gt", DType::U32) => (contiguous::gt::U32, DType::U8),
("add", DType::U8) => (contiguous::add::U8, self.dtype),
("sub", DType::U8) => (contiguous::sub::U8, self.dtype),
("mul", DType::U8) => (contiguous::mul::U8, self.dtype),
("div", DType::U8) => (contiguous::div::U8, self.dtype),
("eq", DType::U8) => (contiguous::eq::U8, DType::U8),
("ne", DType::U8) => (contiguous::ne::U8, DType::U8),
("le", DType::U8) => (contiguous::le::U8, DType::U8),
("lt", DType::U8) => (contiguous::lt::U8, DType::U8),
("ge", DType::U8) => (contiguous::ge::U8, DType::U8),
("gt", DType::U8) => (contiguous::gt::U8, DType::U8),
(name, dtype) => {
crate::bail!("Metal contiguous binary {name} {dtype:?} not implemented")
}
};
let buffer = device.new_buffer(el_count, dtype, op)?;
candle_metal_kernels::call_binary_contiguous(
&device.device,
&command_buffer,
&device.kernels,
kernel_name,
el_count,
lhs,
rhs,
&buffer,
)
.map_err(MetalError::from)?;
(buffer, dtype)
} else {
use candle_metal_kernels::binary::strided;
let (kernel_name, dtype) = match (op, self.dtype) {
("badd", DType::F32) => (strided::add::FLOAT, self.dtype),
("bsub", DType::F32) => (strided::sub::FLOAT, self.dtype),
("bmul", DType::F32) => (strided::mul::FLOAT, self.dtype),
("bdiv", DType::F32) => (strided::div::FLOAT, self.dtype),
("bminimum", DType::F32) => (strided::min::FLOAT, self.dtype),
("bmaximum", DType::F32) => (strided::max::FLOAT, self.dtype),
("eq", DType::F32) => (strided::eq::FLOAT, DType::U8),
("ne", DType::F32) => (strided::ne::FLOAT, DType::U8),
("le", DType::F32) => (strided::le::FLOAT, DType::U8),
("lt", DType::F32) => (strided::lt::FLOAT, DType::U8),
("ge", DType::F32) => (strided::ge::FLOAT, DType::U8),
("gt", DType::F32) => (strided::gt::FLOAT, DType::U8),
("badd", DType::F16) => (strided::add::HALF, self.dtype),
("bsub", DType::F16) => (strided::sub::HALF, self.dtype),
("bmul", DType::F16) => (strided::mul::HALF, self.dtype),
("bdiv", DType::F16) => (strided::div::HALF, self.dtype),
("bminimum", DType::F16) => (strided::min::HALF, self.dtype),
("bmaximum", DType::F16) => (strided::max::HALF, self.dtype),
("eq", DType::F16) => (strided::eq::HALF, DType::U8),
("ne", DType::F16) => (strided::ne::HALF, DType::U8),
("le", DType::F16) => (strided::le::HALF, DType::U8),
("lt", DType::F16) => (strided::lt::HALF, DType::U8),
("ge", DType::F16) => (strided::ge::HALF, DType::U8),
("gt", DType::F16) => (strided::gt::HALF, DType::U8),
("badd", DType::BF16) => (strided::add::BFLOAT, self.dtype),
("bsub", DType::BF16) => (strided::sub::BFLOAT, self.dtype),
("bmul", DType::BF16) => (strided::mul::BFLOAT, self.dtype),
("bdiv", DType::BF16) => (strided::div::BFLOAT, self.dtype),
("bminimum", DType::BF16) => (strided::min::BFLOAT, self.dtype),
("bmaximum", DType::BF16) => (strided::max::BFLOAT, self.dtype),
("eq", DType::BF16) => (strided::eq::BFLOAT, DType::U8),
("ne", DType::BF16) => (strided::ne::BFLOAT, DType::U8),
("le", DType::BF16) => (strided::le::BFLOAT, DType::U8),
("lt", DType::BF16) => (strided::lt::BFLOAT, DType::U8),
("ge", DType::BF16) => (strided::ge::BFLOAT, DType::U8),
("gt", DType::BF16) => (strided::gt::BFLOAT, DType::U8),
("badd", DType::I64) => (strided::add::I64, self.dtype),
("bsub", DType::I64) => (strided::sub::I64, self.dtype),
("bmul", DType::I64) => (strided::mul::I64, self.dtype),
("bdiv", DType::I64) => (strided::div::I64, self.dtype),
("bminimum", DType::I64) => (strided::min::I64, self.dtype),
("bmaximum", DType::I64) => (strided::max::I64, self.dtype),
("eq", DType::I64) => (strided::eq::I64, DType::U8),
("ne", DType::I64) => (strided::ne::I64, DType::U8),
("le", DType::I64) => (strided::le::I64, DType::U8),
("lt", DType::I64) => (strided::lt::I64, DType::U8),
("ge", DType::I64) => (strided::ge::I64, DType::U8),
("gt", DType::I64) => (strided::gt::I64, DType::U8),
("badd", DType::U32) => (strided::add::U32, self.dtype),
("bsub", DType::U32) => (strided::sub::U32, self.dtype),
("bmul", DType::U32) => (strided::mul::U32, self.dtype),
("bdiv", DType::U32) => (strided::div::U32, self.dtype),
("bminimum", DType::U32) => (strided::min::U32, self.dtype),
("bmaximum", DType::U32) => (strided::max::U32, self.dtype),
("eq", DType::U32) => (strided::eq::U32, DType::U8),
("ne", DType::U32) => (strided::ne::U32, DType::U8),
("le", DType::U32) => (strided::le::U32, DType::U8),
("lt", DType::U32) => (strided::lt::U32, DType::U8),
("ge", DType::U32) => (strided::ge::U32, DType::U8),
("gt", DType::U32) => (strided::gt::U32, DType::U8),
("badd", DType::U8) => (strided::add::U8, self.dtype),
("bsub", DType::U8) => (strided::sub::U8, self.dtype),
("bmul", DType::U8) => (strided::mul::U8, self.dtype),
("bdiv", DType::U8) => (strided::div::U8, self.dtype),
("bminimum", DType::U8) => (strided::min::U8, self.dtype),
("bmaximum", DType::U8) => (strided::max::U8, self.dtype),
("eq", DType::U8) => (strided::eq::U8, DType::U8),
("ne", DType::U8) => (strided::ne::U8, DType::U8),
("le", DType::U8) => (strided::le::U8, DType::U8),
("lt", DType::U8) => (strided::lt::U8, DType::U8),
("ge", DType::U8) => (strided::ge::U8, DType::U8),
("gt", DType::U8) => (strided::gt::U8, DType::U8),
(name, dtype) => {
crate::bail!("Metal strided binary {name} {dtype:?} not implemented")
}
};
let buffer = device.new_buffer(el_count, dtype, op)?;
candle_metal_kernels::call_binary_strided(
&device.device,
&command_buffer,
&device.kernels,
kernel_name,
lhs_l.dims(),
lhs,
lhs_l.stride(),
rhs,
rhs_l.stride(),
&buffer,
)
.map_err(MetalError::from)?;
(buffer, dtype)
};
command_buffer.set_label("binary");
Ok(Self::new(buffer, device.clone(), el_count, dtype))
}
pub(crate) fn to_cpu<T: Clone>(&self) -> Result<Vec<T>> {
let size = (self.count * self.dtype.size_in_bytes()) as NSUInteger;
let buffer = self.device.new_buffer_managed(size)?;
{
let command_buffer = self.device.command_buffer()?;
command_buffer.set_label("to_cpu");
let blit = command_buffer.new_blit_command_encoder();
blit.set_label("blit_to_cpu");
blit.copy_from_buffer(&self.buffer, 0, &buffer, 0, size);
blit.end_encoding();
}
self.device.wait_until_completed()?;
Ok(read_to_vec(&buffer, self.count))
}
}
impl BackendDevice for MetalDevice {
type Storage = MetalStorage;
fn new(ordinal: usize) -> Result<Self> {
let device = metal::Device::all().swap_remove(ordinal);
let command_queue = device.new_command_queue();
let kernels = Arc::new(Kernels::new());
let seed = Arc::new(Mutex::new(device.new_buffer_with_data(
[299792458].as_ptr() as *const c_void,
4,
MTLResourceOptions::StorageModeManaged,
)));
let commands = device::Commands::new(command_queue)?;
Ok(Self {
id: DeviceId::new(),
device,
commands: Arc::new(RwLock::new(commands)),
buffers: Arc::new(RwLock::new(HashMap::new())),
kernels,
seed,
})
}
fn location(&self) -> crate::DeviceLocation {
crate::DeviceLocation::Metal {
gpu_id: self.registry_id() as usize,
}
}
fn same_device(&self, rhs: &Self) -> bool {
self.id == rhs.id
}
unsafe fn alloc_uninit(&self, shape: &Shape, dtype: DType) -> Result<MetalStorage> {
let buffer = self.new_buffer(shape.elem_count(), dtype, "alloc-uninit")?;
Ok(MetalStorage::new(
buffer,
self.clone(),
shape.elem_count(),
dtype,
))
}
fn zeros_impl(&self, shape: &Shape, dtype: DType) -> Result<MetalStorage> {
let size = shape.elem_count() * dtype.size_in_bytes();
let buffer = self.allocate_zeros(size)?;
Ok(MetalStorage::new(
buffer,
self.clone(),
shape.elem_count(),
dtype,
))
}
fn ones_impl(&self, shape: &Shape, dtype: DType) -> Result<MetalStorage> {
let name = match dtype {
DType::U8 => "fill_u8",
DType::U32 => "fill_u32",
DType::I64 => "fill_i64",
DType::F16 => "fill_f16",
DType::BF16 => "fill_bf16",
DType::F32 => "fill_f32",
DType::F64 => {
let cpu_storage = crate::cpu_backend::CpuDevice.ones_impl(shape, dtype)?;
return self.storage_from_cpu_storage(&cpu_storage);
}
};
let buffer = self.new_buffer(shape.elem_count(), dtype, "alloc-ones")?;
let command_buffer = self.command_buffer()?;
candle_metal_kernels::call_const_fill(
&self.device,
&command_buffer,
&self.kernels,
name,
shape.elem_count(),
&buffer,
1.,
)
.map_err(MetalError::from)?;
Ok(MetalStorage::new(
buffer,
self.clone(),
shape.elem_count(),
dtype,
))
}
fn storage_from_slice<T: crate::WithDType>(&self, s: &[T]) -> Result<Self::Storage> {
let (count, buffer) = match T::cpu_storage_ref(s) {
CpuStorageRef::U8(storage) => (storage.len(), self.new_buffer_with_data(storage)),
CpuStorageRef::U32(storage) => (storage.len(), self.new_buffer_with_data(storage)),
CpuStorageRef::I64(storage) => (storage.len(), self.new_buffer_with_data(storage)),
CpuStorageRef::BF16(storage) => (storage.len(), self.new_buffer_with_data(storage)),
CpuStorageRef::F16(storage) => (storage.len(), self.new_buffer_with_data(storage)),
CpuStorageRef::F32(storage) => (storage.len(), self.new_buffer_with_data(storage)),
CpuStorageRef::F64(storage) => (storage.len(), self.new_buffer_with_data(storage)),
};
Ok(Self::Storage::new(buffer?, self.clone(), count, T::DTYPE))
}
fn storage_from_cpu_storage(&self, storage: &CpuStorage) -> Result<Self::Storage> {
let (count, buffer) = match storage {
CpuStorage::U8(storage) => (storage.len(), self.new_buffer_with_data(storage)),
CpuStorage::U32(storage) => (storage.len(), self.new_buffer_with_data(storage)),
CpuStorage::I64(storage) => (storage.len(), self.new_buffer_with_data(storage)),
CpuStorage::BF16(storage) => (storage.len(), self.new_buffer_with_data(storage)),
CpuStorage::F16(storage) => (storage.len(), self.new_buffer_with_data(storage)),
CpuStorage::F32(storage) => (storage.len(), self.new_buffer_with_data(storage)),
CpuStorage::F64(storage) => (storage.len(), self.new_buffer_with_data(storage)),
};
Ok(Self::Storage::new(
buffer?,
self.clone(),
count,
storage.dtype(),
))
}
fn storage_from_cpu_storage_owned(&self, storage: CpuStorage) -> Result<Self::Storage> {
self.storage_from_cpu_storage(&storage)
}
fn rand_uniform(
&self,
shape: &Shape,
dtype: DType,
min: f64,
max: f64,
) -> Result<Self::Storage> {
let name = match dtype {
DType::F32 => "rand_uniform_f32",
DType::F16 => "rand_uniform_f16",
DType::BF16 => "rand_uniform_bf16",
dtype => crate::bail!("rand_uniform not implemented for {dtype:?}"),
};
let buffer = self.new_buffer(shape.elem_count(), dtype, "rand_uniform")?;
let command_buffer = self.command_buffer()?;
candle_metal_kernels::call_random_uniform(
&self.device,
&command_buffer,
&self.kernels,
name,
min as f32,
max as f32,
shape.elem_count(),
&self.seed.lock().unwrap(),
&buffer,
)
.map_err(MetalError::from)?;
Ok(Self::Storage::new(
buffer,
self.clone(),
shape.elem_count(),
dtype,
))
}
fn rand_normal(
&self,
shape: &Shape,
dtype: DType,
mean: f64,
stddev: f64,
) -> Result<Self::Storage> {
let name = match dtype {
DType::F32 => "rand_normal_f32",
DType::F16 => "rand_normal_f16",
DType::BF16 => "rand_normal_bf16",
dtype => crate::bail!("rand_uniform not implemented for {dtype:?}"),
};
let buffer = self.new_buffer(shape.elem_count(), dtype, "rand_normal")?;
let command_buffer = self.command_buffer()?;
candle_metal_kernels::call_random_normal(
&self.device,
&command_buffer,
&self.kernels,
name,
mean as f32,
stddev as f32,
shape.elem_count(),
&self.seed.lock().unwrap(),
&buffer,
)
.map_err(MetalError::from)?;
Ok(Self::Storage::new(
buffer,
self.clone(),
shape.elem_count(),
dtype,
))
}
fn set_seed(&self, seed: u64) -> Result<()> {
let seed: u32 = seed.try_into().map_err(|_| {
MetalError::Message("Metal seed must be less than or equal to u32::MAX".to_string())
})?;
let seed_buffer = self.seed.try_lock().map_err(MetalError::from)?;
let contents = seed_buffer.contents();
unsafe {
std::ptr::copy([seed].as_ptr(), contents as *mut u32, 1);
}
seed_buffer.did_modify_range(metal::NSRange::new(0, 4));
Ok(())
}
fn synchronize(&self) -> Result<()> {
self.wait_until_completed()
}
}
fn read_to_vec<T: Clone>(buffer: &Buffer, n: usize) -> Vec<T> {
let ptr = buffer.contents() as *const T;
assert!(!ptr.is_null());
let slice = unsafe { std::slice::from_raw_parts(ptr, n) };
slice.to_vec()
}
| candle/candle-core/src/metal_backend/mod.rs/0 | {
"file_path": "candle/candle-core/src/metal_backend/mod.rs",
"repo_id": "candle",
"token_count": 51267
} |
use crate::Result;
pub(super) fn nearest_int(v: f32) -> i32 {
v.round() as i32
}
/// Validates that the input and output are the right size and returns a vector pairing each
/// output block in `ys` with its corresponding input region in `xs`. Each input region is
/// guaranteed to be `T::BLCK_SIZE` elements long.
pub(super) fn group_for_quantization<'a, 'b, T: super::k_quants::GgmlType>(
xs: &'b [f32],
ys: &'a mut [T],
) -> Result<Vec<(&'a mut T, &'b [f32])>> {
let block_size = T::BLCK_SIZE;
let dtype = T::DTYPE;
let expected_blocks = xs.len() / block_size;
let actual_blocks = ys.len();
// Validate that the input is the right size
if expected_blocks != actual_blocks {
crate::bail!("quantize {dtype:?}: expected {expected_blocks} blocks but only {actual_blocks} were provided!")
}
Ok(ys.iter_mut().zip(xs.chunks_exact(block_size)).collect())
}
/// Validates that the input and output are the right size and returns a vector pairing each
/// input block in `xs` with its corresponding output region in `ys`. Each output region is
/// guaranteed to be `T::BLCK_SIZE` elements long.
pub(super) fn group_for_dequantization<'a, 'b, T: super::k_quants::GgmlType>(
xs: &'a [T],
ys: &'b mut [f32],
) -> Result<Vec<(&'a T, &'b mut [f32])>> {
let block_size = T::BLCK_SIZE;
let dtype = T::DTYPE;
let actual_output_len = ys.len();
let expected_output_len = xs.len() * block_size;
// Validate that the output is the right size
if expected_output_len != actual_output_len {
crate::bail!("dequantize {dtype:?}: ys (len = {actual_output_len}) does not match the expected length of {expected_output_len}!")
}
// Zip the blocks and outputs together
Ok(xs.iter().zip(ys.chunks_exact_mut(block_size)).collect())
}
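/// Extracts the 6-bit scale and 6-bit min for sub-block `j` from the packed 12-byte
/// scales/mins buffer used by the k-quant formats (e.g. q4_k and q5_k).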
pub(super) fn get_scale_min_k4(j: usize, q: &[u8]) -> (u8, u8) {
if j < 4 {
let d = q[j] & 63;
let m = q[j + 4] & 63;
(d, m)
} else {
let d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
let m = (q[j + 4] >> 4) | ((q[j] >> 6) << 4);
(d, m)
}
}
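/// Quantizes the `n` values pointed to by `x` to integer levels in `[-nmax, nmax - 1]`,
/// storing the levels shifted by `nmax` into `ls` and returning the scale. `rmse_type`
/// selects the weighting and iterative refinement used to reduce the reconstruction error.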
pub(super) unsafe fn make_qx_quants(
n: usize,
nmax: i32,
x: *const f32,
ls: *mut i8,
rmse_type: i32,
) -> f32 {
let mut max = 0f32;
let mut amax = 0f32;
for i in 0..n {
let x = *x.add(i);
let ax = x.abs();
if ax > amax {
amax = ax;
max = x;
}
}
if amax == 0. {
// all zero
for i in 0..n {
*ls.add(i) = 0;
}
return 0.;
}
let mut iscale = -(nmax as f32) / max;
if rmse_type == 0 {
for i in 0..n {
let x = *x.add(i);
let l = nearest_int(iscale * x);
*ls.add(i) = (nmax + l.clamp(-nmax, nmax - 1)) as i8;
}
return 1.0 / iscale;
}
let weight_type = rmse_type % 2;
let mut sumlx = 0f32;
let mut suml2 = 0f32;
for i in 0..n {
let x = *x.add(i);
let l = nearest_int(iscale * x);
let l = l.clamp(-nmax, nmax - 1);
*ls.add(i) = (l + nmax) as i8;
let w = if weight_type == 1 { x * x } else { 1.0 };
let l = l as f32;
sumlx += w * x * l;
suml2 += w * l * l;
}
let mut scale = sumlx / suml2;
let mut best = scale * sumlx;
for _itry in 0..3 {
let iscale = 1.0 / scale;
let mut slx = 0f32;
let mut sl2 = 0f32;
let mut changed = false;
for i in 0..n {
let x = *x.add(i);
let l = nearest_int(iscale * x);
let l = l.clamp(-nmax, nmax - 1);
if l + nmax != *ls.add(i) as i32 {
changed = true;
}
let w = if weight_type == 1 { x * x } else { 1f32 };
let l = l as f32;
slx += w * x * l;
sl2 += w * l * l;
}
if !changed || sl2 == 0.0 || slx * slx <= best * sl2 {
break;
}
for i in 0..n {
let x = *x.add(i);
let l = nearest_int(iscale * x);
*ls.add(i) = (nmax + l.clamp(-nmax, nmax - 1)) as i8;
}
sumlx = slx;
suml2 = sl2;
scale = sumlx / suml2;
best = scale * sumlx;
}
for _itry in 0..5 {
let mut n_changed = 0;
for i in 0..n {
let x = *x.add(i);
let w = if weight_type == 1 { x * x } else { 1. };
let l = *ls.add(i) as i32 - nmax;
let mut slx = sumlx - w * x * l as f32;
if slx > 0. {
let mut sl2 = suml2 - w * l as f32 * l as f32;
let new_l = nearest_int(x * sl2 / slx);
let new_l = new_l.clamp(-nmax, nmax - 1);
if new_l != l {
slx += w * x * new_l as f32;
sl2 += w * new_l as f32 * new_l as f32;
if sl2 > 0. && slx * slx * suml2 > sumlx * sumlx * sl2 {
*ls.add(i) = (nmax + new_l) as i8;
sumlx = slx;
suml2 = sl2;
scale = sumlx / suml2;
best = scale * sumlx;
n_changed += 1;
}
}
}
}
if n_changed == 0 {
break;
}
}
if rmse_type < 3 {
return scale;
}
for is in -4..4 {
if is == 0 {
continue;
}
iscale = -(nmax as f32 + 0.1f32 * is as f32) / max;
let mut sumlx = 0.;
let mut suml2 = 0.;
for i in 0..n {
let x = *x.add(i);
let l = nearest_int(iscale * x);
let l = l.clamp(-nmax, nmax - 1);
let w = if weight_type == 1 { x * x } else { 1. };
let l = l as f32;
sumlx += w * x * l;
suml2 += w * l * l;
}
if suml2 > 0. && sumlx * sumlx > best * suml2 {
for i in 0..n {
let x = *x.add(i);
let l = nearest_int(iscale * x);
*ls.add(i) = (nmax + l.clamp(-nmax, nmax - 1)) as i8;
}
scale = sumlx / suml2;
best = scale * sumlx;
}
}
scale
}
// https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L224
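/// Affine quantization of `x` to unsigned levels in `[0, nmax]`, refined over `ntry`
/// iterations; returns `(scale, -min)`.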
pub(super) fn make_qkx1_quants(nmax: i32, ntry: usize, x: &[f32]) -> (f32, f32) {
let n = x.len();
let mut l = vec![0; n];
// Get min/max
let min = *x
.iter()
.take(n)
.min_by(|a, b| a.total_cmp(b))
.unwrap_or(&x[0]);
let max = *x.iter().max_by(|a, b| a.total_cmp(b)).unwrap_or(&x[0]);
// If min == max, all values are the same => nothing to do here
if max == min {
return (0.0, 0.0);
}
// Ensure min <= 0.0
let mut min = min.min(0.);
// Compute scale and inverse scale
let mut iscale = nmax as f32 / (max - min);
let mut scale = 1.0 / iscale;
for _ in 0..ntry {
let mut sumlx = 0.0;
let mut suml2 = 0;
let mut did_change = false;
for (i, value) in x.iter().enumerate().take(n) {
let li = nearest_int(iscale * (value - min)).clamp(0, nmax);
let clamped_li = li as u8;
if clamped_li != l[i] {
l[i] = clamped_li;
did_change = true;
}
sumlx += (value - min) * li as f32;
suml2 += li * li;
}
scale = sumlx / suml2 as f32;
let sum: f32 = x
.iter()
.take(n)
.zip(l.iter().take(n))
.map(|(xi, &li)| xi - scale * li as f32)
.sum();
min = sum / n as f32;
if min > 0.0 {
min = 0.0;
}
iscale = 1.0 / scale;
if !did_change {
break;
}
}
(scale, -min)
}
// https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L165
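/// Symmetric quantization of `x` to levels in `[-nmax, nmax - 1]`, optionally refined to
/// reduce the RMSE; returns the scale.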
pub(super) fn make_q3_quants(x: &[f32], nmax: i32, do_rmse: bool) -> f32 {
let n = x.len();
let mut l = vec![0i8; n];
let mut max = 0.0;
let mut amax = 0.0;
for &xi in x.iter().take(n) {
let ax = xi.abs();
if ax > amax {
amax = ax;
max = xi;
}
}
if amax == 0.0 {
return 0.0;
}
let iscale = -(nmax as f32) / max;
if do_rmse {
let mut sumlx = 0.0;
let mut suml2 = 0.0;
for i in 0..n {
let li = (iscale * x[i]).round() as i32;
let li = li.clamp(-nmax, nmax - 1);
l[i] = li as i8;
let w = x[i] * x[i];
sumlx += w * x[i] * li as f32;
suml2 += w * (li * li) as f32;
}
for _ in 0..5 {
let mut n_changed = 0;
for i in 0..n {
let w = x[i] * x[i];
let mut slx = sumlx - w * x[i] * l[i] as f32;
if slx > 0.0 {
let mut sl2 = suml2 - w * (l[i] as i32 * l[i] as i32) as f32;
let mut new_l = (x[i] * sl2 / slx).round() as i32;
new_l = new_l.clamp(-nmax, nmax - 1);
if new_l != l[i] as i32 {
slx += w * x[i] * new_l as f32;
sl2 += w * (new_l * new_l) as f32;
if sl2 > 0.0 && slx * slx * suml2 > sumlx * sumlx * sl2 {
l[i] = new_l as i8;
sumlx = slx;
suml2 = sl2;
n_changed += 1;
}
}
}
}
if n_changed == 0 {
break;
}
}
for li in l.iter_mut() {
*li += nmax as i8;
}
return sumlx / suml2;
}
for i in 0..n {
let li = (iscale * x[i]).round() as i32;
l[i] = (li.clamp(-nmax, nmax - 1) + nmax) as i8;
}
1.0 / iscale
}
| candle/candle-core/src/quantized/utils.rs/0 | {
"file_path": "candle/candle-core/src/quantized/utils.rs",
"repo_id": "candle",
"token_count": 5775
} |
[package]
name = "candle-datasets"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true
readme = "README.md"
[dependencies]
byteorder = { workspace = true }
candle = { workspace = true }
candle-nn = { workspace = true }
hf-hub = { workspace = true}
intel-mkl-src = { workspace = true, optional = true }
memmap2 = { workspace = true }
tokenizers = { workspace = true, features = ["onig"] }
rand = { workspace = true }
thiserror = { workspace = true }
parquet = { workspace = true}
image = { workspace = true }
| candle/candle-datasets/Cargo.toml/0 | {
"file_path": "candle/candle-datasets/Cargo.toml",
"repo_id": "candle",
"token_count": 201
} |
//! BEiT: BERT Pre-Training of Image Transformers
//! https://github.com/microsoft/unilm/tree/master/beit
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use clap::Parser;
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::beit;
/// Loads an image from disk using the image crate and returns a tensor with shape
/// (3, 384, 384). The BEiT-specific normalization (mean and std of 0.5) is applied.
pub fn load_image384_beit_norm<P: AsRef<std::path::Path>>(p: P) -> Result<Tensor> {
let img = image::ImageReader::open(p)?
.decode()
.map_err(candle::Error::wrap)?
.resize_to_fill(384, 384, image::imageops::FilterType::Triangle);
let img = img.to_rgb8();
let data = img.into_raw();
let data = Tensor::from_vec(data, (384, 384, 3), &Device::Cpu)?.permute((2, 0, 1))?;
let mean = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?;
let std = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?;
(data.to_dtype(candle::DType::F32)? / 255.)?
.broadcast_sub(&mean)?
.broadcast_div(&std)
}
#[derive(Parser)]
struct Args {
#[arg(long)]
model: Option<String>,
#[arg(long)]
image: String,
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
}
pub fn main() -> anyhow::Result<()> {
let args = Args::parse();
let device = candle_examples::device(args.cpu)?;
let image = load_image384_beit_norm(args.image)?.to_device(&device)?;
println!("loaded image {image:?}");
let model_file = match args.model {
None => {
let api = hf_hub::api::sync::Api::new()?;
let api = api.model("vincent-espitalier/candle-beit".into());
api.get("beit_base_patch16_384.in22k_ft_in22k_in1k.safetensors")?
}
Some(model) => model.into(),
};
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? };
let model = beit::vit_base(vb)?;
println!("model built");
let logits = model.forward(&image.unsqueeze(0)?)?;
let prs = candle_nn::ops::softmax(&logits, D::Minus1)?
.i(0)?
.to_vec1::<f32>()?;
let mut prs = prs.iter().enumerate().collect::<Vec<_>>();
prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1));
for &(category_idx, pr) in prs.iter().take(5) {
println!(
"{:24}: {:.2}%",
candle_examples::imagenet::CLASSES[category_idx],
100. * pr
);
}
Ok(())
}
| candle/candle-examples/examples/beit/main.rs/0 | {
"file_path": "candle/candle-examples/examples/beit/main.rs",
"repo_id": "candle",
"token_count": 1178
} |
# candle-convnext
[A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) and
[ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](https://arxiv.org/abs/2301.00808).
This candle implementation uses a pre-trained ConvNeXt network for inference. The
classification head has been trained on the ImageNet dataset and returns the
probabilities for the top-5 classes.
## Running an example
```
$ cargo run --example convnext --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg --which tiny
loaded image Tensor[dims 3, 224, 224; f32]
model built
mountain bike, all-terrain bike, off-roader: 84.09%
bicycle-built-for-two, tandem bicycle, tandem: 4.15%
maillot : 0.74%
crash helmet : 0.54%
unicycle, monocycle : 0.44%
```
| candle/candle-examples/examples/convnext/README.md/0 | {
"file_path": "candle/candle-examples/examples/convnext/README.md",
"repo_id": "candle",
"token_count": 293
} |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle_transformers::models::distilbert::{Config, DistilBertModel, DTYPE};
use anyhow::{Error as E, Result};
use candle::{Device, Tensor};
use candle_nn::VarBuilder;
use clap::Parser;
use hf_hub::{api::sync::Api, Repo, RepoType};
use tokenizers::Tokenizer;
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
    /// The model to use; check out available models: https://huggingface.co/models?library=sentence-transformers&sort=trending
#[arg(long)]
model_id: Option<String>,
#[arg(long)]
revision: Option<String>,
/// When set, compute embeddings for this prompt.
#[arg(long)]
prompt: String,
/// Use the pytorch weights rather than the safetensors ones
#[arg(long)]
use_pth: bool,
/// The number of times to run the prompt.
#[arg(long, default_value = "1")]
n: usize,
/// L2 normalization for embeddings.
#[arg(long, default_value = "true")]
normalize_embeddings: bool,
}
impl Args {
fn build_model_and_tokenizer(&self) -> Result<(DistilBertModel, Tokenizer)> {
let device = candle_examples::device(self.cpu)?;
let default_model = "distilbert-base-uncased".to_string();
let default_revision = "main".to_string();
let (model_id, revision) = match (self.model_id.to_owned(), self.revision.to_owned()) {
(Some(model_id), Some(revision)) => (model_id, revision),
(Some(model_id), None) => (model_id, "main".to_string()),
(None, Some(revision)) => (default_model, revision),
(None, None) => (default_model, default_revision),
};
let repo = Repo::with_revision(model_id, RepoType::Model, revision);
let (config_filename, tokenizer_filename, weights_filename) = {
let api = Api::new()?;
let api = api.repo(repo);
let config = api.get("config.json")?;
let tokenizer = api.get("tokenizer.json")?;
let weights = if self.use_pth {
api.get("pytorch_model.bin")?
} else {
api.get("model.safetensors")?
};
(config, tokenizer, weights)
};
let config = std::fs::read_to_string(config_filename)?;
let config: Config = serde_json::from_str(&config)?;
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let vb = if self.use_pth {
VarBuilder::from_pth(&weights_filename, DTYPE, &device)?
} else {
unsafe { VarBuilder::from_mmaped_safetensors(&[weights_filename], DTYPE, &device)? }
};
let model = DistilBertModel::load(vb, &config)?;
Ok((model, tokenizer))
}
}
fn get_mask(size: usize, device: &Device) -> Tensor {
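    // Upper-triangular mask: entry (i, j) is 1 when j > i and 0 otherwise.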
let mask: Vec<_> = (0..size)
.flat_map(|i| (0..size).map(move |j| u8::from(j > i)))
.collect();
Tensor::from_slice(&mask, (size, size), device).unwrap()
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
println!("tracing...");
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
let (model, mut tokenizer) = args.build_model_and_tokenizer()?;
let device = &model.device;
let tokenizer = tokenizer
.with_padding(None)
.with_truncation(None)
.map_err(E::msg)?;
let tokens = tokenizer
.encode(args.prompt, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
let token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?;
let mask = get_mask(tokens.len(), device);
println!("token_ids: {:?}", token_ids.to_vec2::<u32>());
println!("mask: {:?}", mask.to_vec2::<u8>());
let ys = model.forward(&token_ids, &mask)?;
println!("{ys}");
Ok(())
}
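/// L2-normalizes each row of a 2d tensor by dividing by the per-row Euclidean norm.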
pub fn normalize_l2(v: &Tensor) -> Result<Tensor> {
Ok(v.broadcast_div(&v.sqr()?.sum_keepdim(1)?.sqrt()?)?)
}
| candle/candle-examples/examples/distilbert/main.rs/0 | {
"file_path": "candle/candle-examples/examples/distilbert/main.rs",
"repo_id": "candle",
"token_count": 1939
} |
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
use candle_transformers::models::{clip, flux, t5};
use anyhow::{Error as E, Result};
use candle::{IndexOp, Module, Tensor};
use candle_nn::VarBuilder;
use clap::Parser;
use tokenizers::Tokenizer;
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
struct Args {
/// The prompt to be used for image generation.
#[arg(long, default_value = "A rusty robot walking on a beach")]
prompt: String,
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Use the quantized model.
#[arg(long)]
quantized: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
/// The height in pixels of the generated image.
#[arg(long)]
height: Option<usize>,
/// The width in pixels of the generated image.
#[arg(long)]
width: Option<usize>,
#[arg(long)]
decode_only: Option<String>,
#[arg(long, value_enum, default_value = "schnell")]
model: Model,
/// Use the slower kernels.
#[arg(long)]
use_dmmv: bool,
/// The seed to use when generating random samples.
#[arg(long)]
seed: Option<u64>,
}
#[derive(Debug, Clone, Copy, clap::ValueEnum, PartialEq, Eq)]
enum Model {
Schnell,
Dev,
}
fn run(args: Args) -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let Args {
prompt,
cpu,
height,
width,
tracing,
decode_only,
model,
quantized,
..
} = args;
let width = width.unwrap_or(1360);
let height = height.unwrap_or(768);
let _guard = if tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
let api = hf_hub::api::sync::Api::new()?;
let bf_repo = {
let name = match model {
Model::Dev => "black-forest-labs/FLUX.1-dev",
Model::Schnell => "black-forest-labs/FLUX.1-schnell",
};
api.repo(hf_hub::Repo::model(name.to_string()))
};
let device = candle_examples::device(cpu)?;
if let Some(seed) = args.seed {
device.set_seed(seed)?;
}
let dtype = device.bf16_default_to_f32();
let img = match decode_only {
None => {
let t5_emb = {
let repo = api.repo(hf_hub::Repo::with_revision(
"google/t5-v1_1-xxl".to_string(),
hf_hub::RepoType::Model,
"refs/pr/2".to_string(),
));
let model_file = repo.get("model.safetensors")?;
let vb =
unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], dtype, &device)? };
let config_filename = repo.get("config.json")?;
let config = std::fs::read_to_string(config_filename)?;
let config: t5::Config = serde_json::from_str(&config)?;
let mut model = t5::T5EncoderModel::load(vb, &config)?;
let tokenizer_filename = api
.model("lmz/mt5-tokenizers".to_string())
.get("t5-v1_1-xxl.tokenizer.json")?;
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let mut tokens = tokenizer
.encode(prompt.as_str(), true)
.map_err(E::msg)?
.get_ids()
.to_vec();
tokens.resize(256, 0);
let input_token_ids = Tensor::new(&tokens[..], &device)?.unsqueeze(0)?;
println!("{input_token_ids}");
model.forward(&input_token_ids)?
};
println!("T5\n{t5_emb}");
let clip_emb = {
let repo = api.repo(hf_hub::Repo::model(
"openai/clip-vit-large-patch14".to_string(),
));
let model_file = repo.get("model.safetensors")?;
let vb =
unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], dtype, &device)? };
                // https://huggingface.co/openai/clip-vit-large-patch14/blob/main/config.json
let config = clip::text_model::ClipTextConfig {
vocab_size: 49408,
projection_dim: 768,
activation: clip::text_model::Activation::QuickGelu,
intermediate_size: 3072,
embed_dim: 768,
max_position_embeddings: 77,
pad_with: None,
num_hidden_layers: 12,
num_attention_heads: 12,
};
let model =
clip::text_model::ClipTextTransformer::new(vb.pp("text_model"), &config)?;
let tokenizer_filename = repo.get("tokenizer.json")?;
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let tokens = tokenizer
.encode(prompt.as_str(), true)
.map_err(E::msg)?
.get_ids()
.to_vec();
let input_token_ids = Tensor::new(&tokens[..], &device)?.unsqueeze(0)?;
println!("{input_token_ids}");
model.forward(&input_token_ids)?
};
println!("CLIP\n{clip_emb}");
let img = {
let cfg = match model {
Model::Dev => flux::model::Config::dev(),
Model::Schnell => flux::model::Config::schnell(),
};
let img = flux::sampling::get_noise(1, height, width, &device)?.to_dtype(dtype)?;
let state = if quantized {
flux::sampling::State::new(
&t5_emb.to_dtype(candle::DType::F32)?,
&clip_emb.to_dtype(candle::DType::F32)?,
&img.to_dtype(candle::DType::F32)?,
)?
} else {
flux::sampling::State::new(&t5_emb, &clip_emb, &img)?
};
let timesteps = match model {
Model::Dev => {
flux::sampling::get_schedule(50, Some((state.img.dim(1)?, 0.5, 1.15)))
}
Model::Schnell => flux::sampling::get_schedule(4, None),
};
println!("{state:?}");
println!("{timesteps:?}");
if quantized {
let model_file = match model {
Model::Schnell => api
.repo(hf_hub::Repo::model("lmz/candle-flux".to_string()))
.get("flux1-schnell.gguf")?,
Model::Dev => todo!(),
};
let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf(
model_file, &device,
)?;
let model = flux::quantized_model::Flux::new(&cfg, vb)?;
flux::sampling::denoise(
&model,
&state.img,
&state.img_ids,
&state.txt,
&state.txt_ids,
&state.vec,
×teps,
4.,
)?
.to_dtype(dtype)?
} else {
let model_file = match model {
Model::Schnell => bf_repo.get("flux1-schnell.safetensors")?,
Model::Dev => bf_repo.get("flux1-dev.safetensors")?,
};
let vb = unsafe {
VarBuilder::from_mmaped_safetensors(&[model_file], dtype, &device)?
};
let model = flux::model::Flux::new(&cfg, vb)?;
flux::sampling::denoise(
&model,
&state.img,
&state.img_ids,
&state.txt,
&state.txt_ids,
&state.vec,
×teps,
4.,
)?
}
};
flux::sampling::unpack(&img, height, width)?
}
Some(file) => {
let mut st = candle::safetensors::load(file, &device)?;
st.remove("img").unwrap().to_dtype(dtype)?
}
};
println!("latent img\n{img}");
let img = {
let model_file = bf_repo.get("ae.safetensors")?;
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], dtype, &device)? };
let cfg = match model {
Model::Dev => flux::autoencoder::Config::dev(),
Model::Schnell => flux::autoencoder::Config::schnell(),
};
let model = flux::autoencoder::AutoEncoder::new(&cfg, vb)?;
model.decode(&img)?
};
println!("img\n{img}");
let img = ((img.clamp(-1f32, 1f32)? + 1.0)? * 127.5)?.to_dtype(candle::DType::U8)?;
let filename = match args.seed {
None => "out.jpg".to_string(),
Some(s) => format!("out-{s}.jpg"),
};
candle_examples::save_image(&img.i(0)?, filename)?;
Ok(())
}
fn main() -> Result<()> {
let args = Args::parse();
#[cfg(feature = "cuda")]
candle::quantized::cuda::set_force_dmmv(args.use_dmmv);
run(args)
}
| candle/candle-examples/examples/flux/main.rs/0 | {
"file_path": "candle/candle-examples/examples/flux/main.rs",
"repo_id": "candle",
"token_count": 5445
} |
// An implementation of LLaMA https://github.com/facebookresearch/llama
//
// This is based on nanoGPT in a similar way to:
// https://github.com/Lightning-AI/lit-llama/blob/main/lit_llama/model.py
//
// The tokenizer config can be retrieved from:
// https://huggingface.co/hf-internal-testing/llama-tokenizer/raw/main/tokenizer.json
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
use anyhow::{bail, Error as E, Result};
use clap::{Parser, ValueEnum};
use candle::{DType, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::generation::{LogitsProcessor, Sampling};
use hf_hub::{api::sync::Api, Repo, RepoType};
use std::io::Write;
use candle_transformers::models::llama as model;
use model::{Llama, LlamaConfig};
const EOS_TOKEN: &str = "</s>";
const DEFAULT_PROMPT: &str = "My favorite theorem is ";
#[derive(Clone, Debug, Copy, PartialEq, Eq, ValueEnum)]
enum Which {
V1,
V2,
V3,
V31,
V3Instruct,
V31Instruct,
V32_1b,
V32_1bInstruct,
V32_3b,
V32_3bInstruct,
#[value(name = "solar-10.7b")]
Solar10_7B,
#[value(name = "tiny-llama-1.1b-chat")]
TinyLlama1_1BChat,
#[value(name = "SmoLM2-1.7B")]
SmolLM2_1B,
#[value(name = "SmoLM2-1.7B-Instruct")]
SmolLM2_1BInstruct,
#[value(name = "SmoLM2-360M")]
SmolLM2_360M,
#[value(name = "SmoLM2-360M-Instruct")]
SmolLM2_360MInstruct,
#[value(name = "SmoLM2-135M")]
SmolLM2_135M,
#[value(name = "SmoLM2-135M-Instruct")]
SmolLM2_135MInstruct,
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// The temperature used to generate samples.
#[arg(long, default_value_t = 0.8)]
temperature: f64,
/// Nucleus sampling probability cutoff.
#[arg(long)]
top_p: Option<f64>,
/// Only sample among the top K samples.
#[arg(long)]
top_k: Option<usize>,
/// The seed to use when generating random samples.
#[arg(long, default_value_t = 299792458)]
seed: u64,
/// The length of the sample to generate (in tokens).
#[arg(short = 'n', long, default_value_t = 10000)]
sample_len: usize,
/// Disable the key-value cache.
#[arg(long)]
no_kv_cache: bool,
/// The initial prompt.
#[arg(long)]
prompt: Option<String>,
/// Use different dtype than f16
#[arg(long)]
dtype: Option<String>,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
#[arg(long)]
model_id: Option<String>,
#[arg(long)]
revision: Option<String>,
/// The model size to use.
#[arg(long, default_value = "v3")]
which: Which,
#[arg(long)]
use_flash_attn: bool,
/// Penalty to be applied for repeating tokens, 1. means no penalty.
#[arg(long, default_value_t = 1.1)]
repeat_penalty: f32,
/// The context size to consider for the repeat penalty.
#[arg(long, default_value_t = 128)]
repeat_last_n: usize,
}
fn main() -> Result<()> {
use tokenizers::Tokenizer;
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
let device = candle_examples::device(args.cpu)?;
let dtype = match args.dtype.as_deref() {
Some("f16") => DType::F16,
Some("bf16") => DType::BF16,
Some("f32") => DType::F32,
Some(dtype) => bail!("Unsupported dtype {dtype}"),
None => DType::F16,
};
let (llama, tokenizer_filename, mut cache, config) = {
let api = Api::new()?;
let model_id = args.model_id.unwrap_or_else(|| {
let str = match args.which {
Which::V1 => "Narsil/amall-7b",
Which::V2 => "meta-llama/Llama-2-7b-hf",
Which::V3 => "meta-llama/Meta-Llama-3-8B",
Which::V3Instruct => "meta-llama/Meta-Llama-3-8B-Instruct",
Which::V31 => "meta-llama/Llama-3.1-8B",
Which::V31Instruct => "meta-llama/Llama-3.1-8B-Instruct",
Which::V32_1b => "meta-llama/Llama-3.2-1B",
Which::V32_1bInstruct => "meta-llama/Llama-3.2-1B-Instruct",
Which::V32_3b => "meta-llama/Llama-3.2-3B",
Which::V32_3bInstruct => "meta-llama/Llama-3.2-3B-Instruct",
Which::Solar10_7B => "upstage/SOLAR-10.7B-v1.0",
Which::TinyLlama1_1BChat => "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
Which::SmolLM2_135M => "HuggingFaceTB/SmolLM2-135M",
Which::SmolLM2_135MInstruct => "HuggingFaceTB/SmolLM2-135M-Instruct",
Which::SmolLM2_360M => "HuggingFaceTB/SmolLM2-360M",
Which::SmolLM2_360MInstruct => "HuggingFaceTB/SmolLM2-360M-Instruct",
Which::SmolLM2_1B => "HuggingFaceTB/SmolLM2-1.7B",
Which::SmolLM2_1BInstruct => "HuggingFaceTB/SmolLM2-1.7B-Instruct",
};
str.to_string()
});
println!("loading the model weights from {model_id}");
let revision = args.revision.unwrap_or("main".to_string());
let api = api.repo(Repo::with_revision(model_id, RepoType::Model, revision));
let tokenizer_filename = api.get("tokenizer.json")?;
let config_filename = api.get("config.json")?;
let config: LlamaConfig = serde_json::from_slice(&std::fs::read(config_filename)?)?;
let config = config.into_config(args.use_flash_attn);
let filenames = match args.which {
Which::V1
| Which::V2
| Which::V3
| Which::V3Instruct
| Which::V31
| Which::V31Instruct
| Which::V32_3b
| Which::V32_3bInstruct
| Which::Solar10_7B => {
candle_examples::hub_load_safetensors(&api, "model.safetensors.index.json")?
}
Which::SmolLM2_360M
| Which::SmolLM2_360MInstruct
| Which::SmolLM2_135M
| Which::SmolLM2_135MInstruct
| Which::SmolLM2_1B
| Which::SmolLM2_1BInstruct
| Which::V32_1b
| Which::V32_1bInstruct
| Which::TinyLlama1_1BChat => {
vec![api.get("model.safetensors")?]
}
};
let cache = model::Cache::new(!args.no_kv_cache, dtype, &config, &device)?;
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? };
(Llama::load(vb, &config)?, tokenizer_filename, cache, config)
};
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let eos_token_id = config.eos_token_id.or_else(|| {
tokenizer
.token_to_id(EOS_TOKEN)
.map(model::LlamaEosToks::Single)
});
let prompt = args.prompt.as_ref().map_or(DEFAULT_PROMPT, |p| p.as_str());
let mut tokens = tokenizer
.encode(prompt, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
let mut tokenizer = candle_examples::token_output_stream::TokenOutputStream::new(tokenizer);
println!("starting the inference loop");
print!("{prompt}");
let mut logits_processor = {
let temperature = args.temperature;
let sampling = if temperature <= 0. {
Sampling::ArgMax
} else {
match (args.top_k, args.top_p) {
(None, None) => Sampling::All { temperature },
(Some(k), None) => Sampling::TopK { k, temperature },
(None, Some(p)) => Sampling::TopP { p, temperature },
(Some(k), Some(p)) => Sampling::TopKThenTopP { k, p, temperature },
}
};
LogitsProcessor::from_sampling(args.seed, sampling)
};
let mut start_gen = std::time::Instant::now();
let mut index_pos = 0;
let mut token_generated = 0;
for index in 0..args.sample_len {
let (context_size, context_index) = if cache.use_kv_cache && index > 0 {
(1, index_pos)
} else {
(tokens.len(), 0)
};
if index == 1 {
start_gen = std::time::Instant::now()
}
let ctxt = &tokens[tokens.len().saturating_sub(context_size)..];
let input = Tensor::new(ctxt, &device)?.unsqueeze(0)?;
let logits = llama.forward(&input, context_index, &mut cache)?;
let logits = logits.squeeze(0)?;
let logits = if args.repeat_penalty == 1. {
logits
} else {
let start_at = tokens.len().saturating_sub(args.repeat_last_n);
candle_transformers::utils::apply_repeat_penalty(
&logits,
args.repeat_penalty,
&tokens[start_at..],
)?
};
index_pos += ctxt.len();
let next_token = logits_processor.sample(&logits)?;
token_generated += 1;
tokens.push(next_token);
match eos_token_id {
Some(model::LlamaEosToks::Single(eos_tok_id)) if next_token == eos_tok_id => {
break;
}
Some(model::LlamaEosToks::Multiple(ref eos_ids)) if eos_ids.contains(&next_token) => {
break;
}
_ => (),
}
if let Some(t) = tokenizer.next_token(next_token)? {
print!("{t}");
std::io::stdout().flush()?;
}
}
if let Some(rest) = tokenizer.decode_rest().map_err(E::msg)? {
print!("{rest}");
}
let dt = start_gen.elapsed();
println!(
"\n\n{} tokens generated ({} token/s)\n",
token_generated,
(token_generated - 1) as f64 / dt.as_secs_f64(),
);
Ok(())
}
| candle/candle-examples/examples/llama/main.rs/0 | {
"file_path": "candle/candle-examples/examples/llama/main.rs",
"repo_id": "candle",
"token_count": 4888
} |
# candle-mobileone
[MobileOne: An Improved One millisecond Mobile Backbone](https://arxiv.org/abs/2206.04040).
This candle implementation uses a pre-trained MobileOne network for inference. The
classification head has been trained on the ImageNet dataset and returns the
probabilities for the top-5 classes.
## Running an example
```
$ cargo run --example mobileone --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg --which s2
loaded image Tensor[dims 3, 224, 224; f32]
model built
mountain bike, all-terrain bike, off-roader: 79.33%
bicycle-built-for-two, tandem bicycle, tandem: 15.32%
crash helmet : 2.58%
unicycle, monocycle : 1.70%
alp : 0.21%
```
| candle/candle-examples/examples/mobileone/README.md/0 | {
"file_path": "candle/candle-examples/examples/mobileone/README.md",
"repo_id": "candle",
"token_count": 254
} |
# candle-qwen: large language model series from Alibaba Cloud
Qwen 1.5 is a series of large language models that provide strong performance
on English and Chinese.
- [Blog post](https://qwenlm.github.io/blog/qwen1.5/) introducing Qwen1.5.
- [Model card](https://huggingface.co/Qwen/Qwen1.5-0.5B) on the HuggingFace Hub.
- [Blog post](https://qwenlm.github.io/blog/qwen-moe/) for the
mixture-of-experts (MoE) variant.
## Running the example
```bash
$ cargo run --example qwen --release -- --prompt "Hello there "
```
Various model sizes are available via the `--model` argument, including the MoE
variant.
```bash
$ cargo run --example qwen --release -- --model moe-a2.7b --prompt 'def print_prime(n: int): '
def print_prime(n: int): # n is the number of primes to be printed
for i in range(2, n + 1):
if all(i % j != 0 for j in range(2, i)):
print(i)
```
| candle/candle-examples/examples/qwen/README.md/0 | {
"file_path": "candle/candle-examples/examples/qwen/README.md",
"repo_id": "candle",
"token_count": 327
} |
# candle-resnet
A candle implementation of inference using a pre-trained [ResNet](https://arxiv.org/abs/1512.03385).
This uses a classification head trained on the ImageNet dataset and returns the
probabilities for the top-5 classes.
## Running an example
```
$ cargo run --example resnet --release -- --image tiger.jpg
loaded image Tensor[dims 3, 224, 224; f32]
model built
tiger, Panthera tigris : 90.21%
tiger cat : 8.93%
lion, king of beasts, Panthera leo: 0.35%
leopard, Panthera pardus: 0.16%
jaguar, panther, Panthera onca, Felis onca: 0.09%
```
| candle/candle-examples/examples/resnet/README.md/0 | {
"file_path": "candle/candle-examples/examples/resnet/README.md",
"repo_id": "candle",
"token_count": 204
} |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::Result;
use clap::Parser;
use candle::{DType, Tensor};
#[derive(Clone, Debug, Copy, PartialEq, Eq, clap::ValueEnum)]
enum Which {
#[value(name = "silero")]
Silero,
}
#[derive(Clone, Debug, Copy, PartialEq, Eq, clap::ValueEnum)]
enum SampleRate {
#[value(name = "8000")]
Sr8k,
#[value(name = "16000")]
Sr16k,
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
#[arg(long)]
input: Option<String>,
#[arg(long)]
sample_rate: SampleRate,
#[arg(long)]
model_id: Option<String>,
#[arg(long)]
config_file: Option<String>,
/// The model to use.
#[arg(long, default_value = "silero")]
which: Which,
}
/// An iterator which reads consecutive frames of little-endian i16 values from a reader.
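/// Each i16 sample is converted to an f32 by normalizing with `i16::MAX`.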
struct I16Frames<R> {
rdr: R,
buf: Box<[u8]>,
len: usize,
eof: bool,
}
impl<R> I16Frames<R> {
fn new(rdr: R, frame_size: usize) -> Self {
I16Frames {
rdr,
buf: vec![0; frame_size * std::mem::size_of::<i16>()].into_boxed_slice(),
len: 0,
eof: false,
}
}
}
impl<R: std::io::Read> Iterator for I16Frames<R> {
type Item = std::io::Result<Vec<f32>>;
fn next(&mut self) -> Option<Self::Item> {
if self.eof {
return None;
}
self.len += match self.rdr.read(&mut self.buf[self.len..]) {
Ok(0) => {
self.eof = true;
0
}
Ok(n) => n,
Err(e) => return Some(Err(e)),
};
if self.eof || self.len == self.buf.len() {
let buf = self.buf[..self.len]
.chunks(2)
.map(|bs| match bs {
[a, b] => i16::from_le_bytes([*a, *b]),
_ => unreachable!(),
})
.map(|i| i as f32 / i16::MAX as f32)
.collect();
self.len = 0;
Some(Ok(buf))
} else {
self.next()
}
}
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
println!(
"avx: {}, neon: {}, simd128: {}, f16c: {}",
candle::utils::with_avx(),
candle::utils::with_neon(),
candle::utils::with_simd128(),
candle::utils::with_f16c()
);
let start = std::time::Instant::now();
let model_id = match &args.model_id {
Some(model_id) => std::path::PathBuf::from(model_id),
None => match args.which {
Which::Silero => hf_hub::api::sync::Api::new()?
.model("onnx-community/silero-vad".into())
.get("onnx/model.onnx")?,
// TODO: candle-onnx doesn't support Int8 dtype
// Which::SileroQuantized => hf_hub::api::sync::Api::new()?
// .model("onnx-community/silero-vad".into())
// .get("onnx/model_quantized.onnx")?,
},
};
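// The Silero VAD model consumes fixed-size frames: 256 samples (32ms) at 8kHz or 512 samples
// (32ms) at 16kHz, each prepended with a short context carried over from the previous frame.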
let (sample_rate, frame_size, context_size): (i64, usize, usize) = match args.sample_rate {
SampleRate::Sr8k => (8000, 256, 32),
SampleRate::Sr16k => (16000, 512, 64),
};
println!("retrieved the files in {:?}", start.elapsed());
let start = std::time::Instant::now();
let device = candle_examples::device(args.cpu)?;
let model = candle_onnx::read_file(model_id)?;
println!("loaded the model in {:?}", start.elapsed());
let start = std::time::Instant::now();
struct State {
frame_size: usize,
sample_rate: Tensor,
state: Tensor,
context: Tensor,
}
let mut state = State {
frame_size,
sample_rate: Tensor::new(sample_rate, &device)?,
state: Tensor::zeros((2, 1, 128), DType::F32, &device)?,
context: Tensor::zeros((1, context_size), DType::F32, &device)?,
};
let mut res = vec![];
for chunk in I16Frames::new(std::io::stdin().lock(), state.frame_size) {
let chunk = chunk.unwrap();
if chunk.len() < state.frame_size {
continue;
}
let next_context = Tensor::from_slice(
&chunk[state.frame_size - context_size..],
(1, context_size),
&device,
)?;
let chunk = Tensor::from_vec(chunk, (1, state.frame_size), &device)?;
let chunk = Tensor::cat(&[&state.context, &chunk], 1)?;
let inputs = std::collections::HashMap::from_iter([
("input".to_string(), chunk),
("sr".to_string(), state.sample_rate.clone()),
("state".to_string(), state.state.clone()),
]);
let out = candle_onnx::simple_eval(&model, inputs).unwrap();
let out_names = &model.graph.as_ref().unwrap().output;
let output = out.get(&out_names[0].name).unwrap().clone();
state.state = out.get(&out_names[1].name).unwrap().clone();
assert_eq!(state.state.dims(), &[2, 1, 128]);
state.context = next_context;
let output = output.flatten_all()?.to_vec1::<f32>()?;
assert_eq!(output.len(), 1);
let output = output[0];
println!("vad chunk prediction: {output}");
res.push(output);
}
println!("calculated prediction in {:?}", start.elapsed());
let res_len = res.len() as f32;
let prediction = res.iter().sum::<f32>() / res_len;
println!("vad average prediction: {prediction}");
Ok(())
}
| candle/candle-examples/examples/silero-vad/main.rs/0 | {
"file_path": "candle/candle-examples/examples/silero-vad/main.rs",
"repo_id": "candle",
"token_count": 2894
} |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use std::path::Path;
use anyhow::{anyhow, Error as E, Result};
use clap::Parser;
use candle_transformers::models::stella_en_v5::{
Config, EmbedDim as StellaEmbedDim, EmbeddingModel,
};
use candle::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use hf_hub::{api::sync::Api, Repo};
use tokenizers::{PaddingDirection, PaddingParams, PaddingStrategy, Tokenizer};
struct Embedding {
model: EmbeddingModel,
device: Device,
tokenizer: Tokenizer,
}
impl Embedding {
fn new(model: EmbeddingModel, tokenizer: Tokenizer, device: &Device) -> Self {
Self {
model,
tokenizer,
device: device.clone(),
}
}
fn encode(&mut self, task: EncodeTask, text: Option<String>) -> Result<()> {
// Just showcasing embeddings; this has no real value
if let Some(text) = text {
let qry = task.query_preproc(&[text]);
let encoding = self.tokenizer.encode(qry, true).map_err(|e| anyhow!(e))?;
let shape = (1, encoding.len());
let input = Tensor::from_slice(encoding.get_ids(), shape, &self.device)?;
let mask = Tensor::from_slice(encoding.get_attention_mask(), shape, &self.device)?;
let result = self.model.forward(&input, &mask)?;
println!("embeddings: {result}");
} else {
// Examples copied from [Model Card](https://huggingface.co/dunzhang/stella_en_1.5B_v5#transformers)
let queries = [
"What are some ways to reduce stress?".to_string(),
"What are the benefits of drinking green tea?".to_string(),
];
let docs = [
"There are many effective ways to reduce stress. Some common techniques include deep breathing, meditation, and physical activity. Engaging in hobbies, spending time in nature, and connecting with loved ones can also help alleviate stress. Additionally, setting boundaries, practicing self-care, and learning to say no can prevent stress from building up.".to_string(),
"Green tea has been consumed for centuries and is known for its potential health benefits. It contains antioxidants that may help protect the body against damage caused by free radicals. Regular consumption of green tea has been associated with improved heart health, enhanced cognitive function, and a reduced risk of certain types of cancer. The polyphenols in green tea may also have anti-inflammatory and weight loss properties.".to_string(),
];
// Only the queries get the instruction preamble; the documents are encoded as-is
let qry = task.query_preproc(&queries);
let mut qry_encoded = self
.tokenizer
.encode_batch(qry, true)
.map_err(|e| anyhow!(e))?;
let mut docs_encoded = self
.tokenizer
.encode_batch(docs.to_vec(), true)
.map_err(|e| anyhow!(e))?;
let qry_embed = {
// Now, we generate the tensors for the `input` and `mask`
let shape = (qry_encoded.len(), qry_encoded[1].len());
let mut ids = Tensor::zeros(shape, DType::U32, &self.device)?;
let mut masks = Tensor::zeros(shape, DType::U8, &self.device)?;
for (i, e) in qry_encoded.drain(..).enumerate() {
let input_id =
Tensor::from_iter(e.get_ids().to_vec(), &self.device)?.unsqueeze(0)?;
let mask = Tensor::from_iter(e.get_attention_mask().to_vec(), &self.device)?
.to_dtype(DType::U8)?
.unsqueeze(0)?;
ids =
ids.slice_assign(&[i..i + 1, 0..input_id.dims2().unwrap().1], &input_id)?;
masks = masks.slice_assign(&[i..i + 1, 0..mask.dims2().unwrap().1], &mask)?;
}
// Generate the embeddings for the queries; the result is normalized.
// For larger datasets, you can call `.forward()` on batches and run an `l2 norm` pass on the entire data
self.model.forward_norm(&ids, &masks)?
};
let doc_embed = {
let shape = (docs_encoded.len(), docs_encoded[1].len());
let mut ids = Tensor::zeros(shape, DType::U32, &self.device)?;
let mut masks = Tensor::zeros(shape, DType::U8, &self.device)?;
for (i, e) in docs_encoded.drain(..).enumerate() {
let input_id =
Tensor::from_iter(e.get_ids().to_vec(), &self.device)?.unsqueeze(0)?;
let mask = Tensor::from_iter(e.get_attention_mask().to_vec(), &self.device)?
.to_dtype(DType::U8)?
.unsqueeze(0)?;
ids =
ids.slice_assign(&[i..i + 1, 0..input_id.dims2().unwrap().1], &input_id)?;
masks = masks.slice_assign(&[i..i + 1, 0..mask.dims2().unwrap().1], &mask)?;
}
// Generate the embeddings for the documents, normalized the same way.
// For larger datasets, you can call `.forward()` on batches and run an `l2 norm` pass on the entire data
self.model.forward_norm(&ids, &masks)?
};
println!(
"Embed shapes:\nQuery: {:?}\nDocs: {:?}",
qry_embed.shape(),
doc_embed.shape()
); // [2, 1024] for head dim `1024`
// a matmul to generate the `similarity` score
let res = qry_embed.matmul(&doc_embed.t()?)?;
for (k, v) in queries.iter().enumerate() {
let tnsr = res.get(k)?;
let max = tnsr.argmax(0)?.to_scalar::<u32>()?;
println!(
"\nScore: {}\nQuery: {}\nAnswer: {}\n\n",
tnsr.get(max as usize)?.to_scalar::<f32>()?,
v,
docs[k]
);
}
}
Ok(())
}
}
#[derive(Clone, Copy, Debug, clap::ValueEnum, PartialEq, Eq)]
enum EmbedDim {
#[value(name = "256")]
Dim256,
#[value(name = "768")]
Dim768,
#[value(name = "1024")]
Dim1024,
#[value(name = "2048")]
Dim2048,
#[value(name = "4096")]
Dim4096,
#[value(name = "6144")]
Dim6144,
#[value(name = "8192")]
Dim8192,
}
impl EmbedDim {
/// Returns the dir path to the embed head weights in the repo
pub fn embed_dim_default_dir(&self) -> &'static str {
match self {
Self::Dim256 => "2_Dense_256",
Self::Dim768 => "2_Dense_768",
Self::Dim1024 => "2_Dense_1024",
Self::Dim2048 => "2_Dense_2048",
Self::Dim4096 => "2_Dense_4096",
Self::Dim6144 => "2_Dense_6144",
Self::Dim8192 => "2_Dense_8192",
}
}
/// Resolves the `EmbedDim` for given variant
pub fn embed_dim(&self) -> StellaEmbedDim {
match self {
Self::Dim256 => StellaEmbedDim::Dim256,
Self::Dim768 => StellaEmbedDim::Dim768,
Self::Dim1024 => StellaEmbedDim::Dim1024,
Self::Dim2048 => StellaEmbedDim::Dim2048,
Self::Dim4096 => StellaEmbedDim::Dim4096,
Self::Dim6144 => StellaEmbedDim::Dim6144,
Self::Dim8192 => StellaEmbedDim::Dim8192,
}
}
}
#[derive(Clone, Copy, Debug, clap::ValueEnum, PartialEq, Eq)]
pub enum EncodeTask {
/// `s2p` is the `retrieval` task
/// Default in this example
#[value(name = "s2p")]
S2P,
/// `s2s` is the semantic similarity task
#[value(name = "s2s")]
S2S,
}
impl EncodeTask {
/// Preprocess a set of inputs based on a template suggested by the model authors
/// See: https://huggingface.co/dunzhang/stella_en_1.5B_v5#introduction
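/// For the default `s2p` task, a query like "What are some ways to reduce stress?" becomes:
/// "Instruct: Given a web search query, retrieve relevant passages that answer the query.\nQuery: What are some ways to reduce stress?"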
pub fn query_preproc(&self, txt: &[String]) -> Vec<String> {
let instruct = match self {
Self::S2P => {
"Given a web search query, retrieve relevant passages that answer the query."
}
Self::S2S => "Retrieve semantically similar text.",
};
txt.iter()
.map(|s| format!("Instruct: {instruct}\nQuery: {s}"))
.collect::<Vec<_>>()
}
}
#[derive(Clone, Debug, Copy, PartialEq, Eq, clap::ValueEnum)]
enum Which {
#[value(name = "1.5b")]
Large,
#[value(name = "400m")]
Small,
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
#[arg(long)]
which: Which,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
#[arg(long)]
use_flash_attn: bool,
#[arg(long)]
query: Option<String>,
#[arg(long, default_value = "1024")]
embed_dim: Option<EmbedDim>,
#[arg(long)]
tokenizer_file: Option<String>,
#[arg(long)]
base_weight_files: Option<String>,
#[arg(long)]
embed_head_weight_files: Option<String>,
/// `Stella` is trained on 2 tasks: See [`Model Card`](https://huggingface.co/dunzhang/stella_en_1.5B_v5)
/// `s2s`: Semantic textual similarity
/// `s2p`: Retrieval task - `Default` in this example
#[arg(long, default_value = "s2p")]
task: Option<EncodeTask>,
}
// Tokenizer creation is critical in our case:
// the 1.5B variant needs `padding: Left` for each batch, while the 400M variant pads to the right.
fn create_tokenizer(tokenizer_file: &Path, which: Which) -> Result<Tokenizer> {
let mut tokenizer = Tokenizer::from_file(tokenizer_file).map_err(E::msg)?;
if which == Which::Large {
let pad_id = if let Some(pad_id) = tokenizer.token_to_id("<|endoftext|>") {
pad_id
} else {
return Err(anyhow!(
"Tokenizer doesn't contain expected `<|endoftext|>` token"
));
};
// This part is super important: we pad the tokens to the *`left`* and not with the usual *`right`* padding
tokenizer.with_padding(Some(PaddingParams {
strategy: PaddingStrategy::BatchLongest,
direction: PaddingDirection::Left,
pad_id,
pad_token: "<|endoftext|>".to_string(),
..Default::default()
}));
} else {
tokenizer.with_padding(Some(PaddingParams {
strategy: PaddingStrategy::BatchLongest,
direction: PaddingDirection::Right,
..Default::default()
}));
}
Ok(tokenizer)
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
println!(
"avx: {}, neon: {}, simd128: {}, f16c: {}",
candle::utils::with_avx(),
candle::utils::with_neon(),
candle::utils::with_simd128(),
candle::utils::with_f16c()
);
let start = std::time::Instant::now();
let api = Api::new()?;
let embed_dim = match args.embed_dim {
Some(d) => d,
None => EmbedDim::Dim1024,
};
let (repo, cfg) = match args.which {
Which::Large => (
"dunzhang/stella_en_1.5B_v5",
Config::new_1_5_b_v5(embed_dim.embed_dim()),
),
Which::Small => (
"dunzhang/stella_en_400M_v5",
Config::new_400_m_v5(embed_dim.embed_dim()),
),
};
let repo = api.repo(Repo::model(repo.to_string()));
let tokenizer_filename = match args.tokenizer_file {
Some(file) => std::path::PathBuf::from(file),
None => repo.get("tokenizer.json")?,
};
// Note: if you are providing `weight_files`, ensure that the `--embed_dim` dimension provided matches the weights
// E.g. if you are using `--embed_dim 1024`, the weight files should include the `.safetensors` file from `2_Dense_1024` dir of the repo
let base_weight_files = match args.base_weight_files {
Some(files) => files
.split(',')
.map(std::path::PathBuf::from)
.collect::<Vec<_>>(),
None => {
vec![repo.get("model.safetensors")?]
}
};
let embed_weight_files = match args.embed_head_weight_files {
Some(files) => files
.split(',')
.map(std::path::PathBuf::from)
.collect::<Vec<_>>(),
None => {
let head_w_path = format!("{}/model.safetensors", embed_dim.embed_dim_default_dir());
vec![repo.get(&head_w_path)?]
}
};
println!("retrieved the files in {:?}", start.elapsed());
// Initializing the tokenizer which would require us to add padding to the `left` for batch encoding
let tokenizer = create_tokenizer(tokenizer_filename.as_path(), args.which)?;
let start = std::time::Instant::now();
let device = candle_examples::device(args.cpu)?;
let dtype = DType::F32;
let base_vb =
unsafe { VarBuilder::from_mmaped_safetensors(&base_weight_files, dtype, &device)? };
// Embedding layer is always built on F32 for accuracy
let embed_vb =
unsafe { VarBuilder::from_mmaped_safetensors(&embed_weight_files, DType::F32, &device)? };
let model = EmbeddingModel::new(&cfg, base_vb, embed_vb)?;
println!("loaded the model in {:?}", start.elapsed());
let mut embedding = Embedding::new(model, tokenizer, &device);
let task = args.task.map_or(EncodeTask::S2P, |t| t);
embedding.encode(task, args.query)
}
| candle/candle-examples/examples/stella-en-v5/main.rs/0 | {
"file_path": "candle/candle-examples/examples/stella-en-v5/main.rs",
"repo_id": "candle",
"token_count": 6512
} |
// https://github.com/openai/whisper/blob/main/whisper/model.py
// TODO:
// - Batch size greater than 1.
// - More token filters (SuppressBlanks, ApplyTimestampRules).
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
use anyhow::{Error as E, Result};
use candle::{Device, IndexOp, Tensor};
use candle_nn::{ops::softmax, VarBuilder};
use clap::{Parser, ValueEnum};
use hf_hub::{api::sync::Api, Repo, RepoType};
use rand::{distributions::Distribution, SeedableRng};
use tokenizers::Tokenizer;
mod multilingual;
mod pcm_decode;
use candle_transformers::models::whisper::{self as m, audio, Config};
pub enum Model {
Normal(m::model::Whisper),
Quantized(m::quantized_model::Whisper),
}
// Maybe we should use some traits rather than doing the dispatch for all these.
impl Model {
pub fn config(&self) -> &Config {
match self {
Self::Normal(m) => &m.config,
Self::Quantized(m) => &m.config,
}
}
pub fn encoder_forward(&mut self, x: &Tensor, flush: bool) -> candle::Result<Tensor> {
match self {
Self::Normal(m) => m.encoder.forward(x, flush),
Self::Quantized(m) => m.encoder.forward(x, flush),
}
}
pub fn decoder_forward(
&mut self,
x: &Tensor,
xa: &Tensor,
flush: bool,
) -> candle::Result<Tensor> {
match self {
Self::Normal(m) => m.decoder.forward(x, xa, flush),
Self::Quantized(m) => m.decoder.forward(x, xa, flush),
}
}
pub fn decoder_final_linear(&self, x: &Tensor) -> candle::Result<Tensor> {
match self {
Self::Normal(m) => m.decoder.final_linear(x),
Self::Quantized(m) => m.decoder.final_linear(x),
}
}
}
#[allow(dead_code)]
#[derive(Debug, Clone)]
struct DecodingResult {
tokens: Vec<u32>,
text: String,
avg_logprob: f64,
no_speech_prob: f64,
temperature: f64,
compression_ratio: f64,
}
#[allow(dead_code)]
#[derive(Debug, Clone)]
struct Segment {
start: f64,
duration: f64,
dr: DecodingResult,
}
struct Decoder {
model: Model,
rng: rand::rngs::StdRng,
task: Option<Task>,
timestamps: bool,
verbose: bool,
tokenizer: Tokenizer,
suppress_tokens: Tensor,
sot_token: u32,
transcribe_token: u32,
translate_token: u32,
eot_token: u32,
no_speech_token: u32,
no_timestamps_token: u32,
language_token: Option<u32>,
}
impl Decoder {
#[allow(clippy::too_many_arguments)]
fn new(
model: Model,
tokenizer: Tokenizer,
seed: u64,
device: &Device,
language_token: Option<u32>,
task: Option<Task>,
timestamps: bool,
verbose: bool,
) -> Result<Self> {
let no_timestamps_token = token_id(&tokenizer, m::NO_TIMESTAMPS_TOKEN)?;
// Suppress the no_timestamps token when in timestamps mode.
// https://github.com/openai/whisper/blob/e8622f9afc4eba139bf796c210f5c01081000472/whisper/decoding.py#L452
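// Every token listed in `config.suppress_tokens` (plus the no-timestamps token when timestamps
// are requested) gets a bias of f32::NEG_INFINITY so it can never be sampled; all others get 0.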
let suppress_tokens: Vec<f32> = (0..model.config().vocab_size as u32)
.map(|i| {
if model.config().suppress_tokens.contains(&i)
|| timestamps && i == no_timestamps_token
{
f32::NEG_INFINITY
} else {
0f32
}
})
.collect();
let suppress_tokens = Tensor::new(suppress_tokens.as_slice(), device)?;
let sot_token = token_id(&tokenizer, m::SOT_TOKEN)?;
let transcribe_token = token_id(&tokenizer, m::TRANSCRIBE_TOKEN)?;
let translate_token = token_id(&tokenizer, m::TRANSLATE_TOKEN)?;
let eot_token = token_id(&tokenizer, m::EOT_TOKEN)?;
let no_speech_token = m::NO_SPEECH_TOKENS
.iter()
.find_map(|token| token_id(&tokenizer, token).ok());
let no_speech_token = match no_speech_token {
None => anyhow::bail!("unable to find any non-speech token"),
Some(n) => n,
};
Ok(Self {
model,
rng: rand::rngs::StdRng::seed_from_u64(seed),
tokenizer,
task,
timestamps,
verbose,
suppress_tokens,
sot_token,
transcribe_token,
translate_token,
eot_token,
no_speech_token,
language_token,
no_timestamps_token,
})
}
fn decode(&mut self, mel: &Tensor, t: f64) -> Result<DecodingResult> {
let model = &mut self.model;
let audio_features = model.encoder_forward(mel, true)?;
if self.verbose {
println!("audio features: {:?}", audio_features.dims());
}
let sample_len = model.config().max_target_positions / 2;
let mut sum_logprob = 0f64;
let mut no_speech_prob = f64::NAN;
let mut tokens = vec![self.sot_token];
if let Some(language_token) = self.language_token {
tokens.push(language_token);
}
match self.task {
None | Some(Task::Transcribe) => tokens.push(self.transcribe_token),
Some(Task::Translate) => tokens.push(self.translate_token),
}
if !self.timestamps {
tokens.push(self.no_timestamps_token);
}
for i in 0..sample_len {
let tokens_t = Tensor::new(tokens.as_slice(), mel.device())?;
// The model expects a batch dim but this inference loop does not handle
// it so we add it at this point.
let tokens_t = tokens_t.unsqueeze(0)?;
let ys = model.decoder_forward(&tokens_t, &audio_features, i == 0)?;
// Extract the no speech probability on the first iteration by looking at the first
// token logits and the probability of the corresponding token.
if i == 0 {
let logits = model.decoder_final_linear(&ys.i(..1)?)?.i(0)?.i(0)?;
no_speech_prob = softmax(&logits, 0)?
.i(self.no_speech_token as usize)?
.to_scalar::<f32>()? as f64;
}
let (_, seq_len, _) = ys.dims3()?;
let logits = model
.decoder_final_linear(&ys.i((..1, seq_len - 1..))?)?
.i(0)?
.i(0)?;
// TODO: Besides suppress tokens, we should apply the heuristics from
// ApplyTimestampRules, i.e.:
// - Timestamps come in pairs, except before EOT.
// - Timestamps should be non-decreasing.
// - If the sum of the probabilities of timestamps is higher than any other tokens,
// only consider timestamps when sampling.
// https://github.com/openai/whisper/blob/e8622f9afc4eba139bf796c210f5c01081000472/whisper/decoding.py#L439
let logits = logits.broadcast_add(&self.suppress_tokens)?;
let next_token = if t > 0f64 {
let prs = softmax(&(&logits / t)?, 0)?;
let logits_v: Vec<f32> = prs.to_vec1()?;
let distr = rand::distributions::WeightedIndex::new(&logits_v)?;
distr.sample(&mut self.rng) as u32
} else {
let logits_v: Vec<f32> = logits.to_vec1()?;
logits_v
.iter()
.enumerate()
.max_by(|(_, u), (_, v)| u.total_cmp(v))
.map(|(i, _)| i as u32)
.unwrap()
};
tokens.push(next_token);
let prob = softmax(&logits, candle::D::Minus1)?
.i(next_token as usize)?
.to_scalar::<f32>()? as f64;
if next_token == self.eot_token || tokens.len() > model.config().max_target_positions {
break;
}
sum_logprob += prob.ln();
}
let text = self.tokenizer.decode(&tokens, true).map_err(E::msg)?;
let avg_logprob = sum_logprob / tokens.len() as f64;
Ok(DecodingResult {
tokens,
text,
avg_logprob,
no_speech_prob,
temperature: t,
compression_ratio: f64::NAN,
})
}
fn decode_with_fallback(&mut self, segment: &Tensor) -> Result<DecodingResult> {
for (i, &t) in m::TEMPERATURES.iter().enumerate() {
let dr: Result<DecodingResult> = self.decode(segment, t);
if i == m::TEMPERATURES.len() - 1 {
return dr;
}
// On errors, we try again with a different temperature.
match dr {
Ok(dr) => {
let needs_fallback = dr.compression_ratio > m::COMPRESSION_RATIO_THRESHOLD
|| dr.avg_logprob < m::LOGPROB_THRESHOLD;
if !needs_fallback || dr.no_speech_prob > m::NO_SPEECH_THRESHOLD {
return Ok(dr);
}
}
Err(err) => {
println!("Error running at {t}: {err}")
}
}
}
unreachable!()
}
fn run(&mut self, mel: &Tensor) -> Result<Vec<Segment>> {
let (_, _, content_frames) = mel.dims3()?;
let mut seek = 0;
let mut segments = vec![];
while seek < content_frames {
let start = std::time::Instant::now();
let time_offset = (seek * m::HOP_LENGTH) as f64 / m::SAMPLE_RATE as f64;
let segment_size = usize::min(content_frames - seek, m::N_FRAMES);
let mel_segment = mel.narrow(2, seek, segment_size)?;
let segment_duration = (segment_size * m::HOP_LENGTH) as f64 / m::SAMPLE_RATE as f64;
let dr = self.decode_with_fallback(&mel_segment)?;
seek += segment_size;
if dr.no_speech_prob > m::NO_SPEECH_THRESHOLD && dr.avg_logprob < m::LOGPROB_THRESHOLD {
println!("no speech detected, skipping {seek} {dr:?}");
continue;
}
let segment = Segment {
start: time_offset,
duration: segment_duration,
dr,
};
if self.timestamps {
println!(
"{:.1}s -- {:.1}s",
segment.start,
segment.start + segment.duration,
);
let mut tokens_to_decode = vec![];
let mut prev_timestamp_s = 0f32;
for &token in segment.dr.tokens.iter() {
if token == self.sot_token || token == self.eot_token {
continue;
}
// The no_timestamp_token is the last before the timestamp ones.
if token > self.no_timestamps_token {
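// Whisper timestamp tokens are spaced 0.02s apart, hence the division by 50 to get seconds.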
let timestamp_s = (token - self.no_timestamps_token + 1) as f32 / 50.;
if !tokens_to_decode.is_empty() {
let text = self
.tokenizer
.decode(&tokens_to_decode, true)
.map_err(E::msg)?;
println!(" {:.1}s-{:.1}s: {}", prev_timestamp_s, timestamp_s, text);
tokens_to_decode.clear()
}
prev_timestamp_s = timestamp_s;
} else {
tokens_to_decode.push(token)
}
}
if !tokens_to_decode.is_empty() {
let text = self
.tokenizer
.decode(&tokens_to_decode, true)
.map_err(E::msg)?;
if !text.is_empty() {
println!(" {:.1}s-...: {}", prev_timestamp_s, text);
}
tokens_to_decode.clear()
}
} else {
println!(
"{:.1}s -- {:.1}s: {}",
segment.start,
segment.start + segment.duration,
segment.dr.text,
)
}
if self.verbose {
println!("{seek}: {segment:?}, in {:?}", start.elapsed());
}
segments.push(segment)
}
Ok(segments)
}
}
pub fn token_id(tokenizer: &Tokenizer, token: &str) -> candle::Result<u32> {
match tokenizer.token_to_id(token) {
None => candle::bail!("no token-id for {token}"),
Some(id) => Ok(id),
}
}
#[derive(Clone, Copy, Debug, ValueEnum)]
enum Task {
Transcribe,
Translate,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, ValueEnum)]
enum WhichModel {
Tiny,
#[value(name = "tiny.en")]
TinyEn,
Base,
#[value(name = "base.en")]
BaseEn,
Small,
#[value(name = "small.en")]
SmallEn,
Medium,
#[value(name = "medium.en")]
MediumEn,
Large,
LargeV2,
LargeV3,
LargeV3Turbo,
#[value(name = "distil-medium.en")]
DistilMediumEn,
#[value(name = "distil-large-v2")]
DistilLargeV2,
#[value(name = "distil-large-v3")]
DistilLargeV3,
}
impl WhichModel {
fn is_multilingual(&self) -> bool {
match self {
Self::Tiny
| Self::Base
| Self::Small
| Self::Medium
| Self::Large
| Self::LargeV2
| Self::LargeV3
| Self::LargeV3Turbo
| Self::DistilLargeV2
| Self::DistilLargeV3 => true,
Self::TinyEn | Self::BaseEn | Self::SmallEn | Self::MediumEn | Self::DistilMediumEn => {
false
}
}
}
fn model_and_revision(&self) -> (&'static str, &'static str) {
match self {
Self::Tiny => ("openai/whisper-tiny", "main"),
Self::TinyEn => ("openai/whisper-tiny.en", "refs/pr/15"),
Self::Base => ("openai/whisper-base", "refs/pr/22"),
Self::BaseEn => ("openai/whisper-base.en", "refs/pr/13"),
Self::Small => ("openai/whisper-small", "main"),
Self::SmallEn => ("openai/whisper-small.en", "refs/pr/10"),
Self::Medium => ("openai/whisper-medium", "main"),
Self::MediumEn => ("openai/whisper-medium.en", "main"),
Self::Large => ("openai/whisper-large", "refs/pr/36"),
Self::LargeV2 => ("openai/whisper-large-v2", "refs/pr/57"),
Self::LargeV3 => ("openai/whisper-large-v3", "main"),
Self::LargeV3Turbo => ("openai/whisper-large-v3-turbo", "main"),
Self::DistilMediumEn => ("distil-whisper/distil-medium.en", "main"),
Self::DistilLargeV2 => ("distil-whisper/distil-large-v2", "main"),
Self::DistilLargeV3 => ("distil-whisper/distil-large-v3", "main"),
}
}
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
#[arg(long)]
model_id: Option<String>,
/// The model to use, check out available models:
/// https://huggingface.co/models?search=whisper
#[arg(long)]
revision: Option<String>,
/// The model to be used, can be tiny, small, medium.
#[arg(long, default_value = "tiny.en")]
model: WhichModel,
/// The input to be processed, in wav format, will default to `jfk.wav`. Alternatively
/// this can be set to sample:jfk, sample:gb1, ... to fetch a sample from the following
/// repo: https://huggingface.co/datasets/Narsil/candle_demo/
#[arg(long)]
input: Option<String>,
/// The seed to use when generating random samples.
#[arg(long, default_value_t = 299792458)]
seed: u64,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
#[arg(long)]
quantized: bool,
/// Language.
#[arg(long)]
language: Option<String>,
/// Task. When no task is specified, the input tokens contain only the sot token, which can
/// improve results when in no-timestamp mode.
#[arg(long)]
task: Option<Task>,
/// Timestamps mode, this is not fully implemented yet.
#[arg(long)]
timestamps: bool,
/// Print the full DecodingResult structure rather than just the text.
#[arg(long)]
verbose: bool,
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
let device = candle_examples::device(args.cpu)?;
let (default_model, default_revision) = if args.quantized {
("lmz/candle-whisper", "main")
} else {
args.model.model_and_revision()
};
let default_model = default_model.to_string();
let default_revision = default_revision.to_string();
let (model_id, revision) = match (args.model_id, args.revision) {
(Some(model_id), Some(revision)) => (model_id, revision),
(Some(model_id), None) => (model_id, "main".to_string()),
(None, Some(revision)) => (default_model, revision),
(None, None) => (default_model, default_revision),
};
let (config_filename, tokenizer_filename, weights_filename, input) = {
let api = Api::new()?;
let dataset = api.dataset("Narsil/candle-examples".to_string());
let repo = api.repo(Repo::with_revision(model_id, RepoType::Model, revision));
let sample = if let Some(input) = args.input {
if let Some(sample) = input.strip_prefix("sample:") {
dataset.get(&format!("samples_{sample}.wav"))?
} else {
std::path::PathBuf::from(input)
}
} else {
println!("No audio file submitted: Downloading https://huggingface.co/datasets/Narsil/candle_demo/blob/main/samples_jfk.wav");
dataset.get("samples_jfk.wav")?
};
let (config, tokenizer, model) = if args.quantized {
let ext = match args.model {
WhichModel::TinyEn => "tiny-en",
WhichModel::Tiny => "tiny",
_ => unimplemented!("no quantized support for {:?}", args.model),
};
(
repo.get(&format!("config-{ext}.json"))?,
repo.get(&format!("tokenizer-{ext}.json"))?,
repo.get(&format!("model-{ext}-q80.gguf"))?,
)
} else {
let config = repo.get("config.json")?;
let tokenizer = repo.get("tokenizer.json")?;
let model = repo.get("model.safetensors")?;
(config, tokenizer, model)
};
(config, tokenizer, model, sample)
};
let config: Config = serde_json::from_str(&std::fs::read_to_string(config_filename)?)?;
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let mel_bytes = match config.num_mel_bins {
80 => include_bytes!("melfilters.bytes").as_slice(),
128 => include_bytes!("melfilters128.bytes").as_slice(),
nmel => anyhow::bail!("unexpected num_mel_bins {nmel}"),
};
let mut mel_filters = vec![0f32; mel_bytes.len() / 4];
<byteorder::LittleEndian as byteorder::ByteOrder>::read_f32_into(mel_bytes, &mut mel_filters);
let (pcm_data, sample_rate) = pcm_decode::pcm_decode(input)?;
if sample_rate != m::SAMPLE_RATE as u32 {
anyhow::bail!("input file must have a {} sampling rate", m::SAMPLE_RATE)
}
println!("pcm data loaded {}", pcm_data.len());
let mel = audio::pcm_to_mel(&config, &pcm_data, &mel_filters);
let mel_len = mel.len();
let mel = Tensor::from_vec(
mel,
(1, config.num_mel_bins, mel_len / config.num_mel_bins),
&device,
)?;
println!("loaded mel: {:?}", mel.dims());
let mut model = if args.quantized {
let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf(
&weights_filename,
&device,
)?;
Model::Quantized(m::quantized_model::Whisper::load(&vb, config)?)
} else {
let vb =
unsafe { VarBuilder::from_mmaped_safetensors(&[weights_filename], m::DTYPE, &device)? };
Model::Normal(m::model::Whisper::load(&vb, config)?)
};
let language_token = match (args.model.is_multilingual(), args.language) {
(true, None) => Some(multilingual::detect_language(&mut model, &tokenizer, &mel)?),
(false, None) => None,
(true, Some(language)) => match token_id(&tokenizer, &format!("<|{language}|>")) {
Ok(token_id) => Some(token_id),
Err(_) => anyhow::bail!("language {language} is not supported"),
},
(false, Some(_)) => {
anyhow::bail!("a language cannot be set for non-multilingual models")
}
};
let mut dc = Decoder::new(
model,
tokenizer,
args.seed,
&device,
language_token,
args.task,
args.timestamps,
args.verbose,
)?;
dc.run(&mel)?;
Ok(())
}
| candle/candle-examples/examples/whisper/main.rs/0 | {
"file_path": "candle/candle-examples/examples/whisper/main.rs",
"repo_id": "candle",
"token_count": 10862
} |
// Build script to run nvcc and generate the C glue code for launching the flash-attention kernel.
// The cuda build time is very long so one can set the CANDLE_FLASH_ATTN_BUILD_DIR environment
// variable in order to cache the compiled artifacts and avoid recompiling too often.
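// For example (the path here is just illustrative):
// CANDLE_FLASH_ATTN_BUILD_DIR=$HOME/.cache/candle-flash-attn cargo build --release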
use anyhow::{Context, Result};
use std::path::PathBuf;
const KERNEL_FILES: [&str; 33] = [
"kernels/flash_api.cu",
"kernels/flash_fwd_hdim128_fp16_sm80.cu",
"kernels/flash_fwd_hdim160_fp16_sm80.cu",
"kernels/flash_fwd_hdim192_fp16_sm80.cu",
"kernels/flash_fwd_hdim224_fp16_sm80.cu",
"kernels/flash_fwd_hdim256_fp16_sm80.cu",
"kernels/flash_fwd_hdim32_fp16_sm80.cu",
"kernels/flash_fwd_hdim64_fp16_sm80.cu",
"kernels/flash_fwd_hdim96_fp16_sm80.cu",
"kernels/flash_fwd_hdim128_bf16_sm80.cu",
"kernels/flash_fwd_hdim160_bf16_sm80.cu",
"kernels/flash_fwd_hdim192_bf16_sm80.cu",
"kernels/flash_fwd_hdim224_bf16_sm80.cu",
"kernels/flash_fwd_hdim256_bf16_sm80.cu",
"kernels/flash_fwd_hdim32_bf16_sm80.cu",
"kernels/flash_fwd_hdim64_bf16_sm80.cu",
"kernels/flash_fwd_hdim96_bf16_sm80.cu",
"kernels/flash_fwd_hdim128_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim160_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim192_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim224_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim256_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim32_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim64_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim96_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim128_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim160_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim192_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim224_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim256_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim32_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim64_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim96_bf16_causal_sm80.cu",
];
fn main() -> Result<()> {
println!("cargo:rerun-if-changed=build.rs");
for kernel_file in KERNEL_FILES.iter() {
println!("cargo:rerun-if-changed={kernel_file}");
}
println!("cargo:rerun-if-changed=kernels/flash_fwd_kernel.h");
println!("cargo:rerun-if-changed=kernels/flash_fwd_launch_template.h");
println!("cargo:rerun-if-changed=kernels/flash.h");
println!("cargo:rerun-if-changed=kernels/philox.cuh");
println!("cargo:rerun-if-changed=kernels/softmax.h");
println!("cargo:rerun-if-changed=kernels/utils.h");
println!("cargo:rerun-if-changed=kernels/kernel_traits.h");
println!("cargo:rerun-if-changed=kernels/block_info.h");
println!("cargo:rerun-if-changed=kernels/static_switch.h");
println!("cargo:rerun-if-changed=kernels/hardware_info.h");
let out_dir = PathBuf::from(std::env::var("OUT_DIR").context("OUT_DIR not set")?);
let build_dir = match std::env::var("CANDLE_FLASH_ATTN_BUILD_DIR") {
Err(_) =>
{
#[allow(clippy::redundant_clone)]
out_dir.clone()
}
Ok(build_dir) => {
let path = PathBuf::from(build_dir);
path.canonicalize().expect(&format!(
"Directory doesn't exists: {} (the current directory is {})",
&path.display(),
std::env::current_dir()?.display()
))
}
};
let kernels = KERNEL_FILES.iter().collect();
let mut builder = bindgen_cuda::Builder::default()
.kernel_paths(kernels)
.out_dir(build_dir.clone())
.arg("-std=c++17")
.arg("-O3")
.arg("-U__CUDA_NO_HALF_OPERATORS__")
.arg("-U__CUDA_NO_HALF_CONVERSIONS__")
.arg("-U__CUDA_NO_HALF2_OPERATORS__")
.arg("-U__CUDA_NO_BFLOAT16_CONVERSIONS__")
.arg("-Icutlass/include")
.arg("--expt-relaxed-constexpr")
.arg("--expt-extended-lambda")
.arg("--use_fast_math")
.arg("--verbose");
if let Ok(target) = std::env::var("TARGET") {
if target.contains("msvc") {
builder = builder.arg("-D_USE_MATH_DEFINES");
}
}
let out_file = build_dir.join("libflashattention.a");
builder.build_lib(out_file);
println!("cargo:rustc-link-search={}", build_dir.display());
println!("cargo:rustc-link-lib=flashattention");
println!("cargo:rustc-link-lib=dylib=cudart");
println!("cargo:rustc-link-lib=dylib=stdc++");
Ok(())
}
| candle/candle-flash-attn/build.rs/0 | {
"file_path": "candle/candle-flash-attn/build.rs",
"repo_id": "candle",
"token_count": 2163
} |
/******************************************************************************
* Copyright (c) 2024, Tri Dao.
******************************************************************************/
#pragma once
#include <cute/tensor.hpp>
#include "utils.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace flash {
using namespace cute;
////////////////////////////////////////////////////////////////////////////////////////////////////
template <bool Is_even_K=true, bool Clear_OOB_K=true,
typename Engine0, typename Layout0, typename Engine1, typename Layout1,
typename Engine2, typename Layout2, typename Engine3, typename Layout3>
__forceinline__ __device__ void copy_rotary_interleaved(Tensor<Engine0, Layout0> const &S,
Tensor<Engine1, Layout1> &D,
Tensor<Engine2, Layout2> const &Cos,
Tensor<Engine2, Layout2> const &Sin,
Tensor<Engine3, Layout3> const &identity_MN,
const int max_MN, const int min_MN,
const int dim, const int rotary_dim) {
CUTE_STATIC_ASSERT_V(rank(S) == Int<3>{});
CUTE_STATIC_ASSERT_V(rank(D) == Int<3>{});
CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D)); // MMA
CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(D)); // MMA_M
CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(D)); // MMA_K
CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Cos)); // MMA_M
CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Cos)); // MMA_K
CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Sin)); // MMA_M
CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Sin)); // MMA_K
CUTE_STATIC_ASSERT_V(size<0>(Cos) == size<0>(Sin)); // MMA_K
static_assert(decltype(size<0>(S))::value == decltype(size<0>(Cos))::value * 2);
static_assert(decltype(size<0>(Cos))::value % 2 == 0); // Since we do fast conversion from fp16/bf16 to fp32
Tensor rCos = make_fragment_like(Cos);
Tensor rSin = make_fragment_like(Sin);
Tensor rS = make_fragment_like(S);
#pragma unroll
for (int m = 0; m < size<1>(S); ++m) {
if (get<0>(identity_MN(0, m, 0)) >= min_MN && get<0>(identity_MN(0, m, 0)) < max_MN) {
#pragma unroll
for (int k = 0; k < size<2>(S); ++k) {
if (Is_even_K || get<1>(identity_MN(0, 0, k)) < dim) {
cute::copy(S(_, m, k), rS(_, m, k));
if (get<1>(identity_MN(0, 0, k)) < rotary_dim) {
cute::copy(Cos(_, m, k), rCos(_, m, k));
cute::copy(Sin(_, m, k), rSin(_, m, k));
Tensor S_fp32 = convert_type<float>(rS(_, m, k));
Tensor cos_fp32 = convert_type<float>(rCos(_, m, k));
Tensor sin_fp32 = convert_type<float>(rSin(_, m, k));
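// Rotate each interleaved pair as the complex product (x_{2i} + i*x_{2i+1}) * (cos + i*sin).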
#pragma unroll
for (int i = 0; i < size<0>(rS) / 2; ++i) {
float real = S_fp32(2 * i) * cos_fp32(i) - S_fp32(2 * i + 1) * sin_fp32(i);
float imag = S_fp32(2 * i) * sin_fp32(i) + S_fp32(2 * i + 1) * cos_fp32(i);
S_fp32(2 * i) = real;
S_fp32(2 * i + 1) = imag;
}
// A copy is needed here for convert_type to work properly.
Tensor S_fp32_copy = make_fragment_like(S_fp32);
cute::copy(S_fp32, S_fp32_copy);
using T = typename Engine0::value_type;
Tensor S_og_type = convert_type<T>(S_fp32_copy);
cute::copy(S_og_type, rS(_, m, k));
}
cute::copy(rS(_, m, k), D(_, m, k));
} else if (Clear_OOB_K) {
cute::clear(D(_, m, k));
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template <bool Is_even_K=true, bool Clear_OOB_K=true,
typename Engine0, typename Layout0, typename Engine1, typename Layout1,
typename Engine2, typename Layout2, typename Engine3, typename Layout3>
__forceinline__ __device__ void copy_rotary_contiguous(Tensor<Engine0, Layout0> const &S,
Tensor<Engine1, Layout1> &D,
Tensor<Engine2, Layout2> const &Cos,
Tensor<Engine2, Layout2> const &Sin,
Tensor<Engine3, Layout3> const &identity_MN,
const int max_MN, const int min_MN,
const int dim, const int rotary_dim) {
CUTE_STATIC_ASSERT_V(rank(S) == Int<3>{});
CUTE_STATIC_ASSERT_V(rank(D) == Int<3>{});
CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D)); // MMA
CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(D)); // MMA_M
CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(D)); // MMA_K
CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Cos)); // MMA_M
CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Cos)); // MMA_K
CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Sin)); // MMA_M
CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Sin)); // MMA_K
CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(Cos)); // MMA
CUTE_STATIC_ASSERT_V(size<0>(Cos) == size<0>(Sin));
static_assert(decltype(size<0>(Cos))::value % 2 == 0); // Since we do fast conversion from fp16/bf16 to fp32
Tensor rCos = make_fragment_like(Cos);
Tensor rSin = make_fragment_like(Sin);
Tensor rS = make_fragment_like(S);
Tensor rS_other = make_fragment_like(rS(_, 0, 0));
#pragma unroll
for (int m = 0; m < size<1>(S); ++m) {
if (get<0>(identity_MN(0, m, 0)) >= min_MN && get<0>(identity_MN(0, m, 0)) < max_MN) {
#pragma unroll
for (int k = 0; k < size<2>(S); ++k) {
if (Is_even_K || get<1>(identity_MN(0, 0, k)) < dim) {
cute::copy(S(_, m, k), rS(_, m, k));
if (get<1>(identity_MN(0, 0, k)) < rotary_dim) {
const bool is_left = get<1>(identity_MN(0, 0, k)) < rotary_dim / 2;
Tensor gS_other = make_tensor(S(_, m, k).data() + (is_left ? rotary_dim / 2 : -rotary_dim / 2), S(_, m, k).layout());
cute::copy(gS_other, rS_other);
// if (cute::thread0()) { print_tensor(rS(_, m, k)); print_tensor(rS_other); }
Tensor gCos = make_tensor(Cos(_, m, k).data() + (is_left ? 0 : -rotary_dim / 2), Cos(_, m, k).layout());
Tensor gSin = make_tensor(Sin(_, m, k).data() + (is_left ? 0 : -rotary_dim / 2), Sin(_, m, k).layout());
cute::copy(gCos, rCos(_, m, k));
cute::copy(gSin, rSin(_, m, k));
// if (cute::thread0()) { print_tensor(rCos(_, m, k)); print_tensor(rSin(_, m, k)); }
Tensor S_fp32 = convert_type<float>(rS(_, m, k));
Tensor S_other_fp32 = convert_type<float>(rS_other);
Tensor cos_fp32 = convert_type<float>(rCos(_, m, k));
Tensor sin_fp32 = convert_type<float>(rSin(_, m, k));
#pragma unroll
for (int i = 0; i < size<0>(rS); ++i) {
S_fp32(i) = S_fp32(i) * cos_fp32(i) + S_other_fp32(i) * (is_left ? -sin_fp32(i) : sin_fp32(i));
}
// A copy is needed here for convert_type to work properly.
Tensor S_fp32_copy = make_fragment_like(S_fp32);
cute::copy(S_fp32, S_fp32_copy);
using T = typename Engine0::value_type;
Tensor S_og_type = convert_type<T>(S_fp32_copy);
cute::copy(S_og_type, rS(_, m, k));
// if (cute::thread0()) { print_tensor(rS(_, m, k)); }
}
cute::copy(rS(_, m, k), D(_, m, k));
} else if (Clear_OOB_K) {
cute::clear(D(_, m, k));
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace flash
| candle/candle-flash-attn/kernels/rotary.h/0 | {
"file_path": "candle/candle-flash-attn/kernels/rotary.h",
"repo_id": "candle",
"token_count": 5052
} |
#include "compatibility.cuh"
#include<stdint.h>
#include<cmath>
// TODO: This is often used to check that the data is contiguous so that
// kernels can be easily mapped. However this only returns true for row
// major; if all the inputs were column major we could apply the fast path
// too (but we wouldn't if some of them are row major and some column major).
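// For example, dims [2, 3, 4] with strides [12, 4, 1] (row major) is contiguous,
// while column-major strides [1, 2, 6] are not.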
__device__ bool is_contiguous(
const size_t num_dims,
const size_t *dims,
const size_t *strides
) {
size_t acc = 1;
for (unsigned int d = 0; d < num_dims; d++) {
unsigned int dim_idx = num_dims - 1 - d;
if (dims[dim_idx] > 1 && acc != strides[dim_idx]) {
return false;
}
acc *= dims[dim_idx];
}
return true;
}
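// Maps a linear index over the logical (row-major) shape to the corresponding offset in a
// strided buffer, e.g. with dims [2, 3] and column-major strides [1, 2], linear index 1
// (element (0, 1)) maps to offset 2.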
__device__ unsigned int get_strided_index(
unsigned int idx,
const size_t num_dims,
const size_t *dims,
const size_t *strides
) {
unsigned int strided_i = 0;
for (unsigned int d = 0; d < num_dims; d++) {
unsigned int dim_idx = num_dims - 1 - d;
strided_i += (idx % dims[dim_idx]) * strides[dim_idx];
idx /= dims[dim_idx];
}
return strided_i;
}
__device__ unsigned int restrided(
const unsigned int strided_i,
const size_t num_dims,
const size_t *dims,
const size_t *strides,
const size_t *new_strides
) {
unsigned int idx = 0;
for (int d = 0; d < num_dims; d++) {
idx += (strides[d] == 0 ? 0 : (strided_i / strides[d]) % dims[d]) * new_strides[d];
}
return idx;
}
// Sourced from https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
// Input must be less than or equal to 2 ^ 16
// used in reductions
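// e.g. next_power_of_two(5) == 8 and next_power_of_two(16) == 16.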
__device__ __forceinline__ unsigned int next_power_of_two(unsigned int v) {
v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v++;
return v;
}
// Efficiently computes the sum of each chunk in "data" of size chunk_len, and
// stores the sums in out[i / chunk_len]
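// e.g. with chunk_len == 4, out[0] accumulates the data of threads 0..4, out[1] of threads 4..8, etc.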
template<typename T>
__device__ void chunk_sum(
const size_t chunk_len,
const T data,
T* out
) {
__shared__ T buf[1024];
// assumes that threads where i >= numel have already exited
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int block_i = threadIdx.x;
// Fall back to atomicAdd if chunk_len is small to reduce overhead
if (chunk_len <= 2) {
atomicAdd(out + i / chunk_len, data);
return;
}
buf[block_i] = data;
unsigned int chunk_i = i % chunk_len;
unsigned int chunk_start = max((int)(block_i - chunk_i), 0);
unsigned int chunk_end = min((unsigned int)(block_i + chunk_len - chunk_i), blockDim.x);
chunk_i = block_i - chunk_start;
size_t max_chunk_len = min(chunk_end - chunk_start, blockDim.x);
size_t incr = next_power_of_two(max_chunk_len) >> 1;
__syncthreads();
// Uses sequential addressing as discussed in
// https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf
for (; incr > 0; incr >>= 1) {
unsigned int block_i_2 = block_i + incr;
if (block_i_2 < chunk_end && chunk_i < incr) {
// This is sound because __syncthreads and the conditions above
// ensure that no data races occur
buf[block_i] += buf[block_i_2];
}
__syncthreads();
}
if (block_i == chunk_start) {
atomicAdd(out + i / chunk_len, buf[block_i]);
}
}
__device__ __forceinline__ bool isnang(float a) { return isnan(a); }
__device__ __forceinline__ bool isnang(double a) { return isnan(a); }
__device__ __forceinline__ float recipg(float a) { return 1.0 / a; }
__device__ __forceinline__ double recipg(double a) { return 1.0 / a; }
__device__ __forceinline__ float cosg(float a) { return cosf(a); }
__device__ __forceinline__ double cosg(double a) { return cos(a); }
__device__ __forceinline__ float sing(float a) { return sinf(a); }
__device__ __forceinline__ double sing(double a) { return sin(a); }
__device__ __forceinline__ float sqrtg(float a) { return sqrtf(a); }
__device__ __forceinline__ double sqrtg(double a) { return sqrt(a); }
__device__ __forceinline__ float powg(float a, float b) { return powf(a, b); }
__device__ __forceinline__ double powg(double a, double b) { return pow(a, b); }
__device__ __forceinline__ float tanhg(float a) { return tanhf(a); }
__device__ __forceinline__ double tanhg(double a) { return tanh(a); }
__device__ __forceinline__ float erfg(float a) { return erff(a); }
__device__ __forceinline__ double erfg(double a) { return erf(a); }
__device__ __forceinline__ float ceilg(float a) { return ceilf(a); }
__device__ __forceinline__ double ceilg(double a) { return ceil(a); }
__device__ __forceinline__ float floorg(float a) { return floorf(a); }
__device__ __forceinline__ double floorg(double a) { return floor(a); }
__device__ __forceinline__ float roundg(float a) { return roundf(a); }
__device__ __forceinline__ double roundg(double a) { return round(a); }
__device__ __forceinline__ float normcdfg(float a) { return normcdff(a); }
__device__ __forceinline__ double normcdfg(double a) { return normcdf(a); }
__device__ __forceinline__ float maxg(float a, float b) { return fmaxf(a, b); }
__device__ __forceinline__ double maxg(double a, double b) { return fmax(a, b); }
__device__ __forceinline__ float ming(float a, float b) { return fminf(a, b); }
__device__ __forceinline__ double ming(double a, double b) { return fmin(a, b); }
__device__ __forceinline__ float logg(float a) { return logf(a); }
__device__ __forceinline__ double logg(double a) { return log(a); }
__device__ __forceinline__ float expg(float a) { return expf(a); }
__device__ __forceinline__ double expg(double a) { return exp(a); }
__device__ __forceinline__ float absg(float a) { return fabsf(a); }
__device__ __forceinline__ double absg(double a) { return fabs(a); }
__device__ __forceinline__ float copysigng(float a, float b) { return copysignf(a, b); }
__device__ __forceinline__ double copysigng(double a, double b) { return copysign(a, b); }
__device__ __forceinline__ int64_t ming(int64_t a, int64_t b) { return min(a, b); }
__device__ __forceinline__ int64_t maxg(int64_t a, int64_t b) { return max(a, b); }
__device__ __forceinline__ uint32_t ming(uint32_t a, uint32_t b) { return min(a, b); }
__device__ __forceinline__ uint32_t maxg(uint32_t a, uint32_t b) { return max(a, b); }
__device__ __forceinline__ uint8_t ming(uint8_t a, uint8_t b) { return min(a, b); }
__device__ __forceinline__ uint8_t maxg(uint8_t a, uint8_t b) { return max(a, b); }
#if __CUDA_ARCH__ >= 530
__device__ __forceinline__ __half powg(__half a, __half b) { return __float2half(powf(__half2float(a), __half2float(b))); }
__device__ __forceinline__ bool isnang(__half a) { return __hisnan(a); }
__device__ __forceinline__ __half sqrtg(__half a) { return hsqrt(a); }
__device__ __forceinline__ __half cosg(__half a) { return hcos(a); }
__device__ __forceinline__ __half sing(__half a) { return hsin(a); }
__device__ __forceinline__ __half recipg(__half a) { __half one = 1.0; return one / a; }
__device__ __forceinline__ __half maxg(__half a, __half b) { return __hmax_nan(a, b); }
__device__ __forceinline__ __half tanhg(__half a) { return __float2half(tanhf(__half2float(a))); }
__device__ __forceinline__ __half erfg(__half a) { return __float2half(erff(__half2float(a))); }
__device__ __forceinline__ __half ceilg(__half a) { return __float2half(ceilf(__half2float(a))); }
__device__ __forceinline__ __half floorg(__half a) { return __float2half(floorf(__half2float(a))); }
__device__ __forceinline__ __half roundg(__half a) { return __float2half(roundf(__half2float(a))); }
__device__ __forceinline__ __half normcdfg(__half a) { return __float2half(normcdff(__half2float(a))); }
__device__ __forceinline__ __half ming(__half a, __half b) { return __hmin_nan(a, b); }
__device__ __forceinline__ __half logg(__half a) { return hlog(a); }
__device__ __forceinline__ __half expg(__half a) { return hexp(a); }
__device__ __forceinline__ __half absg(__half a) { return __habs(a); }
__device__ __forceinline__ __half copysigng(__half a, __half b) { return __float2half(copysignf(__half2float(a), __half2float(b))); }
#endif
#if __CUDA_ARCH__ >= 800
__device__ __forceinline__ __nv_bfloat16 powg(__nv_bfloat16 a, __nv_bfloat16 b) { return __float2bfloat16(powf(__bfloat162float(a), __bfloat162float(b))); }
__device__ __forceinline__ bool isnang(__nv_bfloat16 a) { return __hisnan(a); }
__device__ __forceinline__ __nv_bfloat16 sqrtg(__nv_bfloat16 a) { return hsqrt(a); }
__device__ __forceinline__ __nv_bfloat16 cosg(__nv_bfloat16 a) { return hcos(a); }
__device__ __forceinline__ __nv_bfloat16 sing(__nv_bfloat16 a) { return hsin(a); }
__device__ __forceinline__ __nv_bfloat16 recipg(__nv_bfloat16 a) { __nv_bfloat16 one = 1.0; return one / a; }
__device__ __forceinline__ __nv_bfloat16 maxg(__nv_bfloat16 a, __nv_bfloat16 b) { return __hmax_nan(a, b); }
__device__ __forceinline__ __nv_bfloat16 tanhg(__nv_bfloat16 a) { return __float2bfloat16(tanhf(__bfloat162float(a))); }
__device__ __forceinline__ __nv_bfloat16 erfg(__nv_bfloat16 a) { return __float2bfloat16(erff(__bfloat162float(a))); }
__device__ __forceinline__ __nv_bfloat16 ceilg(__nv_bfloat16 a) { return __float2bfloat16(ceilf(__bfloat162float(a))); }
__device__ __forceinline__ __nv_bfloat16 floorg(__nv_bfloat16 a) { return __float2bfloat16(floorf(__bfloat162float(a))); }
__device__ __forceinline__ __nv_bfloat16 roundg(__nv_bfloat16 a) { return __float2bfloat16(roundf(__bfloat162float(a))); }
__device__ __forceinline__ __nv_bfloat16 normcdfg(__nv_bfloat16 a) { return __float2bfloat16(normcdff(__bfloat162float(a))); }
__device__ __forceinline__ __nv_bfloat16 ming(__nv_bfloat16 a, __nv_bfloat16 b) { return __hmin_nan(a, b); }
__device__ __forceinline__ __nv_bfloat16 logg(__nv_bfloat16 a) { return hlog(a); }
__device__ __forceinline__ __nv_bfloat16 expg(__nv_bfloat16 a) { return hexp(a); }
__device__ __forceinline__ __nv_bfloat16 absg(__nv_bfloat16 a) { return __habs(a); }
__device__ __forceinline__ __nv_bfloat16 copysigng(__nv_bfloat16 a, __nv_bfloat16 b) { return __float2bfloat16(copysignf(__bfloat162float(a), __bfloat162float(b))); }
#endif
| candle/candle-kernels/src/cuda_utils.cuh/0 | {
"file_path": "candle/candle-kernels/src/cuda_utils.cuh",
"repo_id": "candle",
"token_count": 3947
} |
#include <metal_stdlib>
using namespace metal;
template<typename T> METAL_FUNC void fill_with(
device T *out,
constant float &value,
constant size_t &numel,
uint tid [[thread_position_in_grid]]
) {
if (tid >= numel) {
return;
}
out[tid] = static_cast<T>(value);
}
#define FILL_OP(NAME, T) \
kernel void fill_##NAME( \
device T *out, \
constant float &value, \
constant size_t &numel, \
uint tid [[thread_position_in_grid]] \
) { \
fill_with<T>(out, value, numel, tid); \
}
#define FILL_OPS(NAME, T) \
FILL_OP(NAME, T)
FILL_OPS(u8, uchar)
FILL_OPS(u32, uint)
FILL_OPS(i64, long)
FILL_OPS(f16, half)
FILL_OPS(f32, float)
#if __METAL_VERSION__ >= 310
FILL_OPS(bf16, bfloat)
#endif
| candle/candle-metal-kernels/src/fill.metal/0 | {
"file_path": "candle/candle-metal-kernels/src/fill.metal",
"repo_id": "candle",
"token_count": 638
} |
use metal::{Buffer, ComputeCommandEncoderRef, ComputePipelineState, MTLSize};
use std::ffi::c_void;
/// Most kernels apply similarly across tensors.
/// This creates a dispatch strategy that uses the maximum number of threads per threadgroup
/// (capped at the actual total buffer length), so each kernel invocation can just perform its op
/// on its single point in the buffer.
pub(crate) fn linear_split(pipeline: &ComputePipelineState, length: usize) -> (MTLSize, MTLSize) {
let size = length as u64;
let width = std::cmp::min(pipeline.max_total_threads_per_threadgroup(), size);
let count = size.div_ceil(width);
let thread_group_count = MTLSize {
width: count,
height: 1,
depth: 1,
};
let thread_group_size = MTLSize {
width,
height: 1,
depth: 1,
};
(thread_group_count, thread_group_size)
}
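// Illustrative sketch (assumes a `pipeline` handle is in scope): for a pipeline whose
// max_total_threads_per_threadgroup() is 1024 and a buffer of 4096 elements, `linear_split`
// yields a threadgroup size of 1024 and a threadgroup count of 4, so each element is covered by
// exactly one thread.
//
// let (count, size) = linear_split(&pipeline, 4096);
// assert_eq!((count.width, size.width), (4, 1024));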
// https://github.com/ml-explore/mlx/blob/bddf23f175726a57f0e443cd45518c0757daa166/mlx/backend/metal/utils.h#L96
pub fn get_block_dims(dim0: u64, dim1: u64, dim2: u64) -> MTLSize {
let mut pows0 = 0u64;
let mut pows1 = 0u64;
let mut pows2 = 0u64;
let mut sum = 0u64;
loop {
let presum = sum;
// Check all the pows
if dim0 >= (1 << (pows0 + 1)) {
pows0 += 1;
sum += 1;
}
if sum == 10 {
break;
}
if dim1 >= (1 << (pows1 + 1)) {
pows1 += 1;
sum += 1;
}
if sum == 10 {
break;
}
if dim2 >= (1 << (pows2 + 1)) {
pows2 += 1;
sum += 1;
}
if sum == presum || sum == 10 {
break;
}
}
MTLSize {
width: 1 << pows0,
height: 1 << pows1,
depth: 1 << pows2,
}
}
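// Worked example (illustrative): `get_block_dims(100, 100, 100)` hands out powers of two to the
// three axes in round-robin fashion until the 2^10 = 1024 thread budget is reached, returning an
// MTLSize of (width, height, depth) = (16, 8, 8), i.e. 1024 threads per block.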
pub fn set_param<P: EncoderParam>(encoder: &ComputeCommandEncoderRef, position: u64, data: P) {
<P as EncoderParam>::set_param(encoder, position, data)
}
/// Helper functions to create the various objects on the compute command encoder
/// on a single line.
/// Helps avoid getting argument indices wrong and mixing up element counts with sizes in bytes.
pub trait EncoderParam {
fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self);
}
macro_rules! primitive {
($type:ty) => {
impl EncoderParam for $type {
fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) {
encoder.set_bytes(
position,
core::mem::size_of::<$type>() as u64,
&data as *const $type as *const c_void,
);
}
}
};
}
primitive!(bool);
primitive!(usize);
primitive!(i32);
primitive!(i64);
primitive!(u32);
primitive!(u64);
primitive!(f32);
pub struct BufferOffset<'a> {
pub buffer: &'a Buffer,
pub offset_in_bytes: usize,
}
impl<'a> BufferOffset<'a> {
pub fn zero_offset(buffer: &'a Buffer) -> Self {
Self {
buffer,
offset_in_bytes: 0,
}
}
}
impl<T> EncoderParam for &[T] {
fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) {
encoder.set_bytes(
position,
core::mem::size_of_val(data) as u64,
data.as_ptr() as *const c_void,
);
}
}
impl EncoderParam for &Buffer {
fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) {
encoder.set_buffer(position, Some(data), 0);
}
}
impl EncoderParam for (&Buffer, usize) {
fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) {
encoder.set_buffer(position, Some(data.0), data.1 as u64);
}
}
impl EncoderParam for &BufferOffset<'_> {
fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) {
encoder.set_buffer(position, Some(data.buffer), data.offset_in_bytes as u64);
}
}
impl EncoderParam for &mut Buffer {
fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) {
encoder.set_buffer(position, Some(data), 0);
}
}
impl EncoderParam for (&mut Buffer, usize) {
fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) {
encoder.set_buffer(position, Some(data.0), data.1 as u64);
}
}
#[macro_export]
macro_rules! set_params {
($encoder:ident, ($($param:expr),+)) => (
let mut _index = 0;
$(
$crate::utils::set_param($encoder, _index, $param);
_index += 1;
)*
);
}
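// Hypothetical usage sketch (the `encoder`, `length` and buffer bindings are assumptions): the
// macro assigns consecutive argument indices starting at 0, so
//
// set_params!(encoder, (length, &input_buffer, &output_buffer));
//
// is equivalent to calling `set_param(encoder, 0, length)`, `set_param(encoder, 1, &input_buffer)`
// and `set_param(encoder, 2, &output_buffer)`.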
pub trait EncoderProvider {
type Encoder<'a>: AsRef<metal::ComputeCommandEncoderRef>
where
Self: 'a;
fn encoder(&self) -> Self::Encoder<'_>;
}
pub struct WrappedEncoder<'a> {
inner: &'a ComputeCommandEncoderRef,
end_encoding_on_drop: bool,
}
impl Drop for WrappedEncoder<'_> {
fn drop(&mut self) {
if self.end_encoding_on_drop {
self.inner.end_encoding()
}
}
}
impl AsRef<metal::ComputeCommandEncoderRef> for WrappedEncoder<'_> {
fn as_ref(&self) -> &metal::ComputeCommandEncoderRef {
self.inner
}
}
impl EncoderProvider for &metal::CommandBuffer {
type Encoder<'a>
= WrappedEncoder<'a>
where
Self: 'a;
fn encoder(&self) -> Self::Encoder<'_> {
WrappedEncoder {
inner: self.new_compute_command_encoder(),
end_encoding_on_drop: true,
}
}
}
impl EncoderProvider for &metal::CommandBufferRef {
type Encoder<'a>
= WrappedEncoder<'a>
where
Self: 'a;
fn encoder(&self) -> Self::Encoder<'_> {
WrappedEncoder {
inner: self.new_compute_command_encoder(),
end_encoding_on_drop: true,
}
}
}
impl EncoderProvider for &ComputeCommandEncoderRef {
type Encoder<'a>
= WrappedEncoder<'a>
where
Self: 'a;
fn encoder(&self) -> Self::Encoder<'_> {
WrappedEncoder {
inner: self,
end_encoding_on_drop: false,
}
}
}
| candle/candle-metal-kernels/src/utils.rs/0 | {
"file_path": "candle/candle-metal-kernels/src/utils.rs",
"repo_id": "candle",
"token_count": 2819
} |
//! Convolution Layers.
use crate::BatchNorm;
use candle::{Result, Tensor};
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Conv1dConfig {
pub padding: usize,
pub stride: usize,
pub dilation: usize,
pub groups: usize,
}
impl Default for Conv1dConfig {
fn default() -> Self {
Self {
padding: 0,
stride: 1,
dilation: 1,
groups: 1,
}
}
}
#[derive(Clone, Debug)]
pub struct Conv1d {
weight: Tensor,
bias: Option<Tensor>,
config: Conv1dConfig,
}
impl Conv1d {
pub fn new(weight: Tensor, bias: Option<Tensor>, config: Conv1dConfig) -> Self {
Self {
weight,
bias,
config,
}
}
pub fn config(&self) -> &Conv1dConfig {
&self.config
}
pub fn weight(&self) -> &Tensor {
&self.weight
}
pub fn bias(&self) -> Option<&Tensor> {
self.bias.as_ref()
}
}
impl crate::Module for Conv1d {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = x.conv1d(
&self.weight,
self.config.padding,
self.config.stride,
self.config.dilation,
self.config.groups,
)?;
match &self.bias {
None => Ok(x),
Some(bias) => {
let b = bias.dims1()?;
let bias = bias.reshape((1, b, 1))?;
Ok(x.broadcast_add(&bias)?)
}
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ConvTranspose1dConfig {
pub padding: usize,
pub output_padding: usize,
pub stride: usize,
pub dilation: usize,
pub groups: usize,
}
impl Default for ConvTranspose1dConfig {
fn default() -> Self {
Self {
padding: 0,
output_padding: 0,
stride: 1,
dilation: 1,
groups: 1,
}
}
}
#[derive(Clone, Debug)]
pub struct ConvTranspose1d {
weight: Tensor,
bias: Option<Tensor>,
config: ConvTranspose1dConfig,
}
impl ConvTranspose1d {
pub fn new(weight: Tensor, bias: Option<Tensor>, config: ConvTranspose1dConfig) -> Self {
Self {
weight,
bias,
config,
}
}
pub fn config(&self) -> &ConvTranspose1dConfig {
&self.config
}
pub fn weight(&self) -> &Tensor {
&self.weight
}
pub fn bias(&self) -> Option<&Tensor> {
self.bias.as_ref()
}
}
impl crate::Module for ConvTranspose1d {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = x.conv_transpose1d(
&self.weight,
self.config.padding,
self.config.output_padding,
self.config.stride,
self.config.dilation,
self.config.groups,
)?;
match &self.bias {
None => Ok(x),
Some(bias) => {
let b = bias.dims1()?;
let bias = bias.reshape((1, b, 1))?;
Ok(x.broadcast_add(&bias)?)
}
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Conv2dConfig {
pub padding: usize,
pub stride: usize,
pub dilation: usize,
pub groups: usize,
}
impl Default for Conv2dConfig {
fn default() -> Self {
Self {
padding: 0,
stride: 1,
dilation: 1,
groups: 1,
}
}
}
#[derive(Clone, Debug)]
pub struct Conv2d {
weight: Tensor,
bias: Option<Tensor>,
config: Conv2dConfig,
}
impl Conv2d {
pub fn new(weight: Tensor, bias: Option<Tensor>, config: Conv2dConfig) -> Self {
Self {
weight,
bias,
config,
}
}
pub fn config(&self) -> &Conv2dConfig {
&self.config
}
pub fn weight(&self) -> &Tensor {
&self.weight
}
pub fn bias(&self) -> Option<&Tensor> {
self.bias.as_ref()
}
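    // Fold a following BatchNorm into this convolution: with s = gamma / sqrt(running_var + eps),
    // the absorbed weights are W * s (broadcast over the output-channel dim) and the absorbed
    // bias is beta + s * (b - running_mean), where b is the existing bias (or 0 if there is none).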
pub fn absorb_bn(&self, bn: &BatchNorm) -> Result<Self> {
if let Some((w_bn, b_bn)) = bn.weight_and_bias() {
let std_ = w_bn.div(&((bn.running_var() + bn.eps())?.sqrt()?))?;
let weight = self
.weight()
.broadcast_mul(&(std_.reshape((self.weight().dims4()?.0, 1, 1, 1))?))?;
let bias = match &self.bias {
None => b_bn.sub(&(std_.mul(bn.running_mean())?))?,
Some(bias) => b_bn.add(&(std_.mul(&bias.sub(bn.running_mean())?)?))?,
};
Ok(Self {
weight,
bias: Some(bias),
config: self.config,
})
} else {
candle::bail!("batch norm does not have weight_and_bias")
}
}
}
impl crate::Module for Conv2d {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = x.conv2d(
&self.weight,
self.config.padding,
self.config.stride,
self.config.dilation,
self.config.groups,
)?;
match &self.bias {
None => Ok(x),
Some(bias) => {
let b = bias.dims1()?;
let bias = bias.reshape((1, b, 1, 1))?;
Ok(x.broadcast_add(&bias)?)
}
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ConvTranspose2dConfig {
pub padding: usize,
pub output_padding: usize,
pub stride: usize,
pub dilation: usize,
// TODO: support groups.
}
impl Default for ConvTranspose2dConfig {
fn default() -> Self {
Self {
padding: 0,
output_padding: 0,
stride: 1,
dilation: 1,
}
}
}
#[derive(Clone, Debug)]
pub struct ConvTranspose2d {
weight: Tensor,
bias: Option<Tensor>,
config: ConvTranspose2dConfig,
}
impl ConvTranspose2d {
pub fn new(weight: Tensor, bias: Option<Tensor>, config: ConvTranspose2dConfig) -> Self {
Self {
weight,
bias,
config,
}
}
pub fn config(&self) -> &ConvTranspose2dConfig {
&self.config
}
pub fn weight(&self) -> &Tensor {
&self.weight
}
pub fn bias(&self) -> Option<&Tensor> {
self.bias.as_ref()
}
}
impl crate::Module for ConvTranspose2d {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = x.conv_transpose2d(
&self.weight,
self.config.padding,
self.config.output_padding,
self.config.stride,
self.config.dilation,
)?;
match &self.bias {
None => Ok(x),
Some(bias) => {
let b = bias.dims1()?;
let bias = bias.reshape((1, b, 1, 1))?;
Ok(x.broadcast_add(&bias)?)
}
}
}
}
pub fn conv1d(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
cfg: Conv1dConfig,
vb: crate::VarBuilder,
) -> Result<Conv1d> {
let init_ws = crate::init::DEFAULT_KAIMING_NORMAL;
let ws = vb.get_with_hints(
(out_channels, in_channels / cfg.groups, kernel_size),
"weight",
init_ws,
)?;
let bound = 1. / (in_channels as f64).sqrt();
let init_bs = crate::Init::Uniform {
lo: -bound,
up: bound,
};
let bs = vb.get_with_hints(out_channels, "bias", init_bs)?;
Ok(Conv1d::new(ws, Some(bs), cfg))
}
pub fn conv1d_no_bias(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
cfg: Conv1dConfig,
vb: crate::VarBuilder,
) -> Result<Conv1d> {
let init_ws = crate::init::DEFAULT_KAIMING_NORMAL;
let ws = vb.get_with_hints(
(out_channels, in_channels / cfg.groups, kernel_size),
"weight",
init_ws,
)?;
Ok(Conv1d::new(ws, None, cfg))
}
pub fn conv_transpose1d(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
cfg: ConvTranspose1dConfig,
vb: crate::VarBuilder,
) -> Result<ConvTranspose1d> {
let bound = 1. / (out_channels as f64 * kernel_size as f64).sqrt();
let init = crate::Init::Uniform {
lo: -bound,
up: bound,
};
let ws = vb.get_with_hints(
(in_channels, out_channels / cfg.groups, kernel_size),
"weight",
init,
)?;
let bs = vb.get_with_hints(out_channels, "bias", init)?;
Ok(ConvTranspose1d::new(ws, Some(bs), cfg))
}
pub fn conv_transpose1d_no_bias(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
cfg: ConvTranspose1dConfig,
vb: crate::VarBuilder,
) -> Result<ConvTranspose1d> {
let bound = 1. / (out_channels as f64 * kernel_size as f64).sqrt();
let init = crate::Init::Uniform {
lo: -bound,
up: bound,
};
let ws = vb.get_with_hints(
(in_channels, out_channels / cfg.groups, kernel_size),
"weight",
init,
)?;
Ok(ConvTranspose1d::new(ws, None, cfg))
}
pub fn conv2d(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
cfg: Conv2dConfig,
vb: crate::VarBuilder,
) -> Result<Conv2d> {
let init_ws = crate::init::DEFAULT_KAIMING_NORMAL;
let ws = vb.get_with_hints(
(
out_channels,
in_channels / cfg.groups,
kernel_size,
kernel_size,
),
"weight",
init_ws,
)?;
let bound = 1. / (in_channels as f64).sqrt();
let init_bs = crate::Init::Uniform {
lo: -bound,
up: bound,
};
let bs = vb.get_with_hints(out_channels, "bias", init_bs)?;
Ok(Conv2d::new(ws, Some(bs), cfg))
}
pub fn conv2d_no_bias(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
cfg: Conv2dConfig,
vb: crate::VarBuilder,
) -> Result<Conv2d> {
let init_ws = crate::init::DEFAULT_KAIMING_NORMAL;
let ws = vb.get_with_hints(
(
out_channels,
in_channels / cfg.groups,
kernel_size,
kernel_size,
),
"weight",
init_ws,
)?;
Ok(Conv2d::new(ws, None, cfg))
}
pub fn conv_transpose2d(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
cfg: ConvTranspose2dConfig,
vb: crate::VarBuilder,
) -> Result<ConvTranspose2d> {
let bound = 1. / (out_channels as f64).sqrt() / kernel_size as f64;
let init = crate::Init::Uniform {
lo: -bound,
up: bound,
};
let ws = vb.get_with_hints(
(in_channels, out_channels, kernel_size, kernel_size),
"weight",
init,
)?;
let bs = vb.get_with_hints(out_channels, "bias", init)?;
Ok(ConvTranspose2d::new(ws, Some(bs), cfg))
}
pub fn conv_transpose2d_no_bias(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
cfg: ConvTranspose2dConfig,
vb: crate::VarBuilder,
) -> Result<ConvTranspose2d> {
let bound = 1. / (out_channels as f64).sqrt() / kernel_size as f64;
let init = crate::Init::Uniform {
lo: -bound,
up: bound,
};
let ws = vb.get_with_hints(
(in_channels, out_channels, kernel_size, kernel_size),
"weight",
init,
)?;
Ok(ConvTranspose2d::new(ws, None, cfg))
}
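// A minimal usage sketch from a downstream crate (illustrative; the names `vm`, `vb` and `xs`
// are hypothetical): build a Conv2d whose weights live in a `VarMap` and run it on an NCHW
// tensor.
//
// use candle::{DType, Device, Tensor};
// use candle_nn::{Module, VarBuilder, VarMap};
//
// let vm = VarMap::new();
// let vb = VarBuilder::from_varmap(&vm, DType::F32, &Device::Cpu);
// let conv = candle_nn::conv2d(3, 8, 3, Default::default(), vb.pp("conv"))?;
// let xs = Tensor::zeros((1, 3, 32, 32), DType::F32, &Device::Cpu)?;
// let ys = conv.forward(&xs)?; // (1, 8, 30, 30) with the default zero padding and stride 1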
| candle/candle-nn/src/conv.rs/0 | {
"file_path": "candle/candle-nn/src/conv.rs",
"repo_id": "candle",
"token_count": 5891
} |
//! A `VarBuilder` for variable retrieval from models
//!
//! A `VarBuilder` is used to retrieve variables used by a model. These variables can either come
//! from a pre-trained checkpoint, e.g. using `VarBuilder::from_mmaped_safetensors`, or be
//! initialized for training, e.g. using `VarBuilder::from_varmap`.
use crate::VarMap;
use candle::{safetensors::Load, DType, Device, Error, Result, Shape, Tensor};
use safetensors::{slice::IndexOp, tensor::SafeTensors};
use std::collections::HashMap;
use std::sync::Arc;
/// A structure used to retrieve variables; these variables can either come from storage or be
/// generated via some form of initialization.
///
/// The way to retrieve variables is defined in the backend embedded in the `VarBuilder`.
pub struct VarBuilderArgs<'a, B: Backend> {
data: Arc<TensorData<B>>,
path: Vec<String>,
pub dtype: DType,
_phantom: std::marker::PhantomData<&'a B>,
}
impl<B: Backend> Clone for VarBuilderArgs<'_, B> {
fn clone(&self) -> Self {
Self {
data: self.data.clone(),
path: self.path.clone(),
dtype: self.dtype,
_phantom: self._phantom,
}
}
}
/// A simple `VarBuilder`, this is less generic than `VarBuilderArgs` but should cover most common
/// use cases.
pub type VarBuilder<'a> = VarBuilderArgs<'a, Box<dyn SimpleBackend + 'a>>;
struct TensorData<B: Backend> {
backend: B,
pub device: Device,
}
/// A trait that defines how tensor data is retrieved.
///
/// Typically this would use disk storage in some specific format, or random initialization.
/// Note that there is a specialized version of this trait (`SimpleBackend`) that can be used most
/// of the time. The main restriction is that it doesn't allow for specific args (besides
/// initialization hints).
pub trait Backend: Send + Sync {
type Hints: Default;
/// Retrieve a tensor with some target shape.
fn get(
&self,
s: Shape,
name: &str,
h: Self::Hints,
dtype: DType,
dev: &Device,
) -> Result<Tensor>;
fn contains_tensor(&self, name: &str) -> bool;
}
pub trait SimpleBackend: Send + Sync {
/// Retrieve a tensor based on a target name and shape.
fn get(
&self,
s: Shape,
name: &str,
h: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor>;
fn contains_tensor(&self, name: &str) -> bool;
}
impl Backend for Box<dyn SimpleBackend + '_> {
type Hints = crate::Init;
fn get(
&self,
s: Shape,
name: &str,
h: Self::Hints,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
self.as_ref().get(s, name, h, dtype, dev)
}
fn contains_tensor(&self, name: &str) -> bool {
self.as_ref().contains_tensor(name)
}
}
impl<B: Backend> VarBuilderArgs<'_, B> {
pub fn new_with_args(backend: B, dtype: DType, dev: &Device) -> Self {
let data = TensorData {
backend,
device: dev.clone(),
};
Self {
data: Arc::new(data),
path: vec![],
dtype,
_phantom: std::marker::PhantomData,
}
}
/// Returns the prefix of the `VarBuilder`.
pub fn prefix(&self) -> String {
self.path.join(".")
}
/// Returns a new `VarBuilder` using the root path.
pub fn root(&self) -> Self {
Self {
data: self.data.clone(),
path: vec![],
dtype: self.dtype,
_phantom: std::marker::PhantomData,
}
}
/// Returns a new `VarBuilder` with the prefix set to `prefix`.
pub fn set_prefix(&self, prefix: impl ToString) -> Self {
Self {
data: self.data.clone(),
path: vec![prefix.to_string()],
dtype: self.dtype,
_phantom: std::marker::PhantomData,
}
}
    /// Return a new `VarBuilder` adding `s` to the current prefix. This can be thought of as
    /// `cd`-ing into a directory.
pub fn push_prefix<S: ToString>(&self, s: S) -> Self {
let mut path = self.path.clone();
path.push(s.to_string());
Self {
data: self.data.clone(),
path,
dtype: self.dtype,
_phantom: std::marker::PhantomData,
}
}
/// Short alias for `push_prefix`.
pub fn pp<S: ToString>(&self, s: S) -> Self {
self.push_prefix(s)
}
/// The device used by default.
pub fn device(&self) -> &Device {
&self.data.device
}
/// The dtype used by default.
pub fn dtype(&self) -> DType {
self.dtype
}
/// Clone the VarBuilder tweaking its dtype
pub fn to_dtype(&self, dtype: DType) -> Self {
Self {
data: self.data.clone(),
path: self.path.clone(),
dtype,
_phantom: std::marker::PhantomData,
}
}
fn path(&self, tensor_name: &str) -> String {
if self.path.is_empty() {
tensor_name.to_string()
} else {
[&self.path.join("."), tensor_name].join(".")
}
}
/// This returns true only if a tensor with the passed in name is available. E.g. when passed
/// `a`, true is returned if `prefix.a` exists but false is returned if only `prefix.a.b`
/// exists.
pub fn contains_tensor(&self, tensor_name: &str) -> bool {
let path = self.path(tensor_name);
self.data.backend.contains_tensor(&path)
}
/// Retrieve the tensor associated with the given name at the current path.
pub fn get_with_hints<S: Into<Shape>>(
&self,
s: S,
name: &str,
hints: B::Hints,
) -> Result<Tensor> {
self.get_with_hints_dtype(s, name, hints, self.dtype)
}
/// Retrieve the tensor associated with the given name at the current path.
pub fn get<S: Into<Shape>>(&self, s: S, name: &str) -> Result<Tensor> {
self.get_with_hints(s, name, Default::default())
}
/// Retrieve the tensor associated with the given name & dtype at the current path.
pub fn get_with_hints_dtype<S: Into<Shape>>(
&self,
s: S,
name: &str,
hints: B::Hints,
dtype: DType,
) -> Result<Tensor> {
let path = self.path(name);
self.data
.backend
.get(s.into(), &path, hints, dtype, &self.data.device)
}
}
struct Zeros;
impl SimpleBackend for Zeros {
fn get(&self, s: Shape, _: &str, _: crate::Init, dtype: DType, dev: &Device) -> Result<Tensor> {
Tensor::zeros(s, dtype, dev)
}
fn contains_tensor(&self, _name: &str) -> bool {
true
}
}
impl SimpleBackend for HashMap<String, Tensor> {
fn get(
&self,
s: Shape,
name: &str,
_: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
let tensor = self
.get(name)
.ok_or_else(|| {
Error::CannotFindTensor {
path: name.to_string(),
}
.bt()
})?
.clone();
if tensor.shape() != &s {
Err(candle::Error::UnexpectedShape {
msg: format!("shape mismatch for {name}"),
expected: s,
got: tensor.shape().clone(),
}
.bt())?
}
tensor.to_device(dev)?.to_dtype(dtype)
}
fn contains_tensor(&self, name: &str) -> bool {
self.contains_key(name)
}
}
impl SimpleBackend for VarMap {
fn get(
&self,
s: Shape,
name: &str,
h: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
VarMap::get(self, s, name, h, dtype, dev)
}
fn contains_tensor(&self, name: &str) -> bool {
self.data().lock().unwrap().contains_key(name)
}
}
#[allow(dead_code)]
pub struct SafeTensorWithRouting<'a> {
routing: HashMap<String, usize>,
safetensors: Vec<SafeTensors<'a>>,
}
impl SimpleBackend for SafeTensorWithRouting<'_> {
fn get(
&self,
s: Shape,
path: &str,
_: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
let index = self.routing.get(path).ok_or_else(|| {
Error::CannotFindTensor {
path: path.to_string(),
}
.bt()
})?;
let tensor = self.safetensors[*index]
.tensor(path)?
.load(dev)?
.to_dtype(dtype)?;
if tensor.shape() != &s {
Err(candle::Error::UnexpectedShape {
msg: format!("shape mismatch for {path}"),
expected: s,
got: tensor.shape().clone(),
}
.bt())?
}
Ok(tensor)
}
fn contains_tensor(&self, name: &str) -> bool {
self.routing.contains_key(name)
}
}
impl SimpleBackend for candle::npy::NpzTensors {
fn get(
&self,
s: Shape,
path: &str,
_: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
let tensor = match self.get(path)? {
None => Err(Error::CannotFindTensor {
path: path.to_string(),
}
.bt())?,
Some(tensor) => tensor,
};
let tensor = tensor.to_device(dev)?.to_dtype(dtype)?;
if tensor.shape() != &s {
Err(candle::Error::UnexpectedShape {
msg: format!("shape mismatch for {path}"),
expected: s,
got: tensor.shape().clone(),
}
.bt())?
}
Ok(tensor)
}
fn contains_tensor(&self, name: &str) -> bool {
self.get(name).is_ok_and(|v| v.is_some())
}
}
impl SimpleBackend for candle::pickle::PthTensors {
fn get(
&self,
s: Shape,
path: &str,
_: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
let tensor = match self.get(path)? {
None => Err(Error::CannotFindTensor {
path: path.to_string(),
}
.bt())?,
Some(tensor) => tensor,
};
let tensor = tensor.to_device(dev)?.to_dtype(dtype)?;
if tensor.shape() != &s {
Err(candle::Error::UnexpectedShape {
msg: format!("shape mismatch for {path}"),
expected: s,
got: tensor.shape().clone(),
}
.bt())?
}
Ok(tensor)
}
fn contains_tensor(&self, name: &str) -> bool {
self.get(name).is_ok_and(|v| v.is_some())
}
}
impl SimpleBackend for candle::safetensors::MmapedSafetensors {
fn get(
&self,
s: Shape,
name: &str,
_: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
let tensor = self.load(name, dev)?.to_dtype(dtype)?;
if tensor.shape() != &s {
Err(candle::Error::UnexpectedShape {
msg: format!("shape mismatch for {name}"),
expected: s,
got: tensor.shape().clone(),
}
.bt())?
}
Ok(tensor)
}
fn contains_tensor(&self, name: &str) -> bool {
self.get(name).is_ok()
}
}
impl SimpleBackend for candle::safetensors::BufferedSafetensors {
fn get(
&self,
s: Shape,
name: &str,
_: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
let tensor = self.load(name, dev)?.to_dtype(dtype)?;
if tensor.shape() != &s {
Err(candle::Error::UnexpectedShape {
msg: format!("shape mismatch for {name}"),
expected: s,
got: tensor.shape().clone(),
}
.bt())?
}
Ok(tensor)
}
fn contains_tensor(&self, name: &str) -> bool {
self.get(name).is_ok()
}
}
impl SimpleBackend for candle::safetensors::SliceSafetensors<'_> {
fn get(
&self,
s: Shape,
name: &str,
_: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
let tensor = self.load(name, dev)?.to_dtype(dtype)?;
if tensor.shape() != &s {
Err(candle::Error::UnexpectedShape {
msg: format!("shape mismatch for {name}"),
expected: s,
got: tensor.shape().clone(),
}
.bt())?
}
Ok(tensor)
}
fn contains_tensor(&self, name: &str) -> bool {
self.get(name).is_ok()
}
}
impl<'a> VarBuilder<'a> {
/// Initializes a `VarBuilder` using a custom backend.
///
/// It is preferred to use one of the more specific constructors. This
/// constructor is provided to allow downstream users to define their own
/// backends.
pub fn from_backend(
backend: Box<dyn SimpleBackend + 'a>,
dtype: DType,
device: Device,
) -> Self {
let data = TensorData { backend, device };
Self {
data: Arc::new(data),
path: vec![],
dtype,
_phantom: std::marker::PhantomData,
}
}
/// Initializes a `VarBuilder` that uses zeros for any tensor.
pub fn zeros(dtype: DType, dev: &Device) -> Self {
Self::from_backend(Box::new(Zeros), dtype, dev.clone())
}
/// Initializes a `VarBuilder` that retrieves tensors stored in a hashtable. An error is
/// returned if no tensor is available under the requested path or on shape mismatches.
pub fn from_tensors(ts: HashMap<String, Tensor>, dtype: DType, dev: &Device) -> Self {
Self::from_backend(Box::new(ts), dtype, dev.clone())
}
/// Initializes a `VarBuilder` using a `VarMap`. The requested tensors are created and
/// initialized on new paths, the same tensor is used if the same path is requested multiple
/// times. This is commonly used when initializing a model before training.
///
/// Note that it is possible to load the tensor values after model creation using the `load`
    /// method on `varmap`; this can be used to start model training from an existing checkpoint.
pub fn from_varmap(varmap: &VarMap, dtype: DType, dev: &Device) -> Self {
Self::from_backend(Box::new(varmap.clone()), dtype, dev.clone())
}
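    // A hedged usage sketch (the checkpoint name "model.safetensors" is hypothetical):
    //
    // let mut varmap = VarMap::new();
    // let vb = VarBuilder::from_varmap(&varmap, DType::F32, &Device::Cpu);
    // let w = vb.get_with_hints((10, 10), "w", candle_nn::init::DEFAULT_KAIMING_NORMAL)?;
    // varmap.load("model.safetensors")?; // overwrite `w` with values from a checkpoint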
/// Initializes a `VarBuilder` that retrieves tensors stored in a collection of safetensors
/// files.
///
/// # Safety
///
/// The unsafe is inherited from [`memmap2::MmapOptions`].
pub unsafe fn from_mmaped_safetensors<P: AsRef<std::path::Path>>(
paths: &[P],
dtype: DType,
dev: &Device,
) -> Result<Self> {
let tensors = candle::safetensors::MmapedSafetensors::multi(paths)?;
Ok(Self::from_backend(Box::new(tensors), dtype, dev.clone()))
}
/// Initializes a `VarBuilder` from a binary buffer in the safetensor format.
pub fn from_buffered_safetensors(data: Vec<u8>, dtype: DType, dev: &Device) -> Result<Self> {
let tensors = candle::safetensors::BufferedSafetensors::new(data)?;
Ok(Self::from_backend(Box::new(tensors), dtype, dev.clone()))
}
/// Initializes a `VarBuilder` from a binary slice in the safetensor format.
pub fn from_slice_safetensors(data: &'a [u8], dtype: DType, dev: &Device) -> Result<Self> {
let tensors = candle::safetensors::SliceSafetensors::new(data)?;
Ok(Self::from_backend(Box::new(tensors), dtype, dev.clone()))
}
/// Initializes a `VarBuilder` that retrieves tensors stored in a numpy npz file.
pub fn from_npz<P: AsRef<std::path::Path>>(p: P, dtype: DType, dev: &Device) -> Result<Self> {
let npz = candle::npy::NpzTensors::new(p)?;
Ok(Self::from_backend(Box::new(npz), dtype, dev.clone()))
}
/// Initializes a `VarBuilder` that retrieves tensors stored in a pytorch pth file.
pub fn from_pth<P: AsRef<std::path::Path>>(p: P, dtype: DType, dev: &Device) -> Result<Self> {
let pth = candle::pickle::PthTensors::new(p, None)?;
Ok(Self::from_backend(Box::new(pth), dtype, dev.clone()))
}
/// Initializes a `VarBuilder` that retrieves tensors stored in a pytorch pth file.
    /// Similar to [`from_pth`], but requires a `state_key`.
pub fn from_pth_with_state<P: AsRef<std::path::Path>>(
p: P,
dtype: DType,
state_key: &str,
dev: &Device,
) -> Result<Self> {
let pth = candle::pickle::PthTensors::new(p, Some(state_key))?;
Ok(Self::from_backend(Box::new(pth), dtype, dev.clone()))
}
    /// Gets a VarBuilder that applies some renaming function to the tensor names it gets queried
    /// for before passing the new names to the inner VarBuilder.
///
/// ```rust
/// use candle::{Tensor, DType, Device};
///
/// let a = Tensor::arange(0f32, 6f32, &Device::Cpu)?.reshape((2, 3))?;
/// let tensors: std::collections::HashMap<_, _> = [
/// ("foo".to_string(), a),
/// ]
/// .into_iter()
/// .collect();
/// let vb = candle_nn::VarBuilder::from_tensors(tensors, DType::F32, &Device::Cpu);
/// assert!(vb.contains_tensor("foo"));
/// assert!(vb.get((2, 3), "foo").is_ok());
/// assert!(!vb.contains_tensor("bar"));
/// let vb = vb.rename_f(|f: &str| if f == "bar" { "foo".to_string() } else { f.to_string() });
/// assert!(vb.contains_tensor("bar"));
/// assert!(vb.contains_tensor("foo"));
/// assert!(vb.get((2, 3), "bar").is_ok());
/// assert!(vb.get((2, 3), "foo").is_ok());
/// assert!(!vb.contains_tensor("baz"));
/// # Ok::<(), candle::Error>(())
/// ```
pub fn rename_f<F: Fn(&str) -> String + Sync + Send + 'static>(self, f: F) -> Self {
let f: Box<dyn Fn(&str) -> String + Sync + Send + 'static> = Box::new(f);
self.rename(f)
}
pub fn rename<R: Renamer + Send + Sync + 'a>(self, renamer: R) -> Self {
let dtype = self.dtype();
let device = self.device().clone();
let path = self.path.clone();
let backend = Rename::new(self, renamer);
let backend: Box<dyn SimpleBackend + 'a> = Box::new(backend);
let data = TensorData { backend, device };
Self {
data: Arc::new(data),
dtype,
path,
_phantom: std::marker::PhantomData,
}
}
}
pub struct ShardedSafeTensors(candle::safetensors::MmapedSafetensors);
pub type ShardedVarBuilder<'a> = VarBuilderArgs<'a, ShardedSafeTensors>;
impl ShardedSafeTensors {
/// Initializes a `VarBuilder` that retrieves tensors stored in a collection of safetensors
    /// files and makes them usable in a sharded way.
///
/// # Safety
///
/// The unsafe is inherited from [`memmap2::MmapOptions`].
pub unsafe fn var_builder<P: AsRef<std::path::Path>>(
paths: &[P],
dtype: DType,
dev: &Device,
) -> Result<ShardedVarBuilder<'static>> {
let tensors = candle::safetensors::MmapedSafetensors::multi(paths)?;
let backend = ShardedSafeTensors(tensors);
Ok(VarBuilderArgs::new_with_args(backend, dtype, dev))
}
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct Shard {
pub dim: usize,
pub rank: usize,
pub world_size: usize,
}
impl Default for Shard {
fn default() -> Self {
Self {
dim: 0,
rank: 0,
world_size: 1,
}
}
}
/// Get part of a tensor, typically used to do Tensor Parallelism sharding.
///
/// For example, if the tensor is of size (1024, 1024):
///
/// `dim` corresponds to the dimension to slice into
/// `rank` is the rank of the current process
/// `world_size` is the total number of ranks in the process group
///
/// `get_sharded("tensor", 0, 0, 2)` means `tensor.i((..512))`
/// `get_sharded("tensor", 0, 1, 2)` means `tensor.i((512..))`
/// `get_sharded("tensor", 1, 0, 2)` means `tensor.i((.., ..512))`
impl Backend for ShardedSafeTensors {
type Hints = Shard;
fn get(
&self,
target_shape: Shape, // The size is only checked when the world size is 1.
path: &str,
h: Self::Hints,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
if h.world_size == 1 {
// There is no sharding to be applied here so we use the default backend to speed
// things up.
return SimpleBackend::get(&self.0, target_shape, path, Default::default(), dtype, dev);
}
let Shard {
dim,
rank,
world_size,
} = h;
let view = self.0.get(path)?;
let view_dtype = view.dtype();
let mut shape = view.shape().to_vec();
let size = shape[dim];
if size % world_size != 0 {
return Err(Error::ShapeMismatchSplit {
shape: shape.into(),
dim,
n_parts: world_size,
});
}
let block_size = size / world_size;
let start = rank * block_size;
let stop = (rank + 1) * block_size;
        // Everything is expressed in tensor dimensions;
        // byte offsets are handled automatically by safetensors.
let iterator = if dim == 0 {
view.slice(start..stop).map_err(|_| {
Error::Msg(format!(
"Cannot slice tensor {path} ({shape:?} along dim {dim} with {start}..{stop}"
))
})?
} else if dim == 1 {
view.slice((.., start..stop)).map_err(|_| {
Error::Msg(format!(
"Cannot slice tensor {path} ({shape:?} along dim {dim} with {start}..{stop}"
))
})?
} else {
candle::bail!("Get sharded on dimensions != 0 or 1")
};
shape[dim] = block_size;
let view_dtype: DType = view_dtype.try_into()?;
let raw: Vec<u8> = iterator.into_iter().flatten().cloned().collect();
Tensor::from_raw_buffer(&raw, view_dtype, &shape, dev)?.to_dtype(dtype)
}
fn contains_tensor(&self, name: &str) -> bool {
self.0.get(name).is_ok()
}
}
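// A hedged illustration of the sharding semantics documented above: for a stored (1024, 1024)
// tensor, requesting rank 1 out of a world size of 2 along dim 0 yields rows 512..1024, i.e. a
// (512, 1024) tensor (`vb` is assumed to be a `ShardedVarBuilder`).
//
// let shard = Shard { dim: 0, rank: 1, world_size: 2 };
// let t = vb.get_with_hints((1024, 1024), "tensor", shard)?; // shape (512, 1024)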
/// This trait specifies a way to rename the queried names into the names that are stored in an
/// inner VarBuilder.
pub trait Renamer {
    /// This is applied to the queried name, and the resulting name is passed to the
    /// inner VarBuilder.
fn rename(&self, v: &str) -> std::borrow::Cow<'_, str>;
}
pub struct Rename<'a, R: Renamer> {
inner: VarBuilder<'a>,
renamer: R,
}
impl<R: Renamer + Sync + Send> SimpleBackend for Rename<'_, R> {
fn get(
&self,
s: Shape,
name: &str,
h: crate::Init,
dtype: DType,
dev: &Device,
) -> Result<Tensor> {
let name = self.renamer.rename(name);
self.inner
.get_with_hints_dtype(s, &name, h, dtype)?
.to_device(dev)
}
fn contains_tensor(&self, name: &str) -> bool {
let name = self.renamer.rename(name);
self.inner.contains_tensor(&name)
}
}
impl<'a, R: Renamer> Rename<'a, R> {
pub fn new(inner: VarBuilder<'a>, renamer: R) -> Self {
Self { inner, renamer }
}
}
impl Renamer for Box<dyn Fn(&str) -> String + Sync + Send> {
fn rename(&self, v: &str) -> std::borrow::Cow<'_, str> {
std::borrow::Cow::Owned(self(v))
}
}
| candle/candle-nn/src/var_builder.rs/0 | {
"file_path": "candle/candle-nn/src/var_builder.rs",
"repo_id": "candle",
"token_count": 11054
} |
use candle::Result;
use prost::Message;
pub mod onnx {
include!(concat!(env!("OUT_DIR"), "/onnx.rs"));
}
pub mod eval;
pub use eval::{dtype, simple_eval};
pub fn read_file<P: AsRef<std::path::Path>>(p: P) -> Result<onnx::ModelProto> {
let buf = std::fs::read(p)?;
onnx::ModelProto::decode(buf.as_slice()).map_err(candle::Error::wrap)
}
| candle/candle-onnx/src/lib.rs/0 | {
"file_path": "candle/candle-onnx/src/lib.rs",
"repo_id": "candle",
"token_count": 154
} |
use std::collections::HashMap;
use crate::utils::wrap_err;
use crate::{PyDType, PyTensor};
use candle_onnx::eval::{dtype, get_tensor, simple_eval};
use candle_onnx::onnx::tensor_proto::DataType;
use candle_onnx::onnx::tensor_shape_proto::dimension::Value;
use candle_onnx::onnx::type_proto::{Tensor as ONNXTensor, Value as ONNXValue};
use candle_onnx::onnx::{ModelProto, ValueInfoProto};
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;
use pyo3::types::{PyList, PyTuple};
#[derive(Clone, Debug)]
#[pyclass(name = "ONNXTensorDescription")]
/// A wrapper around an ONNX tensor description.
pub struct PyONNXTensorDescriptor(ONNXTensor);
#[pymethods]
impl PyONNXTensorDescriptor {
#[getter]
/// The data type of the tensor.
/// &RETURNS&: DType
fn dtype(&self) -> PyResult<PyDType> {
match DataType::try_from(self.0.elem_type) {
Ok(dt) => match dtype(dt) {
Some(dt) => Ok(PyDType(dt)),
None => Err(PyValueError::new_err(format!(
"unsupported 'value' data-type {dt:?}"
))),
},
type_ => Err(PyValueError::new_err(format!(
"unsupported input type {type_:?}"
))),
}
}
#[getter]
/// The shape of the tensor.
/// &RETURNS&: Tuple[Union[int,str,Any]]
fn shape(&self, py: Python) -> PyResult<Py<PyTuple>> {
let shape = PyList::empty_bound(py);
if let Some(d) = &self.0.shape {
for dim in d.dim.iter() {
if let Some(value) = &dim.value {
match value {
Value::DimValue(v) => shape.append(*v)?,
Value::DimParam(s) => shape.append(s.clone())?,
};
} else {
return Err(PyValueError::new_err("None value in shape"));
}
}
}
Ok(shape.to_tuple().into())
}
fn __repr__(&self, py: Python) -> String {
match (self.shape(py), self.dtype()) {
(Ok(shape), Ok(dtype)) => format!(
"TensorDescriptor[shape: {:?}, dtype: {:?}]",
shape.to_string(),
dtype.__str__()
),
(Err(_), Err(_)) => "TensorDescriptor[shape: unknown, dtype: unknown]".to_string(),
(Err(_), Ok(dtype)) => format!(
"TensorDescriptor[shape: unknown, dtype: {:?}]",
dtype.__str__()
),
(Ok(shape), Err(_)) => format!(
"TensorDescriptor[shape: {:?}, dtype: unknown]",
shape.to_string()
),
}
}
fn __str__(&self, py: Python) -> String {
self.__repr__(py)
}
}
#[derive(Clone, Debug)]
#[pyclass(name = "ONNXModel")]
/// A wrapper around an ONNX model.
pub struct PyONNXModel(ModelProto);
fn extract_tensor_descriptions(
value_infos: &[ValueInfoProto],
) -> HashMap<String, PyONNXTensorDescriptor> {
let mut map = HashMap::new();
for value_info in value_infos.iter() {
let input_type = match &value_info.r#type {
Some(input_type) => input_type,
None => continue,
};
let input_type = match &input_type.value {
Some(input_type) => input_type,
None => continue,
};
let tensor_type: &ONNXTensor = match input_type {
ONNXValue::TensorType(tt) => tt,
_ => continue,
};
map.insert(
value_info.name.to_string(),
PyONNXTensorDescriptor(tensor_type.clone()),
);
}
map
}
#[pymethods]
impl PyONNXModel {
#[new]
#[pyo3(text_signature = "(self, path:str)")]
/// Load an ONNX model from the given path.
fn new(path: String) -> PyResult<Self> {
let model: ModelProto = candle_onnx::read_file(path).map_err(wrap_err)?;
Ok(PyONNXModel(model))
}
#[getter]
/// The version of the IR this model targets.
/// &RETURNS&: int
fn ir_version(&self) -> i64 {
self.0.ir_version
}
#[getter]
/// The producer of the model.
/// &RETURNS&: str
fn producer_name(&self) -> String {
self.0.producer_name.clone()
}
#[getter]
/// The version of the producer of the model.
/// &RETURNS&: str
fn producer_version(&self) -> String {
self.0.producer_version.clone()
}
#[getter]
/// The domain of the operator set of the model.
/// &RETURNS&: str
fn domain(&self) -> String {
self.0.domain.clone()
}
#[getter]
/// The version of the model.
/// &RETURNS&: int
fn model_version(&self) -> i64 {
self.0.model_version
}
#[getter]
/// The doc string of the model.
/// &RETURNS&: str
fn doc_string(&self) -> String {
self.0.doc_string.clone()
}
/// Get the weights of the model.
/// &RETURNS&: Dict[str, Tensor]
fn initializers(&self) -> PyResult<HashMap<String, PyTensor>> {
let mut map = HashMap::new();
if let Some(graph) = self.0.graph.as_ref() {
for tensor_description in graph.initializer.iter() {
let tensor = get_tensor(tensor_description, tensor_description.name.as_str())
.map_err(wrap_err)?;
map.insert(tensor_description.name.to_string(), PyTensor(tensor));
}
}
Ok(map)
}
#[getter]
/// The inputs of the model.
/// &RETURNS&: Optional[Dict[str, ONNXTensorDescription]]
fn inputs(&self) -> Option<HashMap<String, PyONNXTensorDescriptor>> {
if let Some(graph) = self.0.graph.as_ref() {
return Some(extract_tensor_descriptions(&graph.input));
}
None
}
#[getter]
/// The outputs of the model.
/// &RETURNS&: Optional[Dict[str, ONNXTensorDescription]]
fn outputs(&self) -> Option<HashMap<String, PyONNXTensorDescriptor>> {
if let Some(graph) = self.0.graph.as_ref() {
return Some(extract_tensor_descriptions(&graph.output));
}
None
}
#[pyo3(text_signature = "(self, inputs:Dict[str,Tensor])")]
/// Run the model on the given inputs.
/// &RETURNS&: Dict[str,Tensor]
fn run(&self, inputs: HashMap<String, PyTensor>) -> PyResult<HashMap<String, PyTensor>> {
let unwrapped_tensors = inputs.into_iter().map(|(k, v)| (k.clone(), v.0)).collect();
let result = simple_eval(&self.0, unwrapped_tensors).map_err(wrap_err)?;
Ok(result
.into_iter()
.map(|(k, v)| (k.clone(), PyTensor(v)))
.collect())
}
}
| candle/candle-pyo3/src/onnx.rs/0 | {
"file_path": "candle/candle-pyo3/src/onnx.rs",
"repo_id": "candle",
"token_count": 3268
} |
//! ConvMixer implementation.
//!
//! See "Patches Are All You Need?" by Trockman et al. 2022
//!
//! - 📝 [Arxiv](https://arxiv.org/abs/2201.09792)
//! - 💻 [Github](https://github.com/locuslab/convmixer)
//!
use candle::Result;
use candle_nn::{batch_norm, Conv2dConfig, Module, VarBuilder};
#[allow(clippy::many_single_char_names)]
fn conv2d_same(
i: usize,
o: usize,
k: usize,
c: Conv2dConfig,
vb: VarBuilder,
) -> Result<impl Module> {
let conv2d = candle_nn::conv2d(i, o, k, c, vb)?;
let s = c.stride;
let module = candle_nn::func(move |xs| {
let ih = xs.dim(2)?;
let iw = xs.dim(3)?;
let oh = ih.div_ceil(s);
let ow = iw.div_ceil(s);
let pad_h = usize::max((oh - 1) * s + k - ih, 0);
let pad_w = usize::max((ow - 1) * s + k - iw, 0);
if pad_h > 0 || pad_w > 0 {
xs.pad_with_zeros(3, pad_w / 2, pad_w - pad_w / 2)?
.pad_with_zeros(2, pad_h / 2, pad_h - pad_h / 2)?
.apply(&conv2d)
} else {
xs.apply(&conv2d)
}
});
Ok(module)
}
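// Worked example (illustrative): for a 224x224 input with kernel k = 9 and stride s = 1, the
// target output is also 224x224, so the required padding is (224 - 1) * 1 + 9 - 224 = 8 pixels
// per spatial dim, split as 4 before and 4 after, i.e. the usual "same" padding.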
fn block(dim: usize, kernel_size: usize, vb: VarBuilder) -> Result<impl Module> {
let conv2d_cfg = Conv2dConfig {
groups: dim,
..Default::default()
};
let vb_fn = vb.pp(0).pp("fn");
let conv1 = conv2d_same(dim, dim, kernel_size, conv2d_cfg, vb_fn.pp(0))?;
let bn1 = batch_norm(dim, 1e-5, vb_fn.pp(2))?;
let conv2 = candle_nn::conv2d(dim, dim, 1, Default::default(), vb.pp(1))?;
let bn2 = batch_norm(dim, 1e-5, vb.pp(3))?;
Ok(candle_nn::func(move |xs| {
let ys = xs.apply(&conv1)?.gelu_erf()?.apply_t(&bn1, false)?;
(xs + ys)?.apply(&conv2)?.gelu_erf()?.apply_t(&bn2, false)
}))
}
fn convmixer(
nclasses: usize,
dim: usize,
depth: usize,
kernel_size: usize,
patch_size: usize,
vb: VarBuilder,
) -> Result<candle_nn::Func<'static>> {
let conv2d_cfg = Conv2dConfig {
stride: patch_size,
..Default::default()
};
let conv1 = candle_nn::conv2d(3, dim, patch_size, conv2d_cfg, vb.pp(0))?;
let bn1 = batch_norm(dim, 1e-5, vb.pp(2))?;
let blocks: Vec<_> = (0..depth)
.map(|index| block(dim, kernel_size, vb.pp(3 + index)))
.collect::<Result<Vec<_>>>()?;
let fc = candle_nn::linear(dim, nclasses, vb.pp(25))?;
Ok(candle_nn::func(move |xs| {
let mut xs = xs.apply(&conv1)?.gelu_erf()?.apply_t(&bn1, false)?;
for block in blocks.iter() {
xs = xs.apply(block)?
}
// This performs the adaptive average pooling with a target size of (1, 1).
xs.mean(3)?.mean(2)?.apply(&fc)
}))
}
pub fn c1536_20(nclasses: usize, vb: VarBuilder) -> Result<candle_nn::Func<'static>> {
convmixer(nclasses, 1536, 20, 9, 7, vb)
}
pub fn c1024_20(nclasses: usize, vb: VarBuilder) -> Result<candle_nn::Func<'static>> {
convmixer(nclasses, 1024, 20, 9, 14, vb)
}
| candle/candle-transformers/src/models/convmixer.rs/0 | {
"file_path": "candle/candle-transformers/src/models/convmixer.rs",
"repo_id": "candle",
"token_count": 1504
} |
use candle::{DType, IndexOp, Result, Tensor, D};
use candle_nn::{LayerNorm, Linear, RmsNorm, VarBuilder};
// https://github.com/black-forest-labs/flux/blob/727e3a71faf37390f318cf9434f0939653302b60/src/flux/model.py#L12
#[derive(Debug, Clone)]
pub struct Config {
pub in_channels: usize,
pub vec_in_dim: usize,
pub context_in_dim: usize,
pub hidden_size: usize,
pub mlp_ratio: f64,
pub num_heads: usize,
pub depth: usize,
pub depth_single_blocks: usize,
pub axes_dim: Vec<usize>,
pub theta: usize,
pub qkv_bias: bool,
pub guidance_embed: bool,
}
impl Config {
// https://github.com/black-forest-labs/flux/blob/727e3a71faf37390f318cf9434f0939653302b60/src/flux/util.py#L32
pub fn dev() -> Self {
Self {
in_channels: 64,
vec_in_dim: 768,
context_in_dim: 4096,
hidden_size: 3072,
mlp_ratio: 4.0,
num_heads: 24,
depth: 19,
depth_single_blocks: 38,
axes_dim: vec![16, 56, 56],
theta: 10_000,
qkv_bias: true,
guidance_embed: true,
}
}
// https://github.com/black-forest-labs/flux/blob/727e3a71faf37390f318cf9434f0939653302b60/src/flux/util.py#L64
pub fn schnell() -> Self {
Self {
in_channels: 64,
vec_in_dim: 768,
context_in_dim: 4096,
hidden_size: 3072,
mlp_ratio: 4.0,
num_heads: 24,
depth: 19,
depth_single_blocks: 38,
axes_dim: vec![16, 56, 56],
theta: 10_000,
qkv_bias: true,
guidance_embed: false,
}
}
}
fn layer_norm(dim: usize, vb: VarBuilder) -> Result<LayerNorm> {
let ws = Tensor::ones(dim, vb.dtype(), vb.device())?;
Ok(LayerNorm::new_no_bias(ws, 1e-6))
}
fn scaled_dot_product_attention(q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> {
let dim = q.dim(D::Minus1)?;
let scale_factor = 1.0 / (dim as f64).sqrt();
let mut batch_dims = q.dims().to_vec();
batch_dims.pop();
batch_dims.pop();
let q = q.flatten_to(batch_dims.len() - 1)?;
let k = k.flatten_to(batch_dims.len() - 1)?;
let v = v.flatten_to(batch_dims.len() - 1)?;
let attn_weights = (q.matmul(&k.t()?)? * scale_factor)?;
let attn_scores = candle_nn::ops::softmax_last_dim(&attn_weights)?.matmul(&v)?;
batch_dims.push(attn_scores.dim(D::Minus2)?);
batch_dims.push(attn_scores.dim(D::Minus1)?);
attn_scores.reshape(batch_dims)
}
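// The helper above computes softmax(q k^T / sqrt(d)) v over the last two dims, flattening any
// leading batch dims into one before the matmuls and restoring them afterwards.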
fn rope(pos: &Tensor, dim: usize, theta: usize) -> Result<Tensor> {
if dim % 2 == 1 {
candle::bail!("dim {dim} is odd")
}
let dev = pos.device();
let theta = theta as f64;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, 1, inv_freq_len), dev)?;
let inv_freq = inv_freq.to_dtype(pos.dtype())?;
let freqs = pos.unsqueeze(2)?.broadcast_mul(&inv_freq)?;
let cos = freqs.cos()?;
let sin = freqs.sin()?;
let out = Tensor::stack(&[&cos, &sin.neg()?, &sin, &cos], 3)?;
let (b, n, d, _ij) = out.dims4()?;
out.reshape((b, n, d, 2, 2))
}
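// `rope` returns, for each position and frequency, the entries of a 2x2 rotation matrix; the
// output shape is (batch, seq_len, dim / 2, 2, 2). `apply_rope` below contracts these matrices
// against the (even, odd) feature pairs of its input.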
fn apply_rope(x: &Tensor, freq_cis: &Tensor) -> Result<Tensor> {
let dims = x.dims();
let (b_sz, n_head, seq_len, n_embd) = x.dims4()?;
let x = x.reshape((b_sz, n_head, seq_len, n_embd / 2, 2))?;
let x0 = x.narrow(D::Minus1, 0, 1)?;
let x1 = x.narrow(D::Minus1, 1, 1)?;
let fr0 = freq_cis.get_on_dim(D::Minus1, 0)?;
let fr1 = freq_cis.get_on_dim(D::Minus1, 1)?;
(fr0.broadcast_mul(&x0)? + fr1.broadcast_mul(&x1)?)?.reshape(dims.to_vec())
}
pub(crate) fn attention(q: &Tensor, k: &Tensor, v: &Tensor, pe: &Tensor) -> Result<Tensor> {
let q = apply_rope(q, pe)?.contiguous()?;
let k = apply_rope(k, pe)?.contiguous()?;
let x = scaled_dot_product_attention(&q, &k, v)?;
x.transpose(1, 2)?.flatten_from(2)
}
pub(crate) fn timestep_embedding(t: &Tensor, dim: usize, dtype: DType) -> Result<Tensor> {
const TIME_FACTOR: f64 = 1000.;
const MAX_PERIOD: f64 = 10000.;
if dim % 2 == 1 {
candle::bail!("{dim} is odd")
}
let dev = t.device();
let half = dim / 2;
let t = (t * TIME_FACTOR)?;
let arange = Tensor::arange(0, half as u32, dev)?.to_dtype(candle::DType::F32)?;
let freqs = (arange * (-MAX_PERIOD.ln() / half as f64))?.exp()?;
let args = t
.unsqueeze(1)?
.to_dtype(candle::DType::F32)?
.broadcast_mul(&freqs.unsqueeze(0)?)?;
let emb = Tensor::cat(&[args.cos()?, args.sin()?], D::Minus1)?.to_dtype(dtype)?;
Ok(emb)
}
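// Sinusoidal timestep embedding: with half = dim / 2 frequencies w_j = exp(-ln(10000) * j / half),
// the output concatenates cos(1000 * t * w_j) and sin(1000 * t * w_j) along the last dim, giving
// a (batch, dim) tensor in the requested dtype.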
#[derive(Debug, Clone)]
pub struct EmbedNd {
#[allow(unused)]
dim: usize,
theta: usize,
axes_dim: Vec<usize>,
}
impl EmbedNd {
pub fn new(dim: usize, theta: usize, axes_dim: Vec<usize>) -> Self {
Self {
dim,
theta,
axes_dim,
}
}
}
impl candle::Module for EmbedNd {
fn forward(&self, ids: &Tensor) -> Result<Tensor> {
let n_axes = ids.dim(D::Minus1)?;
let mut emb = Vec::with_capacity(n_axes);
for idx in 0..n_axes {
let r = rope(
&ids.get_on_dim(D::Minus1, idx)?,
self.axes_dim[idx],
self.theta,
)?;
emb.push(r)
}
let emb = Tensor::cat(&emb, 2)?;
emb.unsqueeze(1)
}
}
#[derive(Debug, Clone)]
pub struct MlpEmbedder {
in_layer: Linear,
out_layer: Linear,
}
impl MlpEmbedder {
fn new(in_sz: usize, h_sz: usize, vb: VarBuilder) -> Result<Self> {
let in_layer = candle_nn::linear(in_sz, h_sz, vb.pp("in_layer"))?;
let out_layer = candle_nn::linear(h_sz, h_sz, vb.pp("out_layer"))?;
Ok(Self {
in_layer,
out_layer,
})
}
}
impl candle::Module for MlpEmbedder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.in_layer)?.silu()?.apply(&self.out_layer)
}
}
#[derive(Debug, Clone)]
pub struct QkNorm {
query_norm: RmsNorm,
key_norm: RmsNorm,
}
impl QkNorm {
fn new(dim: usize, vb: VarBuilder) -> Result<Self> {
let query_norm = vb.get(dim, "query_norm.scale")?;
let query_norm = RmsNorm::new(query_norm, 1e-6);
let key_norm = vb.get(dim, "key_norm.scale")?;
let key_norm = RmsNorm::new(key_norm, 1e-6);
Ok(Self {
query_norm,
key_norm,
})
}
}
struct ModulationOut {
shift: Tensor,
scale: Tensor,
gate: Tensor,
}
impl ModulationOut {
fn scale_shift(&self, xs: &Tensor) -> Result<Tensor> {
xs.broadcast_mul(&(&self.scale + 1.)?)?
.broadcast_add(&self.shift)
}
fn gate(&self, xs: &Tensor) -> Result<Tensor> {
self.gate.broadcast_mul(xs)
}
}
#[derive(Debug, Clone)]
struct Modulation1 {
lin: Linear,
}
impl Modulation1 {
fn new(dim: usize, vb: VarBuilder) -> Result<Self> {
let lin = candle_nn::linear(dim, 3 * dim, vb.pp("lin"))?;
Ok(Self { lin })
}
fn forward(&self, vec_: &Tensor) -> Result<ModulationOut> {
let ys = vec_
.silu()?
.apply(&self.lin)?
.unsqueeze(1)?
.chunk(3, D::Minus1)?;
if ys.len() != 3 {
candle::bail!("unexpected len from chunk {ys:?}")
}
Ok(ModulationOut {
shift: ys[0].clone(),
scale: ys[1].clone(),
gate: ys[2].clone(),
})
}
}
#[derive(Debug, Clone)]
struct Modulation2 {
lin: Linear,
}
impl Modulation2 {
fn new(dim: usize, vb: VarBuilder) -> Result<Self> {
let lin = candle_nn::linear(dim, 6 * dim, vb.pp("lin"))?;
Ok(Self { lin })
}
fn forward(&self, vec_: &Tensor) -> Result<(ModulationOut, ModulationOut)> {
let ys = vec_
.silu()?
.apply(&self.lin)?
.unsqueeze(1)?
.chunk(6, D::Minus1)?;
if ys.len() != 6 {
candle::bail!("unexpected len from chunk {ys:?}")
}
let mod1 = ModulationOut {
shift: ys[0].clone(),
scale: ys[1].clone(),
gate: ys[2].clone(),
};
let mod2 = ModulationOut {
shift: ys[3].clone(),
scale: ys[4].clone(),
gate: ys[5].clone(),
};
Ok((mod1, mod2))
}
}
#[derive(Debug, Clone)]
pub struct SelfAttention {
qkv: Linear,
norm: QkNorm,
proj: Linear,
num_heads: usize,
}
impl SelfAttention {
fn new(dim: usize, num_heads: usize, qkv_bias: bool, vb: VarBuilder) -> Result<Self> {
let head_dim = dim / num_heads;
let qkv = candle_nn::linear_b(dim, dim * 3, qkv_bias, vb.pp("qkv"))?;
let norm = QkNorm::new(head_dim, vb.pp("norm"))?;
let proj = candle_nn::linear(dim, dim, vb.pp("proj"))?;
Ok(Self {
qkv,
norm,
proj,
num_heads,
})
}
fn qkv(&self, xs: &Tensor) -> Result<(Tensor, Tensor, Tensor)> {
let qkv = xs.apply(&self.qkv)?;
let (b, l, _khd) = qkv.dims3()?;
let qkv = qkv.reshape((b, l, 3, self.num_heads, ()))?;
let q = qkv.i((.., .., 0))?.transpose(1, 2)?;
let k = qkv.i((.., .., 1))?.transpose(1, 2)?;
let v = qkv.i((.., .., 2))?.transpose(1, 2)?;
let q = q.apply(&self.norm.query_norm)?;
let k = k.apply(&self.norm.key_norm)?;
Ok((q, k, v))
}
#[allow(unused)]
fn forward(&self, xs: &Tensor, pe: &Tensor) -> Result<Tensor> {
let (q, k, v) = self.qkv(xs)?;
attention(&q, &k, &v, pe)?.apply(&self.proj)
}
}
#[derive(Debug, Clone)]
struct Mlp {
lin1: Linear,
lin2: Linear,
}
impl Mlp {
fn new(in_sz: usize, mlp_sz: usize, vb: VarBuilder) -> Result<Self> {
let lin1 = candle_nn::linear(in_sz, mlp_sz, vb.pp("0"))?;
let lin2 = candle_nn::linear(mlp_sz, in_sz, vb.pp("2"))?;
Ok(Self { lin1, lin2 })
}
}
impl candle::Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.lin1)?.gelu()?.apply(&self.lin2)
}
}
#[derive(Debug, Clone)]
pub struct DoubleStreamBlock {
img_mod: Modulation2,
img_norm1: LayerNorm,
img_attn: SelfAttention,
img_norm2: LayerNorm,
img_mlp: Mlp,
txt_mod: Modulation2,
txt_norm1: LayerNorm,
txt_attn: SelfAttention,
txt_norm2: LayerNorm,
txt_mlp: Mlp,
}
impl DoubleStreamBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h_sz = cfg.hidden_size;
let mlp_sz = (h_sz as f64 * cfg.mlp_ratio) as usize;
let img_mod = Modulation2::new(h_sz, vb.pp("img_mod"))?;
let img_norm1 = layer_norm(h_sz, vb.pp("img_norm1"))?;
let img_attn = SelfAttention::new(h_sz, cfg.num_heads, cfg.qkv_bias, vb.pp("img_attn"))?;
let img_norm2 = layer_norm(h_sz, vb.pp("img_norm2"))?;
let img_mlp = Mlp::new(h_sz, mlp_sz, vb.pp("img_mlp"))?;
let txt_mod = Modulation2::new(h_sz, vb.pp("txt_mod"))?;
let txt_norm1 = layer_norm(h_sz, vb.pp("txt_norm1"))?;
let txt_attn = SelfAttention::new(h_sz, cfg.num_heads, cfg.qkv_bias, vb.pp("txt_attn"))?;
let txt_norm2 = layer_norm(h_sz, vb.pp("txt_norm2"))?;
let txt_mlp = Mlp::new(h_sz, mlp_sz, vb.pp("txt_mlp"))?;
Ok(Self {
img_mod,
img_norm1,
img_attn,
img_norm2,
img_mlp,
txt_mod,
txt_norm1,
txt_attn,
txt_norm2,
txt_mlp,
})
}
fn forward(
&self,
img: &Tensor,
txt: &Tensor,
vec_: &Tensor,
pe: &Tensor,
) -> Result<(Tensor, Tensor)> {
let (img_mod1, img_mod2) = self.img_mod.forward(vec_)?; // shift, scale, gate
let (txt_mod1, txt_mod2) = self.txt_mod.forward(vec_)?; // shift, scale, gate
let img_modulated = img.apply(&self.img_norm1)?;
let img_modulated = img_mod1.scale_shift(&img_modulated)?;
let (img_q, img_k, img_v) = self.img_attn.qkv(&img_modulated)?;
let txt_modulated = txt.apply(&self.txt_norm1)?;
let txt_modulated = txt_mod1.scale_shift(&txt_modulated)?;
let (txt_q, txt_k, txt_v) = self.txt_attn.qkv(&txt_modulated)?;
let q = Tensor::cat(&[txt_q, img_q], 2)?;
let k = Tensor::cat(&[txt_k, img_k], 2)?;
let v = Tensor::cat(&[txt_v, img_v], 2)?;
let attn = attention(&q, &k, &v, pe)?;
let txt_attn = attn.narrow(1, 0, txt.dim(1)?)?;
let img_attn = attn.narrow(1, txt.dim(1)?, attn.dim(1)? - txt.dim(1)?)?;
let img = (img + img_mod1.gate(&img_attn.apply(&self.img_attn.proj)?))?;
let img = (&img
+ img_mod2.gate(
&img_mod2
.scale_shift(&img.apply(&self.img_norm2)?)?
.apply(&self.img_mlp)?,
)?)?;
let txt = (txt + txt_mod1.gate(&txt_attn.apply(&self.txt_attn.proj)?))?;
let txt = (&txt
+ txt_mod2.gate(
&txt_mod2
.scale_shift(&txt.apply(&self.txt_norm2)?)?
.apply(&self.txt_mlp)?,
)?)?;
Ok((img, txt))
}
}
#[derive(Debug, Clone)]
pub struct SingleStreamBlock {
linear1: Linear,
linear2: Linear,
norm: QkNorm,
pre_norm: LayerNorm,
modulation: Modulation1,
h_sz: usize,
mlp_sz: usize,
num_heads: usize,
}
impl SingleStreamBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h_sz = cfg.hidden_size;
let mlp_sz = (h_sz as f64 * cfg.mlp_ratio) as usize;
let head_dim = h_sz / cfg.num_heads;
let linear1 = candle_nn::linear(h_sz, h_sz * 3 + mlp_sz, vb.pp("linear1"))?;
let linear2 = candle_nn::linear(h_sz + mlp_sz, h_sz, vb.pp("linear2"))?;
let norm = QkNorm::new(head_dim, vb.pp("norm"))?;
let pre_norm = layer_norm(h_sz, vb.pp("pre_norm"))?;
let modulation = Modulation1::new(h_sz, vb.pp("modulation"))?;
Ok(Self {
linear1,
linear2,
norm,
pre_norm,
modulation,
h_sz,
mlp_sz,
num_heads: cfg.num_heads,
})
}
fn forward(&self, xs: &Tensor, vec_: &Tensor, pe: &Tensor) -> Result<Tensor> {
let mod_ = self.modulation.forward(vec_)?;
let x_mod = mod_.scale_shift(&xs.apply(&self.pre_norm)?)?;
let x_mod = x_mod.apply(&self.linear1)?;
let qkv = x_mod.narrow(D::Minus1, 0, 3 * self.h_sz)?;
let (b, l, _khd) = qkv.dims3()?;
let qkv = qkv.reshape((b, l, 3, self.num_heads, ()))?;
let q = qkv.i((.., .., 0))?.transpose(1, 2)?;
let k = qkv.i((.., .., 1))?.transpose(1, 2)?;
let v = qkv.i((.., .., 2))?.transpose(1, 2)?;
let mlp = x_mod.narrow(D::Minus1, 3 * self.h_sz, self.mlp_sz)?;
let q = q.apply(&self.norm.query_norm)?;
let k = k.apply(&self.norm.key_norm)?;
let attn = attention(&q, &k, &v, pe)?;
let output = Tensor::cat(&[attn, mlp.gelu()?], 2)?.apply(&self.linear2)?;
xs + mod_.gate(&output)
}
}
#[derive(Debug, Clone)]
pub struct LastLayer {
norm_final: LayerNorm,
linear: Linear,
ada_ln_modulation: Linear,
}
impl LastLayer {
fn new(h_sz: usize, p_sz: usize, out_c: usize, vb: VarBuilder) -> Result<Self> {
let norm_final = layer_norm(h_sz, vb.pp("norm_final"))?;
let linear = candle_nn::linear(h_sz, p_sz * p_sz * out_c, vb.pp("linear"))?;
let ada_ln_modulation = candle_nn::linear(h_sz, 2 * h_sz, vb.pp("adaLN_modulation.1"))?;
Ok(Self {
norm_final,
linear,
ada_ln_modulation,
})
}
fn forward(&self, xs: &Tensor, vec: &Tensor) -> Result<Tensor> {
let chunks = vec.silu()?.apply(&self.ada_ln_modulation)?.chunk(2, 1)?;
let (shift, scale) = (&chunks[0], &chunks[1]);
let xs = xs
.apply(&self.norm_final)?
.broadcast_mul(&(scale.unsqueeze(1)? + 1.0)?)?
.broadcast_add(&shift.unsqueeze(1)?)?;
xs.apply(&self.linear)
}
}
#[derive(Debug, Clone)]
pub struct Flux {
img_in: Linear,
txt_in: Linear,
time_in: MlpEmbedder,
vector_in: MlpEmbedder,
guidance_in: Option<MlpEmbedder>,
pe_embedder: EmbedNd,
double_blocks: Vec<DoubleStreamBlock>,
single_blocks: Vec<SingleStreamBlock>,
final_layer: LastLayer,
}
impl Flux {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let img_in = candle_nn::linear(cfg.in_channels, cfg.hidden_size, vb.pp("img_in"))?;
let txt_in = candle_nn::linear(cfg.context_in_dim, cfg.hidden_size, vb.pp("txt_in"))?;
let mut double_blocks = Vec::with_capacity(cfg.depth);
let vb_d = vb.pp("double_blocks");
for idx in 0..cfg.depth {
let db = DoubleStreamBlock::new(cfg, vb_d.pp(idx))?;
double_blocks.push(db)
}
let mut single_blocks = Vec::with_capacity(cfg.depth_single_blocks);
let vb_s = vb.pp("single_blocks");
for idx in 0..cfg.depth_single_blocks {
let sb = SingleStreamBlock::new(cfg, vb_s.pp(idx))?;
single_blocks.push(sb)
}
let time_in = MlpEmbedder::new(256, cfg.hidden_size, vb.pp("time_in"))?;
let vector_in = MlpEmbedder::new(cfg.vec_in_dim, cfg.hidden_size, vb.pp("vector_in"))?;
let guidance_in = if cfg.guidance_embed {
let mlp = MlpEmbedder::new(256, cfg.hidden_size, vb.pp("guidance_in"))?;
Some(mlp)
} else {
None
};
let final_layer =
LastLayer::new(cfg.hidden_size, 1, cfg.in_channels, vb.pp("final_layer"))?;
let pe_dim = cfg.hidden_size / cfg.num_heads;
let pe_embedder = EmbedNd::new(pe_dim, cfg.theta, cfg.axes_dim.to_vec());
Ok(Self {
img_in,
txt_in,
time_in,
vector_in,
guidance_in,
pe_embedder,
double_blocks,
single_blocks,
final_layer,
})
}
}
impl super::WithForward for Flux {
#[allow(clippy::too_many_arguments)]
fn forward(
&self,
img: &Tensor,
img_ids: &Tensor,
txt: &Tensor,
txt_ids: &Tensor,
timesteps: &Tensor,
y: &Tensor,
guidance: Option<&Tensor>,
) -> Result<Tensor> {
if txt.rank() != 3 {
candle::bail!("unexpected shape for txt {:?}", txt.shape())
}
if img.rank() != 3 {
candle::bail!("unexpected shape for img {:?}", img.shape())
}
let dtype = img.dtype();
let pe = {
let ids = Tensor::cat(&[txt_ids, img_ids], 1)?;
ids.apply(&self.pe_embedder)?
};
let mut txt = txt.apply(&self.txt_in)?;
let mut img = img.apply(&self.img_in)?;
let vec_ = timestep_embedding(timesteps, 256, dtype)?.apply(&self.time_in)?;
let vec_ = match (self.guidance_in.as_ref(), guidance) {
(Some(g_in), Some(guidance)) => {
(vec_ + timestep_embedding(guidance, 256, dtype)?.apply(g_in))?
}
_ => vec_,
};
let vec_ = (vec_ + y.apply(&self.vector_in))?;
// Double blocks
for block in self.double_blocks.iter() {
(img, txt) = block.forward(&img, &txt, &vec_, &pe)?
}
// Single blocks
let mut img = Tensor::cat(&[&txt, &img], 1)?;
for block in self.single_blocks.iter() {
img = block.forward(&img, &vec_, &pe)?;
}
let img = img.i((.., txt.dim(1)?..))?;
self.final_layer.forward(&img, &vec_)
}
}
| candle/candle-transformers/src/models/flux/model.rs/0 | {
"file_path": "candle/candle-transformers/src/models/flux/model.rs",
"repo_id": "candle",
"token_count": 10740
} |
//! Mamba inference implementation.
//!
//! See ["Mamba: Linear-Time Sequence Modeling with Selective State Spaces"](https://arxiv.org/abs/2312.00752)
//!
//! A fast implementation of Mamba for inference only, based on the reference
//! implementation from the AlbertMamba project and on Laurent Mazare's Rust
//! implementation: [mamba.rs](https://github.com/LaurentMazare/mamba.rs).
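//!
//! # Example (sketch)
//!
//! A minimal, hedged sketch of token-by-token inference; the config, weights
//! (`cfg`, `vb`) and `token_id` are placeholders supplied by the caller.
//!
//! ```ignore
//! use candle::{DType, Device, Tensor};
//!
//! let device = Device::Cpu;
//! let model = Model::new(&cfg, vb)?;
//! let mut state = State::new(1, &cfg, DType::F32, &device)?;
//! // Feed one token id per step; the recurrent state is updated in place.
//! let input = Tensor::new(&[token_id], &device)?;
//! let logits = model.forward(&input, &mut state)?;
//! ```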
use crate::models::with_tracing::{linear, linear_no_bias, Linear};
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{RmsNorm, VarBuilder};
const D_CONV: usize = 4;
const D_STATE: usize = 16;
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
pub d_model: usize,
pub n_layer: usize,
pub vocab_size: usize,
pub pad_vocab_size_multiple: usize,
}
impl Config {
fn vocab_size(&self) -> usize {
let pad = self.pad_vocab_size_multiple;
self.vocab_size.div_ceil(pad) * pad
}
fn dt_rank(&self) -> usize {
(self.d_model + 15) / 16
}
fn d_inner(&self) -> usize {
self.d_model * 2
}
}
pub struct State {
pub hs: Vec<Tensor>,
pub prev_xs: Vec<[Tensor; D_CONV]>,
pub pos: usize,
}
impl State {
pub fn new(batch_size: usize, cfg: &Config, dtype: DType, device: &Device) -> Result<Self> {
let mut hs = Vec::with_capacity(cfg.n_layer);
let mut prev_xs = Vec::with_capacity(cfg.n_layer);
for _i in 0..cfg.n_layer {
let h = Tensor::zeros((batch_size, cfg.d_inner(), D_STATE), dtype, device)?;
let x = Tensor::zeros((batch_size, cfg.d_inner()), dtype, device)?;
hs.push(h);
prev_xs.push([x.clone(), x.clone(), x.clone(), x.clone()]);
}
Ok(Self {
hs,
prev_xs,
pos: 0,
})
}
}
#[derive(Clone, Debug)]
pub struct MambaBlock {
in_proj: Linear,
conv1d_bias: Tensor,
conv1d_weights: [Tensor; D_CONV],
x_proj: Linear,
dt_proj: Linear,
a_log: Tensor,
d: Tensor,
out_proj: Linear,
dt_rank: usize,
layer_index: usize,
d_inner: usize,
}
impl MambaBlock {
pub fn new(layer_index: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let d_inner = cfg.d_inner();
let dt_rank = cfg.dt_rank();
let in_proj = linear_no_bias(cfg.d_model, d_inner * 2, vb.pp("in_proj"))?;
let x_proj = linear_no_bias(d_inner, dt_rank + D_STATE * 2, vb.pp("x_proj"))?;
let dt_proj = linear(dt_rank, d_inner, vb.pp("dt_proj"))?;
let a_log = vb.get((d_inner, D_STATE), "A_log")?;
let d = vb.get(d_inner, "D")?;
let out_proj = linear_no_bias(d_inner, cfg.d_model, vb.pp("out_proj"))?;
let conv1d_bias = vb.get(d_inner, "conv1d.bias")?;
let conv1d_weight = vb.get((d_inner, 1, D_CONV), "conv1d.weight")?;
let conv1d_weights = [
conv1d_weight.i((.., 0, 0))?,
conv1d_weight.i((.., 0, 1))?,
conv1d_weight.i((.., 0, 2))?,
conv1d_weight.i((.., 0, 3))?,
];
Ok(Self {
in_proj,
conv1d_bias,
conv1d_weights,
x_proj,
dt_proj,
a_log,
d,
out_proj,
dt_rank,
layer_index,
d_inner,
})
}
pub fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
let (b_sz, _dim) = xs.dims2()?;
let li = self.layer_index;
let mut xs = xs.apply(&self.in_proj)?.chunk(2, D::Minus1)?;
let proj_for_silu = xs.remove(1);
state.prev_xs[li][state.pos % D_CONV] = xs.remove(0);
let mut proj_for_conv = self.conv1d_bias.broadcast_as((b_sz, self.d_inner))?;
for d_c in 0..D_CONV {
proj_for_conv = (proj_for_conv
+ self.conv1d_weights[d_c]
.broadcast_mul(&state.prev_xs[li][(d_c + 1 + state.pos) % D_CONV])?)?;
}
let proj_for_conv = candle_nn::ops::silu(&proj_for_conv)?;
// SSM + Selection, we're doing inference here so only need the last step of
// the sequence.
// Algorithm 3.2 on page 6, https://arxiv.org/pdf/2312.00752.pdf
let x_proj = self.x_proj.forward(&proj_for_conv)?;
let delta = x_proj.narrow(D::Minus1, 0, self.dt_rank)?.contiguous()?;
let b = x_proj.narrow(D::Minus1, self.dt_rank, D_STATE)?;
let c = x_proj.narrow(D::Minus1, self.dt_rank + D_STATE, D_STATE)?;
let delta = delta.apply(&self.dt_proj)?;
// softplus
let delta = (delta.exp()? + 1.)?.log()?;
let a = self.a_log.to_dtype(delta.dtype())?.exp()?.neg()?;
let d = self.d.to_dtype(delta.dtype())?;
// Selective scan part
// Eqn (2a), page 3, h_t = Ab h_{t-1} + Bb x_t
let delta = delta
.unsqueeze(D::Minus1)?
.broadcast_as((b_sz, self.d_inner, D_STATE))?;
let a = a.broadcast_as((b_sz, self.d_inner, D_STATE))?;
let b = b.broadcast_as((b_sz, self.d_inner, D_STATE))?;
let proj_for_conv_b =
proj_for_conv
.unsqueeze(D::Minus1)?
.broadcast_as((b_sz, self.d_inner, D_STATE))?;
state.hs[li] = ((&state.hs[li] * (&delta * &a)?.exp()?)? + &delta * &b * &proj_for_conv_b)?;
let ss = (state.hs[li]
.matmul(&c.unsqueeze(D::Minus1)?)?
.squeeze(D::Minus1)?
+ proj_for_conv.broadcast_mul(&d)?)?;
let ys = (ss * candle_nn::ops::silu(&proj_for_silu))?;
ys.apply(&self.out_proj)
}
}
#[derive(Clone, Debug)]
pub struct ResidualBlock {
mixer: MambaBlock,
norm: RmsNorm,
}
impl ResidualBlock {
pub fn new(layer_index: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let norm = candle_nn::rms_norm(cfg.d_model, 1e-5, vb.pp("norm"))?;
let mixer = MambaBlock::new(layer_index, cfg, vb.pp("mixer"))?;
Ok(Self { mixer, norm })
}
fn forward(&self, xs: &Tensor, state: &mut State) -> Result<Tensor> {
self.mixer.forward(&xs.apply(&self.norm)?, state)? + xs
}
}
// https://github.com/johnma2006/mamba-minimal/blob/61f01953ca153f8c4a850d7111beecbf4be9cee1/model.py#L56
#[derive(Clone, Debug)]
pub struct Model {
embedding: candle_nn::Embedding,
layers: Vec<ResidualBlock>,
norm_f: RmsNorm,
lm_head: Linear,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let embedding = candle_nn::embedding(cfg.vocab_size(), cfg.d_model, vb.pp("embedding"))?;
let mut layers = Vec::with_capacity(cfg.n_layer);
let vb_l = vb.pp("layers");
for layer_idx in 0..cfg.n_layer {
let layer = ResidualBlock::new(layer_idx, cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm_f = candle_nn::rms_norm(cfg.d_model, 1e-5, vb.pp("norm_f"))?;
let lm_head = Linear::from_weights(embedding.embeddings().clone(), None);
Ok(Self {
embedding,
layers,
norm_f,
lm_head,
dtype: vb.dtype(),
})
}
pub fn forward(&self, input_ids: &Tensor, state: &mut State) -> Result<Tensor> {
let _b_size = input_ids.dims1()?;
let mut xs = self.embedding.forward(input_ids)?;
for layer in self.layers.iter() {
xs = layer.forward(&xs, state)?
}
state.pos += 1;
xs.apply(&self.norm_f)?.apply(&self.lm_head)
}
pub fn dtype(&self) -> DType {
self.dtype
}
}
| candle/candle-transformers/src/models/mamba.rs/0 | {
"file_path": "candle/candle-transformers/src/models/mamba.rs",
"repo_id": "candle",
"token_count": 3923
} |
use candle::{Module, Result, Tensor};
use candle_nn as nn;
pub struct Qkv {
pub q: Tensor,
pub k: Tensor,
pub v: Tensor,
}
pub struct Mlp {
fc1: nn::Linear,
act: nn::Activation,
fc2: nn::Linear,
}
impl Mlp {
pub fn new(
in_features: usize,
hidden_features: usize,
vb: candle_nn::VarBuilder,
) -> Result<Self> {
let fc1 = nn::linear(in_features, hidden_features, vb.pp("fc1"))?;
let act = nn::Activation::GeluPytorchTanh;
let fc2 = nn::linear(hidden_features, in_features, vb.pp("fc2"))?;
Ok(Self { fc1, act, fc2 })
}
}
impl Module for Mlp {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x = self.fc1.forward(x)?;
let x = self.act.forward(&x)?;
self.fc2.forward(&x)
}
}
pub struct QkvOnlyAttnProjections {
qkv: nn::Linear,
head_dim: usize,
}
impl QkvOnlyAttnProjections {
pub fn new(dim: usize, num_heads: usize, vb: nn::VarBuilder) -> Result<Self> {
let head_dim = dim / num_heads;
let qkv = nn::linear(dim, dim * 3, vb.pp("qkv"))?;
Ok(Self { qkv, head_dim })
}
pub fn pre_attention(&self, x: &Tensor) -> Result<Qkv> {
let qkv = self.qkv.forward(x)?;
split_qkv(&qkv, self.head_dim)
}
}
pub struct AttnProjections {
head_dim: usize,
qkv: nn::Linear,
ln_k: Option<candle_nn::RmsNorm>,
ln_q: Option<candle_nn::RmsNorm>,
proj: nn::Linear,
}
impl AttnProjections {
pub fn new(dim: usize, num_heads: usize, vb: nn::VarBuilder) -> Result<Self> {
let head_dim = dim / num_heads;
let qkv = nn::linear(dim, dim * 3, vb.pp("qkv"))?;
let proj = nn::linear(dim, dim, vb.pp("proj"))?;
let (ln_k, ln_q) = if vb.contains_tensor("ln_k.weight") {
let ln_k = candle_nn::rms_norm(head_dim, 1e-6, vb.pp("ln_k"))?;
let ln_q = candle_nn::rms_norm(head_dim, 1e-6, vb.pp("ln_q"))?;
(Some(ln_k), Some(ln_q))
} else {
(None, None)
};
Ok(Self {
head_dim,
qkv,
proj,
ln_k,
ln_q,
})
}
pub fn pre_attention(&self, x: &Tensor) -> Result<Qkv> {
let qkv = self.qkv.forward(x)?;
let Qkv { q, k, v } = split_qkv(&qkv, self.head_dim)?;
let q = match self.ln_q.as_ref() {
None => q,
Some(l) => {
let (b, t, h) = q.dims3()?;
l.forward(&q.reshape((b, t, (), self.head_dim))?)?
.reshape((b, t, h))?
}
};
let k = match self.ln_k.as_ref() {
None => k,
Some(l) => {
let (b, t, h) = k.dims3()?;
l.forward(&k.reshape((b, t, (), self.head_dim))?)?
.reshape((b, t, h))?
}
};
Ok(Qkv { q, k, v })
}
pub fn post_attention(&self, x: &Tensor) -> Result<Tensor> {
self.proj.forward(x)
}
}
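// Split a fused (batch, seq_len, 3 * dim) QKV projection into separate q, k and v tensors.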
fn split_qkv(qkv: &Tensor, head_dim: usize) -> Result<Qkv> {
let (batch_size, seq_len, _) = qkv.dims3()?;
let qkv = qkv.reshape((batch_size, seq_len, 3, (), head_dim))?;
let q = qkv.get_on_dim(2, 0)?;
let q = q.reshape((batch_size, seq_len, ()))?;
let k = qkv.get_on_dim(2, 1)?;
let k = k.reshape((batch_size, seq_len, ()))?;
let v = qkv.get_on_dim(2, 2)?;
Ok(Qkv { q, k, v })
}
| candle/candle-transformers/src/models/mmdit/projections.rs/0 | {
"file_path": "candle/candle-transformers/src/models/mmdit/projections.rs",
"repo_id": "candle",
"token_count": 1917
} |
//! Persimmon Model
//!
//! A transformer language model for efficient inference and general-purpose tasks. The model uses a standard transformer architecture with:
//! - Layer normalization for Q/K attention
//! - RoPE embeddings with partial rotary factor
//! - ReLU activation
//! - Separate number of attention heads and KV heads
//!
//! References:
//! - 💻 [Hugging Face Implementation](https://github.com/huggingface/transformers/blob/main/src/transformers/models/persimmon/modeling_persimmon.py)
//! - 💻 [Persimmon Config](https://github.com/huggingface/transformers/blob/main/src/transformers/models/persimmon/configuration_persimmon.py)
//! - 🤗 [Hugging Face](https://huggingface.co/adept/persimmon-8b-base)
//!
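//! # Example (sketch)
//!
//! A small illustrative snippet using the built-in 8B base configuration:
//!
//! ```ignore
//! let cfg = Config::base_8b();
//! assert_eq!(cfg.num_attention_heads, 64);
//! assert_eq!(cfg.partial_rotary_factor, 0.5);
//! ```
//!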
use candle::DType;
use serde::Deserialize;
pub const DTYPE: DType = DType::F32;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum PositionEmbeddingType {
Absolute,
Alibi,
}
// https://github.com/huggingface/transformers/blob/main/src/transformers/models/persimmon/configuration_persimmon.py
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: usize,
pub hidden_act: candle_nn::Activation,
pub max_position_embeddings: usize,
pub initializer_range: f64,
pub layer_norm_eps: f64,
pub rms_norm_eps: f64,
pub use_cache: bool,
pub tie_word_embeddings: bool,
pub rope_theta: f64,
pub qk_layernorm: bool,
pub partial_rotary_factor: f64,
}
impl Config {
pub fn base_8b() -> Self {
// https://huggingface.co/adept/persimmon-8b-base/blob/main/config.json
Self {
hidden_act: candle_nn::Activation::Relu,
hidden_size: 4096,
initializer_range: 0.02,
intermediate_size: 16384,
layer_norm_eps: 1e-05,
max_position_embeddings: 16384,
num_attention_heads: 64,
num_hidden_layers: 36,
num_key_value_heads: 64,
qk_layernorm: true,
rms_norm_eps: 1e-06,
rope_theta: 25000.0,
tie_word_embeddings: false,
use_cache: true,
vocab_size: 262144,
partial_rotary_factor: 0.5,
}
}
}
| candle/candle-transformers/src/models/persimmon.rs/0 | {
"file_path": "candle/candle-transformers/src/models/persimmon.rs",
"repo_id": "candle",
"token_count": 1045
} |
//! Phi3 model implementation with quantization support.
//!
//! Phi3 is a language model intended for research purposes.
//! This implementation provides quantization for reduced memory usage.
//!
//! Key characteristics:
//! - Multi-head attention
//! - RMSNorm for layer normalization
//! - Rotary positional embeddings (RoPE)
//! - Support for quantization
//!
//! References:
//! - [Model Card](https://huggingface.co/microsoft/phi-3)
//!
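//! # Example (sketch)
//!
//! A hedged sketch of loading the quantized weights from a GGUF file; the file
//! path and the `input_ids` tensor are placeholders.
//!
//! ```ignore
//! use candle::Device;
//! use candle::quantized::gguf_file;
//!
//! let mut file = std::fs::File::open("phi-3-q4.gguf")?;
//! let content = gguf_file::Content::read(&mut file)?;
//! let mut model = ModelWeights::from_gguf(false, content, &mut file, &Device::Cpu)?;
//! // `input_ids` has shape (batch, seq_len); `index_pos` is the position in the KV cache.
//! let logits = model.forward(&input_ids, 0)?;
//! ```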
use std::collections::HashMap;
use candle::quantized::gguf_file;
use candle::quantized::QTensor;
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{kv_cache::KvCache, Embedding, RmsNorm};
#[derive(Debug, Clone)]
struct QLinear {
inner: candle::quantized::QMatMul,
span: tracing::Span,
}
impl QLinear {
fn new<R: std::io::Read + std::io::Seek>(
ct: &gguf_file::Content,
r: &mut R,
name: &str,
device: &Device,
) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "qmatmul");
let w = ct.tensor(r, &format!("{name}.weight"), device)?;
let inner = candle::quantized::QMatMul::from_qtensor(w)?;
Ok(Self { inner, span })
}
}
impl Module for QLinear {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward(xs)
}
}
#[derive(Debug, Clone)]
struct Mlp {
ffn_up: QLinear,
ffn_down: QLinear,
i_size: usize,
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
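        // `ffn_up` packs the gate and up projections into a single matmul (SwiGLU-style gating).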
let up_states = xs.apply(&self.ffn_up)?;
let gate = up_states.narrow(D::Minus1, 0, self.i_size)?;
let up_states = up_states.narrow(D::Minus1, self.i_size, self.i_size)?;
let up_states = (up_states * gate.silu()?)?;
up_states.apply(&self.ffn_down)
}
}
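// Dequantize the weight tensor and build a standard `RmsNorm` from it.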
fn rms_norm(w: QTensor, eps: f64) -> Result<RmsNorm> {
let w = w.dequantize(&w.device())?;
let rms = RmsNorm::new(w, eps);
Ok(rms)
}
#[derive(Debug, Clone)]
struct LayerWeights {
attn_qkv: QLinear,
attn_output: QLinear,
attn_norm: RmsNorm,
ffn_norm: RmsNorm,
mlp: Mlp,
n_head: usize,
n_kv_head: usize,
head_dim: usize,
cos: Tensor,
sin: Tensor,
neg_inf: Tensor,
kv_cache: KvCache,
use_flash_attn: bool,
span_attn: tracing::Span,
span_rot: tracing::Span,
}
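// Where `mask` is non-zero, take the broadcast `on_true` value (here -inf); otherwise keep `on_false`.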
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: &Tensor) -> Result<Tensor> {
let shape = mask.shape();
let m = mask.where_cond(&on_true.broadcast_as(shape.dims())?, on_false)?;
Ok(m)
}
impl LayerWeights {
fn apply_rotary_emb(&self, xs: &Tensor, index_pos: usize) -> Result<Tensor> {
let _enter = self.span_rot.enter();
let (_b_sz, _h, seq_len, _n_embd) = xs.dims4()?;
let cos = self.cos.narrow(0, index_pos, seq_len)?;
let sin = self.sin.narrow(0, index_pos, seq_len)?;
candle_nn::rotary_emb::rope(&xs.contiguous()?, &cos, &sin)
}
fn forward_attn(
&mut self,
x: &Tensor,
mask: Option<&Tensor>,
index_pos: usize,
) -> Result<Tensor> {
let _enter = self.span_attn.enter();
let (b_sz, seq_len, n_embd) = x.dims3()?;
let qkv = self.attn_qkv.forward(x)?;
let query_pos = self.n_head * self.head_dim;
let q = qkv.narrow(D::Minus1, 0, query_pos)?;
let k = qkv.narrow(D::Minus1, query_pos, self.n_kv_head * self.head_dim)?;
let v = qkv.narrow(
D::Minus1,
query_pos + self.n_kv_head * self.head_dim,
self.n_kv_head * self.head_dim,
)?;
let q = q
.reshape((b_sz, seq_len, self.n_head, self.head_dim))?
.transpose(1, 2)?;
let k = k
.reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))?
.transpose(1, 2)?;
let v = v
.reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))?
.transpose(1, 2)?;
let q = self.apply_rotary_emb(&q, index_pos)?.contiguous()?;
let k = self.apply_rotary_emb(&k, index_pos)?;
let (k, v) = self.kv_cache.append(&k.contiguous()?, &v.contiguous()?)?;
let k = crate::utils::repeat_kv(k, self.n_head / self.n_kv_head)?;
let v = crate::utils::repeat_kv(v, self.n_head / self.n_kv_head)?;
let y = if self.use_flash_attn {
// flash-attn expects (b_sz, seq_len, nheads, head_dim)
let q = q.to_dtype(DType::BF16)?.transpose(1, 2)?;
let k = k.to_dtype(DType::BF16)?.transpose(1, 2)?;
let v = v.to_dtype(DType::BF16)?.transpose(1, 2)?;
let softmax_scale = 1f32 / (self.head_dim as f32).sqrt();
flash_attn(&q, &k, &v, softmax_scale, seq_len > 1)?
.to_dtype(DType::F32)?
.transpose(1, 2)?
} else {
let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?;
let att = match mask {
None => att,
Some(mask) => {
let mask = mask.broadcast_as(att.shape())?;
masked_fill(&att, &mask, &self.neg_inf)?
}
};
let att = candle_nn::ops::softmax_last_dim(&att)?;
            // Convert to contiguous as matmul doesn't support strided `v` tensors for now.
att.matmul(&v)?
};
let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?;
let y = self.attn_output.forward(&y)?;
Ok(y)
}
}
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
#[derive(Debug, Clone)]
pub struct ModelWeights {
tok_embeddings: Embedding,
layers: Vec<LayerWeights>,
output_norm: RmsNorm,
output: QLinear,
masks: HashMap<usize, Tensor>,
span: tracing::Span,
span_output: tracing::Span,
}
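// Precompute the cos/sin tables used by the rotary position embeddings.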
fn precomput_freqs_cis(
head_dim: usize,
max_seq_len: usize,
freq_base: f32,
device: &Device,
) -> Result<(Tensor, Tensor)> {
let theta: Vec<_> = (0..head_dim)
.step_by(2)
.map(|i| 1f32 / freq_base.powf(i as f32 / head_dim as f32))
.collect();
let theta = Tensor::new(theta.as_slice(), device)?;
let idx_theta = Tensor::arange(0, max_seq_len as u32, device)?
.to_dtype(DType::F32)?
.reshape((max_seq_len, 1))?
.matmul(&theta.reshape((1, theta.elem_count()))?)?;
let cos = idx_theta.cos()?;
let sin = idx_theta.sin()?;
Ok((cos, sin))
}
impl ModelWeights {
pub fn from_gguf<R: std::io::Seek + std::io::Read>(
use_flash_attn: bool,
ct: gguf_file::Content,
reader: &mut R,
device: &Device,
) -> Result<Self> {
let md_get = |s: &str| match ct.metadata.get(s) {
None => candle::bail!("cannot find {s} in metadata"),
Some(v) => Ok(v),
};
// Parameter extraction from metadata.
let head_count = md_get("phi3.attention.head_count")?.to_u32()? as usize;
let head_count_kv = md_get("phi3.attention.head_count_kv")?.to_u32()? as usize;
let block_count = md_get("phi3.block_count")?.to_u32()? as usize;
let embedding_length = md_get("phi3.embedding_length")?.to_u32()? as usize;
let max_seq_len = md_get("phi3.context_length")?.to_u32()? as usize;
let head_dim = embedding_length / head_count;
let i_size = md_get("phi3.feed_forward_length")?.to_u32()? as usize;
let rope_dim = md_get("phi3.rope.dimension_count")?.to_u32()? as usize;
let rms_eps = md_get("phi3.attention.layer_norm_rms_epsilon")?.to_f32()? as f64;
let (cos, sin) = precomput_freqs_cis(rope_dim, max_seq_len, 10_000., device)?;
let neg_inf = Tensor::new(f32::NEG_INFINITY, device)?;
let tok_embeddings = ct.tensor(reader, "token_embd.weight", device)?;
let tok_embeddings = tok_embeddings.dequantize(device)?;
let output_norm = rms_norm(ct.tensor(reader, "output_norm.weight", device)?, rms_eps)?;
let output = QLinear::new(&ct, reader, "output", device)?;
let mut layers = Vec::with_capacity(block_count);
for layer_idx in 0..block_count {
let prefix = format!("blk.{layer_idx}");
let ffn_up = QLinear::new(&ct, reader, &format!("{prefix}.ffn_up"), device)?;
let ffn_down = QLinear::new(&ct, reader, &format!("{prefix}.ffn_down"), device)?;
let mlp = Mlp {
ffn_up,
ffn_down,
i_size,
};
let attn_norm = rms_norm(
ct.tensor(reader, &format!("{prefix}.attn_norm.weight"), device)?,
rms_eps,
)?;
let ffn_norm = rms_norm(
ct.tensor(reader, &format!("{prefix}.ffn_norm.weight"), device)?,
rms_eps,
)?;
let span_attn = tracing::span!(tracing::Level::TRACE, "attn");
let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot");
let kv_cache = KvCache::new(2, max_seq_len);
layers.push(LayerWeights {
attn_qkv: QLinear::new(&ct, reader, &format!("{prefix}.attn_qkv"), device)?,
attn_output: QLinear::new(&ct, reader, &format!("{prefix}.attn_output"), device)?,
attn_norm,
ffn_norm,
mlp,
n_head: head_count,
n_kv_head: head_count_kv,
head_dim,
cos: cos.clone(),
sin: sin.clone(),
neg_inf: neg_inf.clone(),
kv_cache,
use_flash_attn,
span_attn,
span_rot,
})
}
let span = tracing::span!(tracing::Level::TRACE, "model");
let span_output = tracing::span!(tracing::Level::TRACE, "output");
Ok(Self {
tok_embeddings: Embedding::new(tok_embeddings, embedding_length),
layers,
output_norm,
output,
masks: HashMap::new(),
span,
span_output,
})
}
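    // Build (and cache) a causal attention mask of size t x t marking the future positions (j > i).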
fn mask(&mut self, t: usize, device: &Device) -> Result<Tensor> {
if let Some(mask) = self.masks.get(&t) {
Ok(mask.clone())
} else {
let mask: Vec<_> = (0..t)
.flat_map(|i| (0..t).map(move |j| u8::from(j > i)))
.collect();
let mask = Tensor::from_slice(&mask, (t, t), device)?;
self.masks.insert(t, mask.clone());
Ok(mask)
}
}
pub fn forward(&mut self, xs: &Tensor, index_pos: usize) -> Result<Tensor> {
let (_b_sz, seq_len) = xs.dims2()?;
let mask = if seq_len == 1 {
None
} else {
Some(self.mask(seq_len, xs.device())?)
};
let _enter = self.span.enter();
let mut xs = self.tok_embeddings.forward(xs)?;
for layer in self.layers.iter_mut() {
let residual = &xs;
let ys = xs.apply(&layer.attn_norm)?;
let ys = layer.forward_attn(&ys, mask.as_ref(), index_pos)?;
let ys = (ys + residual)?;
let residual = &ys;
let ys = ys.apply(&layer.ffn_norm)?;
let ys = layer.mlp.forward(&ys)?;
xs = (ys + residual)?
}
let xs = xs.apply(&self.output_norm)?.i((.., seq_len - 1, ..))?;
let _enter = self.span_output.enter();
self.output.forward(&xs)
}
}
| candle/candle-transformers/src/models/quantized_phi3.rs/0 | {
"file_path": "candle/candle-transformers/src/models/quantized_phi3.rs",
"repo_id": "candle",
"token_count": 6108
} |
use candle::{IndexOp, Result, Tensor};
use candle_nn::{Module, VarBuilder};
use super::transformer::TwoWayTransformer;
#[derive(Debug)]
struct MlpMaskDecoder {
layers: Vec<super::Linear>,
sigmoid_output: bool,
span: tracing::Span,
}
impl MlpMaskDecoder {
fn new(
input_dim: usize,
hidden_dim: usize,
output_dim: usize,
num_layers: usize,
sigmoid_output: bool,
vb: VarBuilder,
) -> Result<Self> {
let mut layers = Vec::with_capacity(num_layers);
let vb = vb.pp("layers");
for i in 0..num_layers {
let in_dim = if i == 0 { input_dim } else { hidden_dim };
let out_dim = if i + 1 == num_layers {
output_dim
} else {
hidden_dim
};
let layer = super::linear(vb.pp(i), in_dim, out_dim, true)?;
layers.push(layer)
}
let span = tracing::span!(tracing::Level::TRACE, "mlp-mask-decoder");
Ok(Self {
layers,
sigmoid_output,
span,
})
}
}
impl Module for MlpMaskDecoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let mut xs = xs.clone();
for (i, layer) in self.layers.iter().enumerate() {
xs = layer.forward(&xs)?;
if i + 1 < self.layers.len() {
xs = xs.relu()?
}
}
if self.sigmoid_output {
candle_nn::ops::sigmoid(&xs)
} else {
Ok(xs)
}
}
}
#[derive(Debug)]
pub struct MaskDecoder {
iou_token: candle_nn::Embedding,
mask_tokens: candle_nn::Embedding,
iou_prediction_head: MlpMaskDecoder,
output_upscaling_conv1: candle_nn::ConvTranspose2d,
output_upscaling_ln: super::LayerNorm2d,
output_upscaling_conv2: candle_nn::ConvTranspose2d,
num_mask_tokens: usize,
output_hypernetworks_mlps: Vec<MlpMaskDecoder>,
transformer: TwoWayTransformer,
span: tracing::Span,
}
impl MaskDecoder {
pub fn new(
transformer_dim: usize,
num_multimask_outputs: usize,
iou_head_depth: usize,
iou_head_hidden_dim: usize,
vb: VarBuilder,
) -> Result<Self> {
let num_mask_tokens = num_multimask_outputs + 1;
let iou_prediction_head = MlpMaskDecoder::new(
transformer_dim,
iou_head_hidden_dim,
num_mask_tokens,
iou_head_depth,
false,
vb.pp("iou_prediction_head"),
)?;
let iou_token = candle_nn::embedding(1, transformer_dim, vb.pp("iou_token"))?;
let mask_tokens =
candle_nn::embedding(num_mask_tokens, transformer_dim, vb.pp("mask_tokens"))?;
let cfg = candle_nn::ConvTranspose2dConfig {
stride: 2,
..Default::default()
};
let output_upscaling_conv1 = candle_nn::conv_transpose2d(
transformer_dim,
transformer_dim / 4,
2,
cfg,
vb.pp("output_upscaling.0"),
)?;
let output_upscaling_ln =
super::LayerNorm2d::new(transformer_dim / 4, 1e-6, vb.pp("output_upscaling.1"))?;
let output_upscaling_conv2 = candle_nn::conv_transpose2d(
transformer_dim / 4,
transformer_dim / 8,
2,
cfg,
vb.pp("output_upscaling.3"),
)?;
let mut output_hypernetworks_mlps = Vec::with_capacity(num_mask_tokens);
let vb_o = vb.pp("output_hypernetworks_mlps");
for i in 0..num_mask_tokens {
let mlp = MlpMaskDecoder::new(
transformer_dim,
transformer_dim,
transformer_dim / 8,
3,
false,
vb_o.pp(i),
)?;
output_hypernetworks_mlps.push(mlp)
}
let transformer = TwoWayTransformer::new(
/* depth */ 2,
/* embedding_dim */ transformer_dim,
/* num_heads */ 8,
/* mlp_dim */ 2048,
vb.pp("transformer"),
)?;
let span = tracing::span!(tracing::Level::TRACE, "mask-decoder");
Ok(Self {
iou_token,
mask_tokens,
iou_prediction_head,
output_upscaling_conv1,
output_upscaling_ln,
output_upscaling_conv2,
num_mask_tokens,
output_hypernetworks_mlps,
transformer,
span,
})
}
pub fn forward(
&self,
image_embeddings: &Tensor,
image_pe: &Tensor,
sparse_prompt_embeddings: &Tensor,
dense_prompt_embeddings: &Tensor,
multimask_output: bool,
) -> Result<(Tensor, Tensor)> {
let _enter = self.span.enter();
let (masks, iou_pred) = self.predict_masks(
image_embeddings,
image_pe,
sparse_prompt_embeddings,
dense_prompt_embeddings,
)?;
let masks = if multimask_output {
masks.i((.., 1..))?
} else {
masks.i((.., 0..1))?
};
let iou_pred = if multimask_output {
iou_pred.i((.., 1..))?
} else {
iou_pred.i((.., 0..1))?
};
Ok((masks, iou_pred))
}
fn predict_masks(
&self,
image_embeddings: &Tensor,
image_pe: &Tensor,
sparse_prompt_embeddings: &Tensor,
dense_prompt_embeddings: &Tensor,
) -> Result<(Tensor, Tensor)> {
// Concatenate output tokens.
let output_tokens = Tensor::cat(
&[self.iou_token.embeddings(), self.mask_tokens.embeddings()],
0,
)?;
let (d1, d2) = output_tokens.dims2()?;
let output_tokens =
output_tokens
.unsqueeze(0)?
.expand((sparse_prompt_embeddings.dim(0)?, d1, d2))?;
let tokens = Tensor::cat(&[&output_tokens, sparse_prompt_embeddings], 1)?;
// Expand per-image data in batch direction to be per mask
let src = repeat_interleave(image_embeddings, tokens.dim(0)?, 0)?;
let src = src.broadcast_add(dense_prompt_embeddings)?;
let pos_src = repeat_interleave(image_pe, tokens.dim(0)?, 0)?;
let (b, c, h, w) = src.dims4()?;
// Run the transformer
let (hs, src) = self.transformer.forward(&src, &pos_src, &tokens)?;
let iou_token_out = hs.i((.., 0))?;
let mask_tokens_out = hs.i((.., 1..1 + self.num_mask_tokens))?;
// Upscale mask embeddings and predict masks using the masks tokens.
let src = src.transpose(1, 2)?.reshape((b, c, h, w))?;
let upscaled_embedding = self
.output_upscaling_conv1
.forward(&src)?
.apply(&self.output_upscaling_ln)?
.gelu()?
.apply(&self.output_upscaling_conv2)?
.gelu()?;
let mut hyper_in_list = Vec::with_capacity(self.num_mask_tokens);
for (i, mlp) in self.output_hypernetworks_mlps.iter().enumerate() {
let h = mlp.forward(&mask_tokens_out.i((.., i))?)?;
hyper_in_list.push(h)
}
let hyper_in = Tensor::stack(hyper_in_list.as_slice(), 1)?.contiguous()?;
let (b, c, h, w) = upscaled_embedding.dims4()?;
let masks = hyper_in.matmul(&upscaled_embedding.reshape((b, c, h * w))?)?;
let masks = masks.reshape((b, (), h, w))?;
// Generate mask quality predictions.
let iou_pred = self.iou_prediction_head.forward(&iou_token_out)?;
Ok((masks, iou_pred))
}
}
// Equivalent to torch.repeat_interleave
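// e.g. repeating a (1, c, h, w) tensor 3 times along dim 0 yields (3, c, h, w).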
fn repeat_interleave(img: &Tensor, repeats: usize, dim: usize) -> Result<Tensor> {
let img = img.unsqueeze(dim + 1)?;
let mut dims = img.dims().to_vec();
dims[dim + 1] = repeats;
img.broadcast_as(dims)?.flatten(dim, dim + 1)
}
| candle/candle-transformers/src/models/segment_anything/mask_decoder.rs/0 | {
"file_path": "candle/candle-transformers/src/models/segment_anything/mask_decoder.rs",
"repo_id": "candle",
"token_count": 4213
} |
//! 2D UNet Denoising Models
//!
//! The 2D UNet models take a noisy sample and the current diffusion timestep
//! as input and return a denoised version of the input.
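//!
//! # Example (sketch)
//!
//! A hedged sketch of a single denoising step; `vb`, `latents`, `timestep` and
//! `text_embeddings` are placeholders supplied by the surrounding pipeline.
//!
//! ```ignore
//! let config = UNet2DConditionModelConfig::default();
//! let unet = UNet2DConditionModel::new(vb, 4, 4, false, config)?;
//! // `latents`: (batch, channels, height, width); `text_embeddings`: encoder hidden states.
//! let noise_pred = unet.forward(&latents, timestep, &text_embeddings)?;
//! ```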
use super::embeddings::{TimestepEmbedding, Timesteps};
use super::unet_2d_blocks::*;
use crate::models::with_tracing::{conv2d, Conv2d};
use candle::{Result, Tensor};
use candle_nn as nn;
use candle_nn::Module;
#[derive(Debug, Clone, Copy)]
pub struct BlockConfig {
pub out_channels: usize,
/// When `None` no cross-attn is used, when `Some(d)` then cross-attn is used and `d` is the
/// number of transformer blocks to be used.
pub use_cross_attn: Option<usize>,
pub attention_head_dim: usize,
}
#[derive(Debug, Clone)]
pub struct UNet2DConditionModelConfig {
pub center_input_sample: bool,
pub flip_sin_to_cos: bool,
pub freq_shift: f64,
pub blocks: Vec<BlockConfig>,
pub layers_per_block: usize,
pub downsample_padding: usize,
pub mid_block_scale_factor: f64,
pub norm_num_groups: usize,
pub norm_eps: f64,
pub cross_attention_dim: usize,
pub sliced_attention_size: Option<usize>,
pub use_linear_projection: bool,
}
impl Default for UNet2DConditionModelConfig {
fn default() -> Self {
Self {
center_input_sample: false,
flip_sin_to_cos: true,
freq_shift: 0.,
blocks: vec![
BlockConfig {
out_channels: 320,
use_cross_attn: Some(1),
attention_head_dim: 8,
},
BlockConfig {
out_channels: 640,
use_cross_attn: Some(1),
attention_head_dim: 8,
},
BlockConfig {
out_channels: 1280,
use_cross_attn: Some(1),
attention_head_dim: 8,
},
BlockConfig {
out_channels: 1280,
use_cross_attn: None,
attention_head_dim: 8,
},
],
layers_per_block: 2,
downsample_padding: 1,
mid_block_scale_factor: 1.,
norm_num_groups: 32,
norm_eps: 1e-5,
cross_attention_dim: 1280,
sliced_attention_size: None,
use_linear_projection: false,
}
}
}
#[derive(Debug)]
pub(crate) enum UNetDownBlock {
Basic(DownBlock2D),
CrossAttn(CrossAttnDownBlock2D),
}
#[derive(Debug)]
enum UNetUpBlock {
Basic(UpBlock2D),
CrossAttn(CrossAttnUpBlock2D),
}
#[derive(Debug)]
pub struct UNet2DConditionModel {
conv_in: Conv2d,
time_proj: Timesteps,
time_embedding: TimestepEmbedding,
down_blocks: Vec<UNetDownBlock>,
mid_block: UNetMidBlock2DCrossAttn,
up_blocks: Vec<UNetUpBlock>,
conv_norm_out: nn::GroupNorm,
conv_out: Conv2d,
span: tracing::Span,
config: UNet2DConditionModelConfig,
}
impl UNet2DConditionModel {
pub fn new(
vs: nn::VarBuilder,
in_channels: usize,
out_channels: usize,
use_flash_attn: bool,
config: UNet2DConditionModelConfig,
) -> Result<Self> {
let n_blocks = config.blocks.len();
let b_channels = config.blocks[0].out_channels;
let bl_channels = config.blocks.last().unwrap().out_channels;
let bl_attention_head_dim = config.blocks.last().unwrap().attention_head_dim;
let time_embed_dim = b_channels * 4;
let conv_cfg = nn::Conv2dConfig {
padding: 1,
..Default::default()
};
let conv_in = conv2d(in_channels, b_channels, 3, conv_cfg, vs.pp("conv_in"))?;
let time_proj = Timesteps::new(b_channels, config.flip_sin_to_cos, config.freq_shift);
let time_embedding =
TimestepEmbedding::new(vs.pp("time_embedding"), b_channels, time_embed_dim)?;
let vs_db = vs.pp("down_blocks");
let down_blocks = (0..n_blocks)
.map(|i| {
let BlockConfig {
out_channels,
use_cross_attn,
attention_head_dim,
} = config.blocks[i];
// Enable automatic attention slicing if the config sliced_attention_size is set to 0.
let sliced_attention_size = match config.sliced_attention_size {
Some(0) => Some(attention_head_dim / 2),
_ => config.sliced_attention_size,
};
let in_channels = if i > 0 {
config.blocks[i - 1].out_channels
} else {
b_channels
};
let db_cfg = DownBlock2DConfig {
num_layers: config.layers_per_block,
resnet_eps: config.norm_eps,
resnet_groups: config.norm_num_groups,
add_downsample: i < n_blocks - 1,
downsample_padding: config.downsample_padding,
..Default::default()
};
if let Some(transformer_layers_per_block) = use_cross_attn {
let config = CrossAttnDownBlock2DConfig {
downblock: db_cfg,
attn_num_head_channels: attention_head_dim,
cross_attention_dim: config.cross_attention_dim,
sliced_attention_size,
use_linear_projection: config.use_linear_projection,
transformer_layers_per_block,
};
let block = CrossAttnDownBlock2D::new(
vs_db.pp(i.to_string()),
in_channels,
out_channels,
Some(time_embed_dim),
use_flash_attn,
config,
)?;
Ok(UNetDownBlock::CrossAttn(block))
} else {
let block = DownBlock2D::new(
vs_db.pp(i.to_string()),
in_channels,
out_channels,
Some(time_embed_dim),
db_cfg,
)?;
Ok(UNetDownBlock::Basic(block))
}
})
.collect::<Result<Vec<_>>>()?;
// https://github.com/huggingface/diffusers/blob/a76f2ad538e73b34d5fe7be08c8eb8ab38c7e90c/src/diffusers/models/unet_2d_condition.py#L462
let mid_transformer_layers_per_block = match config.blocks.last() {
None => 1,
Some(block) => block.use_cross_attn.unwrap_or(1),
};
let mid_cfg = UNetMidBlock2DCrossAttnConfig {
resnet_eps: config.norm_eps,
output_scale_factor: config.mid_block_scale_factor,
cross_attn_dim: config.cross_attention_dim,
attn_num_head_channels: bl_attention_head_dim,
resnet_groups: Some(config.norm_num_groups),
use_linear_projection: config.use_linear_projection,
transformer_layers_per_block: mid_transformer_layers_per_block,
..Default::default()
};
let mid_block = UNetMidBlock2DCrossAttn::new(
vs.pp("mid_block"),
bl_channels,
Some(time_embed_dim),
use_flash_attn,
mid_cfg,
)?;
let vs_ub = vs.pp("up_blocks");
let up_blocks = (0..n_blocks)
.map(|i| {
let BlockConfig {
out_channels,
use_cross_attn,
attention_head_dim,
} = config.blocks[n_blocks - 1 - i];
// Enable automatic attention slicing if the config sliced_attention_size is set to 0.
let sliced_attention_size = match config.sliced_attention_size {
Some(0) => Some(attention_head_dim / 2),
_ => config.sliced_attention_size,
};
let prev_out_channels = if i > 0 {
config.blocks[n_blocks - i].out_channels
} else {
bl_channels
};
let in_channels = {
let index = if i == n_blocks - 1 {
0
} else {
n_blocks - i - 2
};
config.blocks[index].out_channels
};
let ub_cfg = UpBlock2DConfig {
num_layers: config.layers_per_block + 1,
resnet_eps: config.norm_eps,
resnet_groups: config.norm_num_groups,
add_upsample: i < n_blocks - 1,
..Default::default()
};
if let Some(transformer_layers_per_block) = use_cross_attn {
let config = CrossAttnUpBlock2DConfig {
upblock: ub_cfg,
attn_num_head_channels: attention_head_dim,
cross_attention_dim: config.cross_attention_dim,
sliced_attention_size,
use_linear_projection: config.use_linear_projection,
transformer_layers_per_block,
};
let block = CrossAttnUpBlock2D::new(
vs_ub.pp(i.to_string()),
in_channels,
prev_out_channels,
out_channels,
Some(time_embed_dim),
use_flash_attn,
config,
)?;
Ok(UNetUpBlock::CrossAttn(block))
} else {
let block = UpBlock2D::new(
vs_ub.pp(i.to_string()),
in_channels,
prev_out_channels,
out_channels,
Some(time_embed_dim),
ub_cfg,
)?;
Ok(UNetUpBlock::Basic(block))
}
})
.collect::<Result<Vec<_>>>()?;
let conv_norm_out = nn::group_norm(
config.norm_num_groups,
b_channels,
config.norm_eps,
vs.pp("conv_norm_out"),
)?;
let conv_out = conv2d(b_channels, out_channels, 3, conv_cfg, vs.pp("conv_out"))?;
let span = tracing::span!(tracing::Level::TRACE, "unet2d");
Ok(Self {
conv_in,
time_proj,
time_embedding,
down_blocks,
mid_block,
up_blocks,
conv_norm_out,
conv_out,
span,
config,
})
}
pub fn forward(
&self,
xs: &Tensor,
timestep: f64,
encoder_hidden_states: &Tensor,
) -> Result<Tensor> {
let _enter = self.span.enter();
self.forward_with_additional_residuals(xs, timestep, encoder_hidden_states, None, None)
}
pub fn forward_with_additional_residuals(
&self,
xs: &Tensor,
timestep: f64,
encoder_hidden_states: &Tensor,
down_block_additional_residuals: Option<&[Tensor]>,
mid_block_additional_residual: Option<&Tensor>,
) -> Result<Tensor> {
let (bsize, _channels, height, width) = xs.dims4()?;
let device = xs.device();
let n_blocks = self.config.blocks.len();
let num_upsamplers = n_blocks - 1;
let default_overall_up_factor = 2usize.pow(num_upsamplers as u32);
let forward_upsample_size =
height % default_overall_up_factor != 0 || width % default_overall_up_factor != 0;
// 0. center input if necessary
let xs = if self.config.center_input_sample {
((xs * 2.0)? - 1.0)?
} else {
xs.clone()
};
// 1. time
let emb = (Tensor::ones(bsize, xs.dtype(), device)? * timestep)?;
let emb = self.time_proj.forward(&emb)?;
let emb = self.time_embedding.forward(&emb)?;
// 2. pre-process
let xs = self.conv_in.forward(&xs)?;
// 3. down
let mut down_block_res_xs = vec![xs.clone()];
let mut xs = xs;
for down_block in self.down_blocks.iter() {
let (_xs, res_xs) = match down_block {
UNetDownBlock::Basic(b) => b.forward(&xs, Some(&emb))?,
UNetDownBlock::CrossAttn(b) => {
b.forward(&xs, Some(&emb), Some(encoder_hidden_states))?
}
};
down_block_res_xs.extend(res_xs);
xs = _xs;
}
let new_down_block_res_xs =
if let Some(down_block_additional_residuals) = down_block_additional_residuals {
let mut v = vec![];
// A previous version of this code had a bug because of the addition being made
// in place via += hence modifying the input of the mid block.
for (i, residuals) in down_block_additional_residuals.iter().enumerate() {
v.push((&down_block_res_xs[i] + residuals)?)
}
v
} else {
down_block_res_xs
};
let mut down_block_res_xs = new_down_block_res_xs;
// 4. mid
let xs = self
.mid_block
.forward(&xs, Some(&emb), Some(encoder_hidden_states))?;
let xs = match mid_block_additional_residual {
None => xs,
Some(m) => (m + xs)?,
};
// 5. up
let mut xs = xs;
let mut upsample_size = None;
for (i, up_block) in self.up_blocks.iter().enumerate() {
let n_resnets = match up_block {
UNetUpBlock::Basic(b) => b.resnets.len(),
UNetUpBlock::CrossAttn(b) => b.upblock.resnets.len(),
};
let res_xs = down_block_res_xs.split_off(down_block_res_xs.len() - n_resnets);
if i < n_blocks - 1 && forward_upsample_size {
let (_, _, h, w) = down_block_res_xs.last().unwrap().dims4()?;
upsample_size = Some((h, w))
}
xs = match up_block {
UNetUpBlock::Basic(b) => b.forward(&xs, &res_xs, Some(&emb), upsample_size)?,
UNetUpBlock::CrossAttn(b) => b.forward(
&xs,
&res_xs,
Some(&emb),
upsample_size,
Some(encoder_hidden_states),
)?,
};
}
// 6. post-process
let xs = self.conv_norm_out.forward(&xs)?;
let xs = nn::ops::silu(&xs)?;
self.conv_out.forward(&xs)
}
}
| candle/candle-transformers/src/models/stable_diffusion/unet_2d.rs/0 | {
"file_path": "candle/candle-transformers/src/models/stable_diffusion/unet_2d.rs",
"repo_id": "candle",
"token_count": 8419
} |
use candle::{Module, Result, Tensor};
use candle_nn::VarBuilder;
#[derive(Debug, Clone)]
pub struct Embedding {
inner: candle_nn::Embedding,
span: tracing::Span,
}
impl Embedding {
pub fn new(d1: usize, d2: usize, vb: VarBuilder) -> Result<Self> {
let inner = candle_nn::embedding(d1, d2, vb)?;
let span = tracing::span!(tracing::Level::TRACE, "embedding");
Ok(Self { inner, span })
}
pub fn from_weights(weights: Tensor) -> Result<Self> {
let (_in_size, out_size) = weights.dims2()?;
let inner = candle_nn::Embedding::new(weights, out_size);
let span = tracing::span!(tracing::Level::TRACE, "embedding");
Ok(Self { inner, span })
}
pub fn embeddings(&self) -> &Tensor {
self.inner.embeddings()
}
}
impl Module for Embedding {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward(xs)
}
}
#[derive(Debug, Clone)]
pub struct Linear {
inner: candle_nn::Linear,
span: tracing::Span,
}
impl Linear {
pub fn from_weights(weights: Tensor, bias: Option<Tensor>) -> Self {
let inner = candle_nn::Linear::new(weights, bias);
let span = tracing::span!(tracing::Level::TRACE, "linear");
Self { inner, span }
}
}
pub fn linear_b(d1: usize, d2: usize, b: bool, vb: VarBuilder) -> Result<Linear> {
let inner = candle_nn::linear_b(d1, d2, b, vb)?;
let span = tracing::span!(tracing::Level::TRACE, "linear");
Ok(Linear { inner, span })
}
pub fn linear(d1: usize, d2: usize, vb: VarBuilder) -> Result<Linear> {
let inner = candle_nn::linear(d1, d2, vb)?;
let span = tracing::span!(tracing::Level::TRACE, "linear");
Ok(Linear { inner, span })
}
pub fn linear_no_bias(d1: usize, d2: usize, vb: VarBuilder) -> Result<Linear> {
let inner = candle_nn::linear_no_bias(d1, d2, vb)?;
let span = tracing::span!(tracing::Level::TRACE, "linear");
Ok(Linear { inner, span })
}
impl Module for Linear {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward(xs)
}
}
// Wrap the conv2d op to provide some tracing.
#[derive(Debug, Clone)]
pub struct Conv2d {
inner: candle_nn::Conv2d,
span: tracing::Span,
}
impl Module for Conv2d {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward(x)
}
}
pub fn conv2d(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
cfg: candle_nn::Conv2dConfig,
vs: candle_nn::VarBuilder,
) -> Result<Conv2d> {
let span = tracing::span!(tracing::Level::TRACE, "conv2d");
let inner = candle_nn::conv2d(in_channels, out_channels, kernel_size, cfg, vs)?;
Ok(Conv2d { inner, span })
}
// QMatMul wrapper adding some tracing.
#[derive(Clone)]
pub struct QMatMul {
inner: candle::quantized::QMatMul,
span: tracing::Span,
}
impl QMatMul {
pub fn new(
out_dim: usize,
in_dim: usize,
vb: crate::quantized_var_builder::VarBuilder,
) -> Result<Self> {
let ws = vb.get((in_dim, out_dim), "weight")?;
let inner = candle::quantized::QMatMul::from_arc(ws)?;
let span = tracing::span!(tracing::Level::TRACE, "qmatmul");
Ok(Self { inner, span })
}
pub fn from_weights(ws: std::sync::Arc<candle::quantized::QTensor>) -> Result<Self> {
let inner = candle::quantized::QMatMul::from_arc(ws)?;
let span = tracing::span!(tracing::Level::TRACE, "qmatmul");
Ok(Self { inner, span })
}
}
impl Module for QMatMul {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward(xs)
}
}
impl std::fmt::Debug for QMatMul {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "QMatMul")
}
}
#[derive(Clone, Debug)]
pub struct LayerNorm {
inner: candle_nn::LayerNorm,
span: tracing::Span,
}
impl LayerNorm {
pub fn new(weight: Tensor, bias: Tensor, eps: f64) -> Self {
let inner = candle_nn::LayerNorm::new(weight, bias, eps);
let span = tracing::span!(tracing::Level::TRACE, "layer-norm");
Self { inner, span }
}
}
impl Module for LayerNorm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward(xs)
}
}
pub fn layer_norm<C: Into<candle_nn::LayerNormConfig>>(
size: usize,
c: C,
vb: VarBuilder,
) -> Result<LayerNorm> {
let inner = candle_nn::layer_norm(size, c, vb)?;
let span = tracing::span!(tracing::Level::TRACE, "layer-norm");
Ok(LayerNorm { inner, span })
}
#[derive(Debug, Clone)]
pub struct RmsNorm {
inner: candle_nn::RmsNorm,
span: tracing::Span,
}
impl RmsNorm {
pub fn new(size: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "rms-norm");
let inner = candle_nn::rms_norm(size, eps, vb)?;
Ok(Self { inner, span })
}
pub fn forward_diff(&self, x: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward_diff(x)
}
}
impl Module for RmsNorm {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward(x)
}
}
| candle/candle-transformers/src/models/with_tracing.rs/0 | {
"file_path": "candle/candle-transformers/src/models/with_tracing.rs",
"repo_id": "candle",
"token_count": 2381
} |
use candle::{Device, Result, Tensor};
use candle_transformers::generation::LogitsProcessor;
#[test]
fn sample_with_zero_temperature() -> Result<()> {
let mut logits_process = LogitsProcessor::new(1337, None, None);
let logits = Tensor::new(&[0.1, 0.2, 0.3, 0.4], &Device::Cpu)?;
let token = logits_process.sample(&logits)?;
assert_eq!(token, 3);
Ok(())
}
#[test]
fn sample_with_temperature() -> Result<()> {
let mut logits_process = LogitsProcessor::new(42, Some(0.9), None);
let logits = Tensor::new(&[0.1, 0.2, 0.3, 0.4], &Device::Cpu)?;
let token = logits_process.sample(&logits)?;
assert_eq!(token, 0);
Ok(())
}
#[test]
fn sample_with_top_p() -> Result<()> {
let mut logits_process = LogitsProcessor::new(42, Some(1.0), Some(0.5));
let logits = Tensor::new(&[0.1, 0.2, 0.3, 0.4], &Device::Cpu)?;
let token = logits_process.sample(&logits)?;
assert_eq!(token, 2);
Ok(())
}
#[test]
fn sample_with_top_k() -> Result<()> {
let mut logits_process = LogitsProcessor::from_sampling(
42,
candle_transformers::generation::Sampling::TopK {
k: 1,
temperature: 1.0,
},
);
let logits = Tensor::new(&[0.1, 0.2, 0.3, 0.4], &Device::Cpu)?;
let token = logits_process.sample(&logits)?;
assert_eq!(token, 3);
let mut logits_process = LogitsProcessor::from_sampling(
42,
candle_transformers::generation::Sampling::TopK {
k: 2,
temperature: 1.0,
},
);
let logits = Tensor::new(&[0.1, 0.2, 0.3, 0.4], &Device::Cpu)?;
let token = logits_process.sample(&logits)?;
assert_eq!(token, 3);
let token = logits_process.sample(&logits)?;
assert_eq!(token, 2);
Ok(())
}
| candle/candle-transformers/tests/generation_tests.rs/0 | {
"file_path": "candle/candle-transformers/tests/generation_tests.rs",
"repo_id": "candle",
"token_count": 806
} |
use wasm_bindgen::prelude::*;
pub mod token_output_stream;
#[wasm_bindgen]
extern "C" {
// Use `js_namespace` here to bind `console.log(..)` instead of just
// `log(..)`
#[wasm_bindgen(js_namespace = console)]
pub fn log(s: &str);
}
#[macro_export]
macro_rules! console_log {
    // Note that this uses the `log` function imported above.
($($t:tt)*) => ($crate::log(&format_args!($($t)*).to_string()))
}
| candle/candle-wasm-examples/blip/src/lib.rs/0 | {
"file_path": "candle/candle-wasm-examples/blip/src/lib.rs",
"repo_id": "candle",
"token_count": 192
} |
## Running [Moondream 2](https://huggingface.co/vikhyatk/moondream2) Model Example
### Vanilla JS and WebWorkers
To build and test the UI made in Vanilla JS and WebWorkers, first we need to build the WASM library:
```bash
sh build-lib.sh
```
This will bundle the library under `./build` and we can import it inside our WebWorker like a normal JS module:
```js
import init, { Model } from "./build/m.js";
```
The full example can be found under `./index.html`. All needed assets are fetched from the web, so no need to download anything.
Finally, you can preview the example by running a local HTTP server. For example:
```bash
python -m http.server
```
Then open `http://localhost:8000/index.html` in your browser.
| candle/candle-wasm-examples/moondream/README.md/0 | {
"file_path": "candle/candle-wasm-examples/moondream/README.md",
"repo_id": "candle",
"token_count": 217
} |
## Running Whisper Examples
Here, we provide two examples of how to run Whisper using a Candle-compiled WASM binary and runtimes.
### Pure Rust UI
To build and test the UI made in Rust, you will need [Trunk](https://trunkrs.dev/#install).
From the `candle-wasm-examples/whisper` directory, run the following commands.
Download the assets:
```bash
# mel filters
wget -c https://huggingface.co/spaces/lmz/candle-whisper/resolve/main/mel_filters.safetensors
# Model and tokenizer tiny.en
wget -c https://huggingface.co/openai/whisper-tiny.en/resolve/main/model.safetensors -P whisper-tiny.en
wget -c https://huggingface.co/openai/whisper-tiny.en/raw/main/tokenizer.json -P whisper-tiny.en
wget -c https://huggingface.co/openai/whisper-tiny.en/raw/main/config.json -P whisper-tiny.en
# Model and tokenizer tiny (multilanguage)
wget -c https://huggingface.co/openai/whisper-tiny/resolve/main/model.safetensors -P whisper-tiny
wget -c https://huggingface.co/openai/whisper-tiny/raw/main/tokenizer.json -P whisper-tiny
wget -c https://huggingface.co/openai/whisper-tiny/raw/main/config.json -P whisper-tiny
# Quantized model, tokenizer and config (tiny.en)
wget -c https://huggingface.co/lmz/candle-whisper/resolve/main/model-tiny-en-q80.gguf -P quantized
wget -c https://huggingface.co/lmz/candle-whisper/raw/main/tokenizer-tiny-en.json -P quantized
wget -c https://huggingface.co/lmz/candle-whisper/raw/main/config-tiny-en.json -P quantized
# Audio samples
wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_gb0.wav -P audios
wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_a13.wav -P audios
wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_gb1.wav -P audios
wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_hp0.wav -P audios
wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_jfk.wav -P audios
wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_mm0.wav -P audios
```
Run the hot-reload server:
```bash
trunk serve --release --public-url / --port 8080
```
### Vanilla JS and WebWorkers
To build and test the UI made in Vanilla JS and WebWorkers, first we need to build the WASM library:
```bash
sh build-lib.sh
```
This will bundle the library under `./build` and we can import it inside our WebWorker like a normal JS module:
```js
import init, { Decoder } from "./build/m.js";
```
The full example can be found under `./lib-example.html`. All needed assets are fetched from the web, so no need to download anything.
Finally, you can preview the example by running a local HTTP server. For example:
```bash
python -m http.server
```
Then open `http://localhost:8000/lib-example.html` in your browser.
| candle/candle-wasm-examples/whisper/README.md/0 | {
"file_path": "candle/candle-wasm-examples/whisper/README.md",
"repo_id": "candle",
"token_count": 1023
} |
{
"moz:firefoxOptions": {
"prefs": {
"media.navigator.streams.fake": true,
"media.navigator.permission.disabled": true
},
"args": []
},
"goog:chromeOptions": {
"args": [
"--use-fake-device-for-media-stream",
"--use-fake-ui-for-media-stream"
]
}
}
| candle/candle-wasm-tests/webdriver.json/0 | {
"file_path": "candle/candle-wasm-tests/webdriver.json",
"repo_id": "candle",
"token_count": 143
} |
apiVersion: v1
kind: ConfigMap
metadata:
labels: {{ include "labels.standard" . | nindent 4 }}
name: {{ include "name" . }}
namespace: {{ .Release.Namespace }}
data:
{{- range $key, $value := $.Values.envVars }}
{{ $key }}: {{ $value | quote }}
{{- end }}
| chat-ui/chart/templates/config.yaml/0 | {
"file_path": "chat-ui/chart/templates/config.yaml",
"repo_id": "chat-ui",
"token_count": 96
} |
# Anthropic
| Feature | Available |
| --------------------------- | --------- |
| [Tools](../tools) | No |
| [Multimodal](../multimodal) | Yes |
We also support Anthropic models (including multimodal ones via `multimodal: true`) through the official SDK. You may provide your API key via the `ANTHROPIC_API_KEY` env variable, or alternatively through `endpoints.apiKey`, as in the following example.
```ini
MODELS=`[
{
"name": "claude-3-haiku-20240307",
"displayName": "Claude 3 Haiku",
"description": "Fastest and most compact model for near-instant responsiveness",
"multimodal": true,
"parameters": {
"max_new_tokens": 4096,
},
"endpoints": [
{
"type": "anthropic",
// optionals
"apiKey": "sk-ant-...",
"baseURL": "https://api.anthropic.com",
"defaultHeaders": {},
"defaultQuery": {}
}
]
},
{
"name": "claude-3-sonnet-20240229",
"displayName": "Claude 3 Sonnet",
"description": "Ideal balance of intelligence and speed",
"multimodal": true,
"parameters": {
"max_new_tokens": 4096,
},
"endpoints": [
{
"type": "anthropic",
// optionals
"apiKey": "sk-ant-...",
"baseURL": "https://api.anthropic.com",
"defaultHeaders": {},
"defaultQuery": {}
}
]
},
{
"name": "claude-3-opus-20240229",
"displayName": "Claude 3 Opus",
"description": "Most powerful model for highly complex tasks",
"multimodal": true,
"parameters": {
"max_new_tokens": 4096
},
"endpoints": [
{
"type": "anthropic",
// optionals
"apiKey": "sk-ant-...",
"baseURL": "https://api.anthropic.com",
"defaultHeaders": {},
"defaultQuery": {}
}
]
}
]`
```
## VertexAI
We also support Anthropic models running on Vertex AI. Authentication is done using Google Application Default Credentials. The project ID can be provided through `endpoints.projectId`, as in the following example:
```ini
MODELS=`[
{
"name": "claude-3-haiku@20240307",
"displayName": "Claude 3 Haiku",
"description": "Fastest, most compact model for near-instant responsiveness",
"multimodal": true,
"parameters": {
"max_new_tokens": 4096
},
"endpoints": [
{
"type": "anthropic-vertex",
"region": "us-central1",
"projectId": "gcp-project-id",
// optionals
"defaultHeaders": {},
"defaultQuery": {}
}
]
},
{
"name": "claude-3-sonnet@20240229",
"displayName": "Claude 3 Sonnet",
"description": "Ideal balance of intelligence and speed",
"multimodal": true,
"parameters": {
"max_new_tokens": 4096,
},
"endpoints": [
{
"type": "anthropic-vertex",
"region": "us-central1",
"projectId": "gcp-project-id",
// optionals
"defaultHeaders": {},
"defaultQuery": {}
}
]
},
]`
```
| chat-ui/docs/source/configuration/models/providers/anthropic.md/0 | {
"file_path": "chat-ui/docs/source/configuration/models/providers/anthropic.md",
"repo_id": "chat-ui",
"token_count": 1541
} |
# Copy HuggingChat
The config file for HuggingChat is stored in `chart/env/prod.yaml`. It is the source of truth for the environment variables used in our CI/CD pipeline. Because HuggingChat needs a custom app color and base path, we build a custom Docker image. You can find the workflow here.
<Tip>
If you want to make changes to the model config used in production for HuggingChat, you should do so against `chart/env/prod.yaml`.
</Tip>
### Running a copy of HuggingChat locally
If you want to run an exact copy of HuggingChat locally, you will need to do the following first:
1. Create an [OAuth App on the hub](https://huggingface.co/settings/applications/new) with `openid profile email` permissions. Make sure to set the callback URL to something like `http://localhost:5173/chat/login/callback`, matching the path of your local instance.
2. Create an [HF token](https://huggingface.co/settings/tokens) with your Hugging Face account. You will need a Pro account to access some of the larger models available through HuggingChat.
3. Create a free account with [serper.dev](https://serper.dev/) (you will get 2500 free search queries)
4. Run an instance of MongoDB, either locally or remotely.
You can then create a new `.env.SECRET_CONFIG` file with the following content:
```ini
MONGODB_URL=<link to your mongo DB from step 4>
HF_TOKEN=<your HF token from step 2>
OPENID_CONFIG=`{
PROVIDER_URL: "https://huggingface.co",
CLIENT_ID: "<your client ID from step 1>",
CLIENT_SECRET: "<your client secret from step 1>",
}`
SERPER_API_KEY=<your serper API key from step 3>
MESSAGES_BEFORE_LOGIN=<can be any numerical value, or set to 0 to require login>
```
You can then run `npm run updateLocalEnv` in the root of chat-ui. This creates a `.env.local` file that combines `chart/env/prod.yaml` with your `.env.SECRET_CONFIG`. Finally, run `npm run dev` to start your local instance of HuggingChat.
### Populate database
<Tip warning={true}>
The `MONGODB_URL` used for this script will be fetched from `.env.local`. Make sure it's correct! The command runs directly on the database.
</Tip>
You can populate the database with faker data using the `populate` script:
```bash
npm run populate <flags here>
```
At least one flag must be specified. The following flags are available:
- `reset` - resets the database
- `all` - populates all tables
- `users` - populates the users table
- `settings` - populates the settings table for existing users
- `assistants` - populates the assistants table for existing users
- `conversations` - populates the conversations table for existing users
For example, you could use it like so:
```bash
npm run populate reset
```
to clear out the database. Then log in to the app to create your user and run the following command:
```bash
npm run populate users settings assistants conversations
```
to populate the database with fake data, including fake conversations and assistants for your user.
| chat-ui/docs/source/developing/copy-huggingchat.md/0 | {
"file_path": "chat-ui/docs/source/developing/copy-huggingchat.md",
"repo_id": "chat-ui",
"token_count": 870
} |
import { env } from "$env/dynamic/private";
import { env as envPublic } from "$env/dynamic/public";
import type { Handle, HandleServerError } from "@sveltejs/kit";
import { collections } from "$lib/server/database";
import { base } from "$app/paths";
import { findUser, refreshSessionCookie, requiresUser } from "$lib/server/auth";
import { ERROR_MESSAGES } from "$lib/stores/errors";
import { sha256 } from "$lib/utils/sha256";
import { addWeeks } from "date-fns";
import { checkAndRunMigrations } from "$lib/migrations/migrations";
import { building } from "$app/environment";
import { logger } from "$lib/server/logger";
import { AbortedGenerations } from "$lib/server/abortedGenerations";
import { MetricsServer } from "$lib/server/metrics";
import { initExitHandler } from "$lib/server/exitHandler";
import { ObjectId } from "mongodb";
import { refreshAssistantsCounts } from "$lib/jobs/refresh-assistants-counts";
import { refreshConversationStats } from "$lib/jobs/refresh-conversation-stats";
// TODO: move this code on a started server hook, instead of using a "building" flag
if (!building) {
// Set HF_TOKEN as a process variable for Transformers.JS to see it
process.env.HF_TOKEN ??= env.HF_TOKEN;
logger.info("Starting server...");
initExitHandler();
checkAndRunMigrations();
if (env.ENABLE_ASSISTANTS) {
refreshAssistantsCounts();
}
refreshConversationStats();
// Init metrics server
MetricsServer.getInstance();
// Init AbortedGenerations refresh process
AbortedGenerations.getInstance();
}
export const handleError: HandleServerError = async ({ error, event, status, message }) => {
// handle 404
if (building) {
throw error;
}
if (event.route.id === null) {
return {
message: `Page ${event.url.pathname} not found`,
};
}
const errorId = crypto.randomUUID();
logger.error({
locals: event.locals,
url: event.request.url,
params: event.params,
request: event.request,
message,
error,
errorId,
status,
stack: error instanceof Error ? error.stack : undefined,
});
return {
message: "An error occurred",
errorId,
};
};
export const handle: Handle = async ({ event, resolve }) => {
logger.debug({
locals: event.locals,
url: event.url.pathname,
params: event.params,
request: event.request,
});
if (event.url.pathname.startsWith(`${base}/api/`) && env.EXPOSE_API !== "true") {
return new Response("API is disabled", { status: 403 });
}
function errorResponse(status: number, message: string) {
const sendJson =
event.request.headers.get("accept")?.includes("application/json") ||
event.request.headers.get("content-type")?.includes("application/json");
return new Response(sendJson ? JSON.stringify({ error: message }) : message, {
status,
headers: {
"content-type": sendJson ? "application/json" : "text/plain",
},
});
}
if (event.url.pathname.startsWith(`${base}/admin/`) || event.url.pathname === `${base}/admin`) {
const ADMIN_SECRET = env.ADMIN_API_SECRET || env.PARQUET_EXPORT_SECRET;
if (!ADMIN_SECRET) {
return errorResponse(500, "Admin API is not configured");
}
if (event.request.headers.get("Authorization") !== `Bearer ${ADMIN_SECRET}`) {
return errorResponse(401, "Unauthorized");
}
}
const token = event.cookies.get(env.COOKIE_NAME);
// if the trusted email header is set we use it to get the user email
const email = env.TRUSTED_EMAIL_HEADER
? event.request.headers.get(env.TRUSTED_EMAIL_HEADER)
: null;
let secretSessionId: string | null = null;
let sessionId: string | null = null;
if (email) {
secretSessionId = sessionId = await sha256(email);
event.locals.user = {
// generate id based on email
_id: new ObjectId(sessionId.slice(0, 24)),
name: email,
email,
createdAt: new Date(),
updatedAt: new Date(),
hfUserId: email,
avatarUrl: "",
logoutDisabled: true,
};
} else if (token) {
secretSessionId = token;
sessionId = await sha256(token);
const user = await findUser(sessionId);
if (user) {
event.locals.user = user;
}
} else if (event.url.pathname.startsWith(`${base}/api/`) && env.USE_HF_TOKEN_IN_API === "true") {
// if the request goes to the API and no user is available in the header
// check if a bearer token is available in the Authorization header
const authorization = event.request.headers.get("Authorization");
if (authorization && authorization.startsWith("Bearer ")) {
const token = authorization.slice(7);
const hash = await sha256(token);
sessionId = secretSessionId = hash;
// check if the hash is in the DB and get the user
// else check against https://huggingface.co/api/whoami-v2
const cacheHit = await collections.tokenCaches.findOne({ tokenHash: hash });
if (cacheHit) {
const user = await collections.users.findOne({ hfUserId: cacheHit.userId });
if (!user) {
return errorResponse(500, "User not found");
}
event.locals.user = user;
} else {
const response = await fetch("https://huggingface.co/api/whoami-v2", {
headers: {
Authorization: `Bearer ${token}`,
},
});
if (!response.ok) {
return errorResponse(401, "Unauthorized");
}
const data = await response.json();
const user = await collections.users.findOne({ hfUserId: data.id });
if (!user) {
return errorResponse(500, "User not found");
}
await collections.tokenCaches.insertOne({
tokenHash: hash,
userId: data.id,
createdAt: new Date(),
updatedAt: new Date(),
});
event.locals.user = user;
}
}
}
if (!sessionId || !secretSessionId) {
secretSessionId = crypto.randomUUID();
sessionId = await sha256(secretSessionId);
if (await collections.sessions.findOne({ sessionId })) {
return errorResponse(500, "Session ID collision");
}
}
event.locals.sessionId = sessionId;
// CSRF protection
const requestContentType = event.request.headers.get("content-type")?.split(";")[0] ?? "";
/** https://developer.mozilla.org/en-US/docs/Web/HTML/Element/form#attr-enctype */
const nativeFormContentTypes = [
"multipart/form-data",
"application/x-www-form-urlencoded",
"text/plain",
];
if (event.request.method === "POST") {
if (nativeFormContentTypes.includes(requestContentType)) {
const origin = event.request.headers.get("origin");
if (!origin) {
return errorResponse(403, "Non-JSON form requests need to have an origin");
}
const validOrigins = [
new URL(event.request.url).host,
...(envPublic.PUBLIC_ORIGIN ? [new URL(envPublic.PUBLIC_ORIGIN).host] : []),
];
if (!validOrigins.includes(new URL(origin).host)) {
return errorResponse(403, "Invalid referer for POST request");
}
}
}
if (event.request.method === "POST") {
// if the request is a POST request we refresh the cookie
refreshSessionCookie(event.cookies, secretSessionId);
await collections.sessions.updateOne(
{ sessionId },
{ $set: { updatedAt: new Date(), expiresAt: addWeeks(new Date(), 2) } }
);
}
if (
!event.url.pathname.startsWith(`${base}/login`) &&
!event.url.pathname.startsWith(`${base}/admin`) &&
!event.url.pathname.startsWith(`${base}/settings`) &&
!["GET", "OPTIONS", "HEAD"].includes(event.request.method)
) {
if (
!event.locals.user &&
requiresUser &&
!((env.MESSAGES_BEFORE_LOGIN ? parseInt(env.MESSAGES_BEFORE_LOGIN) : 0) > 0)
) {
return errorResponse(401, ERROR_MESSAGES.authOnly);
}
// if login is not required and the call is not from /settings and we display the ethics modal with PUBLIC_APP_DISCLAIMER
// we check if the user has accepted the ethics modal first.
// If login is required, `ethicsModalAcceptedAt` is already true at this point, so do not pass this condition. This saves a DB call.
if (
!requiresUser &&
!event.url.pathname.startsWith(`${base}/settings`) &&
envPublic.PUBLIC_APP_DISCLAIMER === "1"
) {
const hasAcceptedEthicsModal = await collections.settings.countDocuments({
sessionId: event.locals.sessionId,
ethicsModalAcceptedAt: { $exists: true },
});
if (!hasAcceptedEthicsModal) {
return errorResponse(405, "You need to accept the welcome modal first");
}
}
}
let replaced = false;
const response = await resolve(event, {
transformPageChunk: (chunk) => {
// For some reason, Sveltekit doesn't let us load env variables from .env in the app.html template
if (replaced || !chunk.html.includes("%gaId%")) {
return chunk.html;
}
replaced = true;
return chunk.html.replace("%gaId%", envPublic.PUBLIC_GOOGLE_ANALYTICS_ID);
},
});
// Add CSP header to disallow framing if ALLOW_IFRAME is not "true"
if (env.ALLOW_IFRAME !== "true") {
response.headers.append("Content-Security-Policy", "frame-ancestors 'none';");
}
return response;
};
| chat-ui/src/hooks.server.ts/0 | {
"file_path": "chat-ui/src/hooks.server.ts",
"repo_id": "chat-ui",
"token_count": 3211
} |
<script lang="ts">
import { createEventDispatcher, onDestroy, onMount } from "svelte";
import { cubicOut } from "svelte/easing";
import { fade, fly } from "svelte/transition";
import Portal from "./Portal.svelte";
import { browser } from "$app/environment";
interface Props {
width?: string;
children?: import("svelte").Snippet;
}
let { width = "max-w-sm", children }: Props = $props();
let backdropEl: HTMLDivElement | undefined = $state();
let modalEl: HTMLDivElement | undefined = $state();
const dispatch = createEventDispatcher<{ close: void }>();
function handleKeydown(event: KeyboardEvent) {
// close on ESC
if (event.key === "Escape") {
event.preventDefault();
dispatch("close");
}
}
function handleBackdropClick(event: MouseEvent) {
if (window?.getSelection()?.toString()) {
return;
}
if (event.target === backdropEl) {
dispatch("close");
}
}
onMount(() => {
document.getElementById("app")?.setAttribute("inert", "true");
modalEl?.focus();
});
onDestroy(() => {
if (!browser) return;
document.getElementById("app")?.removeAttribute("inert");
});
</script>
<Portal>
<!-- svelte-ignore a11y_no_noninteractive_element_interactions -->
<div
role="presentation"
tabindex="-1"
bind:this={backdropEl}
onclick={(e) => {
e.stopPropagation();
handleBackdropClick(e);
}}
transition:fade|local={{ easing: cubicOut, duration: 300 }}
class="fixed inset-0 z-40 flex items-center justify-center bg-black/80 p-8 backdrop-blur-sm dark:bg-black/50"
>
<div
role="dialog"
tabindex="-1"
bind:this={modalEl}
onkeydown={handleKeydown}
in:fly={{ y: 100 }}
class={[
"max-h-[90dvh] overflow-y-auto overflow-x-hidden rounded-2xl bg-white shadow-2xl outline-none sm:-mt-10",
width,
]}
>
{@render children?.()}
</div>
</div>
</Portal>
| chat-ui/src/lib/components/Modal.svelte/0 | {
"file_path": "chat-ui/src/lib/components/Modal.svelte",
"repo_id": "chat-ui",
"token_count": 734
} |
<script lang="ts">
import ToolLogo from "./ToolLogo.svelte";
import { base } from "$app/paths";
import { browser } from "$app/environment";
interface Props {
toolId: string;
}
let { toolId }: Props = $props();
</script>
<div
class="relative flex items-center justify-center space-x-2 rounded border border-gray-300 bg-gray-200 px-2 py-1"
>
{#if browser}
{#await fetch(`${base}/api/tools/${toolId}`).then((res) => res.json()) then value}
{#key value.color + value.icon}
<ToolLogo color={value.color} icon={value.icon} size="sm" />
{/key}
<div class="flex flex-col items-center justify-center py-1">
<a
href={`${base}/tools/${value._id}`}
target="_blank"
class="line-clamp-1 truncate font-semibold text-blue-600 hover:underline"
>{value.displayName}</a
>
{#if value.createdByName}
<p class="text-center text-xs text-gray-500">
Created by
<a class="underline" href="{base}/tools?user={value.createdByName}" target="_blank"
>{value.createdByName}</a
>
</p>
{:else}
<p class="text-center text-xs text-gray-500">Official HuggingChat tool</p>
{/if}
</div>
{/await}
{/if}
</div>
| chat-ui/src/lib/components/ToolBadge.svelte/0 | {
"file_path": "chat-ui/src/lib/components/ToolBadge.svelte",
"repo_id": "chat-ui",
"token_count": 523
} |
<script lang="ts">
import { MessageToolUpdateType, type MessageToolUpdate } from "$lib/types/MessageUpdate";
import {
isMessageToolCallUpdate,
isMessageToolErrorUpdate,
isMessageToolResultUpdate,
} from "$lib/utils/messageUpdates";
import CarbonTools from "~icons/carbon/tools";
import { ToolResultStatus, type ToolFront } from "$lib/types/Tool";
import { page } from "$app/state";
import { onDestroy } from "svelte";
import { browser } from "$app/environment";
interface Props {
tool: MessageToolUpdate[];
loading?: boolean;
}
let { tool, loading = false }: Props = $props();
const toolFnName = tool.find(isMessageToolCallUpdate)?.call.name;
let toolError = $derived(tool.some(isMessageToolErrorUpdate));
let toolDone = $derived(tool.some(isMessageToolResultUpdate));
let eta = $derived(tool.find((el) => el.subtype === MessageToolUpdateType.ETA)?.eta);
const availableTools: ToolFront[] = page.data.tools;
let loadingBarEl: HTMLDivElement | undefined = $state();
let animation: Animation | undefined = $state(undefined);
let isShowingLoadingBar = $state(false);
$effect(() => {
!toolError &&
!toolDone &&
loading &&
loadingBarEl &&
eta &&
(() => {
loadingBarEl.classList.remove("hidden");
isShowingLoadingBar = true;
animation = loadingBarEl.animate([{ width: "0%" }, { width: "calc(100%+1rem)" }], {
duration: eta * 1000,
fill: "forwards",
});
})();
});
onDestroy(() => {
if (animation) {
animation.cancel();
}
});
// go to 100% quickly if loading is done
$effect(() => {
(!loading || toolDone || toolError) &&
browser &&
loadingBarEl &&
isShowingLoadingBar &&
(() => {
isShowingLoadingBar = false;
loadingBarEl.classList.remove("hidden");
animation?.cancel();
animation = loadingBarEl.animate(
[{ width: loadingBarEl.style.width }, { width: "calc(100%+1rem)" }],
{
duration: 300,
fill: "forwards",
}
);
setTimeout(() => {
loadingBarEl?.classList.add("hidden");
}, 300);
})();
});
</script>
{#if toolFnName && toolFnName !== "websearch"}
<details
class="group/tool my-2.5 w-fit cursor-pointer rounded-lg border border-gray-200 bg-white pl-1 pr-2.5 text-sm shadow-sm transition-all open:mb-3
open:border-purple-500/10 open:bg-purple-600/5 open:shadow-sm dark:border-gray-800 dark:bg-gray-900 open:dark:border-purple-800/40 open:dark:bg-purple-800/10"
>
<summary
class="relative flex select-none list-none items-center gap-1.5 py-1 group-open/tool:text-purple-700 group-open/tool:dark:text-purple-300"
>
<div
bind:this={loadingBarEl}
class="absolute -m-1 hidden h-full w-[calc(100%+1rem)] rounded-lg bg-purple-500/5 transition-all dark:bg-purple-500/10"
></div>
<div
class="relative grid size-[22px] place-items-center rounded bg-purple-600/10 dark:bg-purple-600/20"
>
<svg
class="absolute inset-0 text-purple-500/40 transition-opacity"
class:invisible={toolDone || toolError}
width="22"
height="22"
viewBox="0 0 38 38"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
class="loading-path"
d="M8 2.5H30C30 2.5 35.5 2.5 35.5 8V30C35.5 30 35.5 35.5 30 35.5H8C8 35.5 2.5 35.5 2.5 30V8C2.5 8 2.5 2.5 8 2.5Z"
stroke="currentColor"
stroke-width="1"
stroke-linecap="round"
id="shape"
/>
</svg>
<CarbonTools class="text-xs text-purple-700 dark:text-purple-500" />
</div>
<span>
{toolError ? "Error calling" : toolDone ? "Called" : "Calling"} tool
<span class="font-semibold"
>{availableTools.find((tool) => tool.name === toolFnName)?.displayName ??
toolFnName}</span
>
</span>
</summary>
{#each tool as toolUpdate}
{#if toolUpdate.subtype === MessageToolUpdateType.Call}
<div class="mt-1 flex items-center gap-2 opacity-80">
<h3 class="text-sm">Parameters</h3>
<div class="h-px flex-1 bg-gradient-to-r from-gray-500/20"></div>
</div>
<ul class="py-1 text-sm">
{#each Object.entries(toolUpdate.call.parameters ?? {}) as [k, v]}
{#if v !== null}
<li>
<span class="font-semibold">{k}</span>:
<span>{v}</span>
</li>
{/if}
{/each}
</ul>
{:else if toolUpdate.subtype === MessageToolUpdateType.Error}
<div class="mt-1 flex items-center gap-2 opacity-80">
<h3 class="text-sm">Error</h3>
<div class="h-px flex-1 bg-gradient-to-r from-gray-500/20"></div>
</div>
<p class="text-sm">{toolUpdate.message}</p>
{:else if isMessageToolResultUpdate(toolUpdate) && toolUpdate.result.status === ToolResultStatus.Success && toolUpdate.result.display}
<div class="mt-1 flex items-center gap-2 opacity-80">
<h3 class="text-sm">Result</h3>
<div class="h-px flex-1 bg-gradient-to-r from-gray-500/20"></div>
</div>
<ul class="py-1 text-sm">
{#each toolUpdate.result.outputs as output}
{#each Object.entries(output) as [k, v]}
{#if v !== null}
<li>
<span class="font-semibold">{k}</span>:
<span>{v}</span>
</li>
{/if}
{/each}
{/each}
</ul>
{/if}
{/each}
</details>
{/if}
<style>
details summary::-webkit-details-marker {
display: none;
}
.loading-path {
stroke-dasharray: 61.45;
animation: loading 2s linear infinite;
}
</style>
| chat-ui/src/lib/components/chat/ToolUpdate.svelte/0 | {
"file_path": "chat-ui/src/lib/components/chat/ToolUpdate.svelte",
"repo_id": "chat-ui",
"token_count": 2368
} |
import type { Migration } from ".";
import { collections } from "$lib/server/database";
import { Collection, FindCursor, ObjectId } from "mongodb";
import { logger } from "$lib/server/logger";
import type { Conversation } from "$lib/types/Conversation";
const BATCH_SIZE = 1000;
const DELETE_THRESHOLD_MS = 60 * 60 * 1000;
async function deleteBatch(conversations: Collection<Conversation>, ids: ObjectId[]) {
if (ids.length === 0) return 0;
const deleteResult = await conversations.deleteMany({ _id: { $in: ids } });
return deleteResult.deletedCount;
}
async function processCursor<T>(
cursor: FindCursor<T>,
processBatchFn: (batch: T[]) => Promise<void>
) {
let batch = [];
while (await cursor.hasNext()) {
const doc = await cursor.next();
if (doc) {
batch.push(doc);
}
if (batch.length >= BATCH_SIZE) {
await processBatchFn(batch);
batch = [];
}
}
if (batch.length > 0) {
await processBatchFn(batch);
}
}
export async function deleteConversations(
collections: typeof import("$lib/server/database").collections
) {
let deleteCount = 0;
const { conversations, sessions } = collections;
// First criteria: Delete conversations with no user/assistant messages older than 1 hour
const emptyConvCursor = conversations
.find({
"messages.from": { $not: { $in: ["user", "assistant"] } },
createdAt: { $lt: new Date(Date.now() - DELETE_THRESHOLD_MS) },
})
.batchSize(BATCH_SIZE);
await processCursor(emptyConvCursor, async (batch) => {
const ids = batch.map((doc) => doc._id);
deleteCount += await deleteBatch(conversations, ids);
});
// Second criteria: Process conversations without users in batches and check sessions
const noUserCursor = conversations.find({ userId: { $exists: false } }).batchSize(BATCH_SIZE);
await processCursor(noUserCursor, async (batch) => {
const sessionIds = [
...new Set(batch.map((conv) => conv.sessionId).filter((id): id is string => !!id)),
];
const existingSessions = await sessions.find({ sessionId: { $in: sessionIds } }).toArray();
const validSessionIds = new Set(existingSessions.map((s) => s.sessionId));
const invalidConvs = batch.filter(
(conv) => !conv.sessionId || !validSessionIds.has(conv.sessionId)
);
const idsToDelete = invalidConvs.map((conv) => conv._id);
deleteCount += await deleteBatch(conversations, idsToDelete);
});
logger.info(`[MIGRATIONS] Deleted ${deleteCount} conversations in total.`);
return deleteCount;
}
const deleteEmptyConversations: Migration = {
_id: new ObjectId("000000000000000000000009"),
name: "Delete conversations with no user or assistant messages or valid sessions",
up: async () => {
await deleteConversations(collections);
return true;
},
runEveryTime: false,
runForHuggingChat: "only",
};
export default deleteEmptyConversations;
| chat-ui/src/lib/migrations/routines/09-delete-empty-conversations.ts/0 | {
"file_path": "chat-ui/src/lib/migrations/routines/09-delete-empty-conversations.ts",
"repo_id": "chat-ui",
"token_count": 950
} |
import { z } from "zod";
import type { Endpoint } from "../endpoints";
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import { env } from "$env/dynamic/private";
import { logger } from "$lib/server/logger";
export const endpointCloudflareParametersSchema = z.object({
weight: z.number().int().positive().default(1),
model: z.any(),
type: z.literal("cloudflare"),
accountId: z.string().default(env.CLOUDFLARE_ACCOUNT_ID),
apiToken: z.string().default(env.CLOUDFLARE_API_TOKEN),
});
export async function endpointCloudflare(
input: z.input<typeof endpointCloudflareParametersSchema>
): Promise<Endpoint> {
const { accountId, apiToken, model } = endpointCloudflareParametersSchema.parse(input);
if (!model.id.startsWith("@")) {
model.id = "@hf/" + model.id;
}
const apiURL = `https://api.cloudflare.com/client/v4/accounts/${accountId}/ai/run/${model.id}`;
return async ({ messages, preprompt, generateSettings }) => {
let messagesFormatted = messages.map((message) => ({
role: message.from,
content: message.content,
}));
if (messagesFormatted?.[0]?.role !== "system") {
messagesFormatted = [{ role: "system", content: preprompt ?? "" }, ...messagesFormatted];
}
const parameters = { ...model.parameters, ...generateSettings };
const payload = JSON.stringify({
messages: messagesFormatted,
stream: true,
max_tokens: parameters?.max_new_tokens,
temperature: parameters?.temperature,
top_p: parameters?.top_p,
top_k: parameters?.top_k,
repetition_penalty: parameters?.repetition_penalty,
});
const res = await fetch(apiURL, {
method: "POST",
headers: {
Authorization: `Bearer ${apiToken}`,
"Content-Type": "application/json",
},
body: payload,
});
if (!res.ok) {
throw new Error(`Failed to generate text: ${await res.text()}`);
}
const encoder = new TextDecoderStream();
const reader = res.body?.pipeThrough(encoder).getReader();
return (async function* () {
let stop = false;
let generatedText = "";
let tokenId = 0;
let accumulatedData = ""; // Buffer to accumulate data chunks
while (!stop) {
const out = await reader?.read();
// If it's done, we cancel
if (out?.done) {
reader?.cancel();
return;
}
if (!out?.value) {
return;
}
// Accumulate the data chunk
accumulatedData += out.value;
// Process each complete JSON object in the accumulated data
while (accumulatedData.includes("\n")) {
// Assuming each JSON object ends with a newline
const endIndex = accumulatedData.indexOf("\n");
let jsonString = accumulatedData.substring(0, endIndex).trim();
// Remove the processed part from the buffer
accumulatedData = accumulatedData.substring(endIndex + 1);
if (jsonString.startsWith("data: ")) {
jsonString = jsonString.slice(6);
let data = null;
if (jsonString === "[DONE]") {
stop = true;
yield {
token: {
id: tokenId++,
text: "",
logprob: 0,
special: true,
},
generated_text: generatedText,
details: null,
} satisfies TextGenerationStreamOutput;
reader?.cancel();
continue;
}
try {
data = JSON.parse(jsonString);
} catch (e) {
logger.error(e, "Failed to parse JSON");
logger.error(jsonString, "Problematic JSON string:");
continue; // Skip this iteration and try the next chunk
}
// Handle the parsed data
if (data.response) {
generatedText += data.response ?? "";
const output: TextGenerationStreamOutput = {
token: {
id: tokenId++,
text: data.response ?? "",
logprob: 0,
special: false,
},
generated_text: null,
details: null,
};
yield output;
}
}
}
}
})();
};
}
export default endpointCloudflare;
| chat-ui/src/lib/server/endpoints/cloudflare/endpointCloudflare.ts/0 | {
"file_path": "chat-ui/src/lib/server/endpoints/cloudflare/endpointCloudflare.ts",
"repo_id": "chat-ui",
"token_count": 1621
} |
import { error } from "@sveltejs/kit";
import { collections } from "$lib/server/database";
import type { Conversation } from "$lib/types/Conversation";
import type { SharedConversation } from "$lib/types/SharedConversation";
import type { MessageFile } from "$lib/types/Message";
export async function downloadFile(
sha256: string,
convId: Conversation["_id"] | SharedConversation["_id"]
): Promise<MessageFile & { type: "base64" }> {
const fileId = collections.bucket.find({ filename: `${convId.toString()}-${sha256}` });
const file = await fileId.next();
if (!file) {
error(404, "File not found");
}
if (file.metadata?.conversation !== convId.toString()) {
error(403, "You don't have access to this file.");
}
const mime = file.metadata?.mime;
const name = file.filename;
const fileStream = collections.bucket.openDownloadStream(file._id);
const buffer = await new Promise<Buffer>((resolve, reject) => {
const chunks: Uint8Array[] = [];
fileStream.on("data", (chunk) => chunks.push(chunk));
fileStream.on("error", reject);
fileStream.on("end", () => resolve(Buffer.concat(chunks)));
});
return { type: "base64", name, value: buffer.toString("base64"), mime };
}
| chat-ui/src/lib/server/files/downloadFile.ts/0 | {
"file_path": "chat-ui/src/lib/server/files/downloadFile.ts",
"repo_id": "chat-ui",
"token_count": 397
} |
import { env } from "$env/dynamic/private";
import type { ChatTemplateInput } from "$lib/types/Template";
import { compileTemplate } from "$lib/utils/template";
import { z } from "zod";
import endpoints, { endpointSchema, type Endpoint } from "./endpoints/endpoints";
import { endpointTgi } from "./endpoints/tgi/endpointTgi";
import { sum } from "$lib/utils/sum";
import { embeddingModels, validateEmbeddingModelByName } from "./embeddingModels";
import type { PreTrainedTokenizer } from "@huggingface/transformers";
import JSON5 from "json5";
import { getTokenizer } from "$lib/utils/getTokenizer";
import { logger } from "$lib/server/logger";
import { ToolResultStatus, type ToolInput } from "$lib/types/Tool";
import { isHuggingChat } from "$lib/utils/isHuggingChat";
type Optional<T, K extends keyof T> = Pick<Partial<T>, K> & Omit<T, K>;
const reasoningSchema = z.union([
z.object({
type: z.literal("regex"), // everything is reasoning, extract the answer from the regex
regex: z.string(),
}),
z.object({
type: z.literal("tokens"), // use beginning and end tokens that define the reasoning portion of the answer
beginToken: z.string(),
endToken: z.string(),
}),
z.object({
type: z.literal("summarize"), // everything is reasoning, summarize the answer
}),
]);
const modelConfig = z.object({
/** Used as an identifier in DB */
id: z.string().optional(),
/** Used to link to the model page, and for inference */
name: z.string().default(""),
displayName: z.string().min(1).optional(),
description: z.string().min(1).optional(),
logoUrl: z.string().url().optional(),
websiteUrl: z.string().url().optional(),
modelUrl: z.string().url().optional(),
tokenizer: z
.union([
z.string(),
z.object({
tokenizerUrl: z.string().url(),
tokenizerConfigUrl: z.string().url(),
}),
])
.optional(),
datasetName: z.string().min(1).optional(),
datasetUrl: z.string().url().optional(),
preprompt: z.string().default(""),
prepromptUrl: z.string().url().optional(),
chatPromptTemplate: z.string().optional(),
promptExamples: z
.array(
z.object({
title: z.string().min(1),
prompt: z.string().min(1),
})
)
.optional(),
endpoints: z.array(endpointSchema).optional(),
parameters: z
.object({
temperature: z.number().min(0).max(2).optional(),
truncate: z.number().int().positive().optional(),
max_new_tokens: z.number().int().positive().optional(),
stop: z.array(z.string()).optional(),
top_p: z.number().positive().optional(),
top_k: z.number().positive().optional(),
repetition_penalty: z.number().min(-2).max(2).optional(),
presence_penalty: z.number().min(-2).max(2).optional(),
})
.passthrough()
.optional(),
multimodal: z.boolean().default(false),
multimodalAcceptedMimetypes: z.array(z.string()).optional(),
tools: z.boolean().default(false),
unlisted: z.boolean().default(false),
embeddingModel: validateEmbeddingModelByName(embeddingModels).optional(),
/** Used to enable/disable system prompt usage */
systemRoleSupported: z.boolean().default(true),
reasoning: reasoningSchema.optional(),
});
const modelsRaw = z.array(modelConfig).parse(JSON5.parse(env.MODELS));
async function getChatPromptRender(
m: z.infer<typeof modelConfig>
): Promise<ReturnType<typeof compileTemplate<ChatTemplateInput>>> {
if (m.chatPromptTemplate) {
return compileTemplate<ChatTemplateInput>(m.chatPromptTemplate, m);
}
let tokenizer: PreTrainedTokenizer;
try {
tokenizer = await getTokenizer(m.tokenizer ?? m.id ?? m.name);
} catch (e) {
// if fetching the tokenizer fails but it wasn't manually set, use the default template
if (!m.tokenizer) {
logger.warn(
`No tokenizer found for model ${m.name}, using default template. Consider setting tokenizer manually or making sure the model is available on the hub.`,
m
);
return compileTemplate<ChatTemplateInput>(
"{{#if @root.preprompt}}<|im_start|>system\n{{@root.preprompt}}<|im_end|>\n{{/if}}{{#each messages}}{{#ifUser}}<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n{{/ifUser}}{{#ifAssistant}}{{content}}<|im_end|>\n{{/ifAssistant}}{{/each}}",
m
);
}
logger.error(
e,
`Failed to load tokenizer ${
m.tokenizer ?? m.id ?? m.name
} make sure the model is available on the hub and you have access to any gated models.`
);
process.exit();
}
const renderTemplate = ({
messages,
preprompt,
tools,
toolResults,
continueMessage,
}: ChatTemplateInput) => {
let formattedMessages: {
role: string;
content: string;
tool_calls?: { id: string; tool_call_id: string; output: string }[];
}[] = messages.map((message) => ({
content: message.content,
role: message.from,
}));
if (!m.systemRoleSupported) {
const firstSystemMessage = formattedMessages.find((msg) => msg.role === "system");
formattedMessages = formattedMessages.filter((msg) => msg.role !== "system");
if (
firstSystemMessage &&
formattedMessages.length > 0 &&
formattedMessages[0].role === "user"
) {
formattedMessages[0].content =
firstSystemMessage.content + "\n" + formattedMessages[0].content;
}
}
if (preprompt && formattedMessages[0].role !== "system") {
formattedMessages = [
{
role: m.systemRoleSupported ? "system" : "user",
content: preprompt,
},
...formattedMessages,
];
}
if (toolResults?.length) {
// todo: should update the command r+ tokenizer to support system messages at any location
// or use the `rag` mode without the citations
const id = m.id ?? m.name;
if (isHuggingChat && id.startsWith("CohereForAI")) {
formattedMessages = [
{
role: "user",
content:
"\n\n<results>\n" +
toolResults
.flatMap((result, idx) => {
if (result.status === ToolResultStatus.Error) {
return (
`Document: ${idx}\n` + `Tool "${result.call.name}" error\n` + result.message
);
}
return (
`Document: ${idx}\n` +
result.outputs
.flatMap((output) =>
Object.entries(output).map(([title, text]) => `${title}\n${text}`)
)
.join("\n")
);
})
.join("\n\n") +
"\n</results>",
},
...formattedMessages,
];
} else if (isHuggingChat && id.startsWith("meta-llama")) {
const results = toolResults.flatMap((result) => {
if (result.status === ToolResultStatus.Error) {
return [
{
tool_call_id: result.call.name,
output: "Error: " + result.message,
},
];
} else {
return result.outputs.map((output) => ({
tool_call_id: result.call.name,
output: JSON.stringify(output),
}));
}
});
formattedMessages = [
...formattedMessages,
{
role: "python",
content: JSON.stringify(results),
},
];
} else {
formattedMessages = [
...formattedMessages,
{
role: m.systemRoleSupported ? "system" : "user",
content: JSON.stringify(toolResults),
},
];
}
tools = [];
}
const mappedTools =
tools?.map((tool) => {
const inputs: Record<
string,
{
type: ToolInput["type"];
description: string;
required: boolean;
}
> = {};
for (const value of tool.inputs) {
if (value.paramType !== "fixed") {
inputs[value.name] = {
type: value.type,
description: value.description ?? "",
required: value.paramType === "required",
};
}
}
return {
name: tool.name,
description: tool.description,
parameter_definitions: inputs,
};
}) ?? [];
const output = tokenizer.apply_chat_template(formattedMessages, {
tokenize: false,
add_generation_prompt: !continueMessage,
tools: mappedTools.length ? mappedTools : undefined,
});
if (typeof output !== "string") {
throw new Error("Failed to apply chat template, the output is not a string");
}
return output;
};
return renderTemplate;
}
const processModel = async (m: z.infer<typeof modelConfig>) => ({
...m,
chatPromptRender: await getChatPromptRender(m),
id: m.id || m.name,
displayName: m.displayName || m.name,
preprompt: m.prepromptUrl ? await fetch(m.prepromptUrl).then((r) => r.text()) : m.preprompt,
parameters: { ...m.parameters, stop_sequences: m.parameters?.stop },
});
const addEndpoint = (m: Awaited<ReturnType<typeof processModel>>) => ({
...m,
getEndpoint: async (): Promise<Endpoint> => {
if (!m.endpoints) {
return endpointTgi({
type: "tgi",
url: `${env.HF_API_ROOT}/${m.name}`,
accessToken: env.HF_TOKEN ?? env.HF_ACCESS_TOKEN,
weight: 1,
model: m,
});
}
const totalWeight = sum(m.endpoints.map((e) => e.weight));
let random = Math.random() * totalWeight;
for (const endpoint of m.endpoints) {
if (random < endpoint.weight) {
const args = { ...endpoint, model: m };
switch (args.type) {
case "tgi":
return endpoints.tgi(args);
case "anthropic":
return endpoints.anthropic(args);
case "anthropic-vertex":
return endpoints.anthropicvertex(args);
case "bedrock":
return endpoints.bedrock(args);
case "aws":
return await endpoints.aws(args);
case "openai":
return await endpoints.openai(args);
case "llamacpp":
return endpoints.llamacpp(args);
case "ollama":
return endpoints.ollama(args);
case "vertex":
return await endpoints.vertex(args);
case "genai":
return await endpoints.genai(args);
case "cloudflare":
return await endpoints.cloudflare(args);
case "cohere":
return await endpoints.cohere(args);
case "langserve":
return await endpoints.langserve(args);
default:
// for legacy reason
return endpoints.tgi(args);
}
}
random -= endpoint.weight;
}
throw new Error(`Failed to select endpoint`);
},
});
const inferenceApiIds = isHuggingChat
? await fetch(
"https://huggingface.co/api/models?pipeline_tag=text-generation&inference=warm&filter=conversational"
)
.then((r) => r.json())
.then((json) => json.map((r: { id: string }) => r.id))
.catch((err) => {
logger.error(err, "Failed to fetch inference API ids");
return [];
})
: [];
export const models = await Promise.all(
modelsRaw.map((e) =>
processModel(e)
.then(addEndpoint)
.then(async (m) => ({
...m,
hasInferenceAPI: inferenceApiIds.includes(m.id ?? m.name),
}))
)
);
export type ProcessedModel = (typeof models)[number];
// super ugly but not sure how to make typescript happier
export const validModelIdSchema = z.enum(models.map((m) => m.id) as [string, ...string[]]);
export const defaultModel = models[0];
// Models that have been deprecated
export const oldModels = env.OLD_MODELS
? z
.array(
z.object({
id: z.string().optional(),
name: z.string().min(1),
displayName: z.string().min(1).optional(),
transferTo: validModelIdSchema.optional(),
})
)
.parse(JSON5.parse(env.OLD_MODELS))
.map((m) => ({ ...m, id: m.id || m.name, displayName: m.displayName || m.name }))
: [];
export const validateModel = (_models: BackendModel[]) => {
// Zod enum function requires 2 parameters
return z.enum([_models[0].id, ..._models.slice(1).map((m) => m.id)]);
};
// if `TASK_MODEL` is string & name of a model in `MODELS`, then we use `MODELS[TASK_MODEL]`, else we try to parse `TASK_MODEL` as a model config itself
export const smallModel = env.TASK_MODEL
? ((models.find((m) => m.name === env.TASK_MODEL) ||
(await processModel(modelConfig.parse(JSON5.parse(env.TASK_MODEL))).then((m) =>
addEndpoint(m)
))) ??
defaultModel)
: defaultModel;
export type BackendModel = Optional<
typeof defaultModel,
"preprompt" | "parameters" | "multimodal" | "unlisted" | "tools" | "hasInferenceAPI"
>;
| chat-ui/src/lib/server/models.ts/0 | {
"file_path": "chat-ui/src/lib/server/models.ts",
"repo_id": "chat-ui",
"token_count": 4853
} |
import { stringifyMarkdownElementTree } from "$lib/server/websearch/markdown/utils/stringify";
import { scrapeUrl } from "$lib/server/websearch/scrape/scrape";
import type { ConfigTool } from "$lib/types/Tool";
import { ObjectId } from "mongodb";
const fetchUrl: ConfigTool = {
_id: new ObjectId("00000000000000000000000B"),
type: "config",
description: "Fetch the contents of a URL",
color: "blue",
icon: "cloud",
displayName: "Fetch URL",
name: "fetchUrl",
endpoint: null,
inputs: [
{
name: "url",
type: "str",
description: "The URL of the webpage to fetch",
paramType: "required",
},
],
outputComponent: null,
outputComponentIdx: null,
showOutput: false,
async *call({ url }) {
const blocks = String(url).split("\n");
const urlStr = blocks[blocks.length - 1];
const { title, markdownTree } = await scrapeUrl(urlStr, Infinity);
return {
outputs: [{ title, text: stringifyMarkdownElementTree(markdownTree) }],
display: false,
};
},
};
export default fetchUrl;
| chat-ui/src/lib/server/tools/web/url.ts/0 | {
"file_path": "chat-ui/src/lib/server/tools/web/url.ts",
"repo_id": "chat-ui",
"token_count": 357
} |
import { WebSearchProvider, type WebSearchSource } from "$lib/types/WebSearch";
import { env } from "$env/dynamic/private";
import searchSerper from "./endpoints/serper";
import searchSerpApi from "./endpoints/serpApi";
import searchSerpStack from "./endpoints/serpStack";
import searchYouApi from "./endpoints/youApi";
import searchWebLocal from "./endpoints/webLocal";
import searchSearxng from "./endpoints/searxng";
import searchSearchApi from "./endpoints/searchApi";
import searchBing from "./endpoints/bing";
export function getWebSearchProvider() {
if (env.YDC_API_KEY) return WebSearchProvider.YOU;
if (env.SEARXNG_QUERY_URL) return WebSearchProvider.SEARXNG;
if (env.BING_SUBSCRIPTION_KEY) return WebSearchProvider.BING;
return WebSearchProvider.GOOGLE;
}
/** Searches the web using the first available provider, based on the env */
export async function searchWeb(query: string): Promise<WebSearchSource[]> {
if (env.USE_LOCAL_WEBSEARCH) return searchWebLocal(query);
if (env.SEARXNG_QUERY_URL) return searchSearxng(query);
if (env.SERPER_API_KEY) return searchSerper(query);
if (env.YDC_API_KEY) return searchYouApi(query);
if (env.SERPAPI_KEY) return searchSerpApi(query);
if (env.SERPSTACK_API_KEY) return searchSerpStack(query);
if (env.SEARCHAPI_KEY) return searchSearchApi(query);
if (env.BING_SUBSCRIPTION_KEY) return searchBing(query);
throw new Error(
"No configuration found for web search. Please set USE_LOCAL_WEBSEARCH, SEARXNG_QUERY_URL, SERPER_API_KEY, YDC_API_KEY, SERPSTACK_API_KEY, or SEARCHAPI_KEY in your environment variables."
);
}
| chat-ui/src/lib/server/websearch/search/endpoints.ts/0 | {
"file_path": "chat-ui/src/lib/server/websearch/search/endpoints.ts",
"repo_id": "chat-ui",
"token_count": 543
} |
import type { ObjectId } from "mongodb";
import type { User } from "./User";
import type { Assistant } from "./Assistant";
import type { Timestamps } from "./Timestamps";
export interface Report extends Timestamps {
_id: ObjectId;
createdBy: User["_id"] | string;
object: "assistant" | "tool";
contentId: Assistant["_id"];
reason?: string;
}
| chat-ui/src/lib/types/Report.ts/0 | {
"file_path": "chat-ui/src/lib/types/Report.ts",
"repo_id": "chat-ui",
"token_count": 112
} |
type UUID = ReturnType<typeof crypto.randomUUID>;
export function randomUUID(): UUID {
// Only on old safari / ios
if (!("randomUUID" in crypto)) {
return "10000000-1000-4000-8000-100000000000".replace(/[018]/g, (c) =>
(
Number(c) ^
(crypto.getRandomValues(new Uint8Array(1))[0] & (15 >> (Number(c) / 4)))
).toString(16)
) as UUID;
}
return crypto.randomUUID();
}
| chat-ui/src/lib/utils/randomUuid.ts/0 | {
"file_path": "chat-ui/src/lib/utils/randomUuid.ts",
"repo_id": "chat-ui",
"token_count": 166
} |
import type { Conversation } from "$lib/types/Conversation";
import type { Message } from "$lib/types/Message";
export function buildSubtree(
conv: Pick<Conversation, "messages" | "rootMessageId">,
id: Message["id"]
): Message[] {
if (!conv.rootMessageId) {
if (conv.messages.length === 0) return [];
// legacy conversation slice up to id
const index = conv.messages.findIndex((m) => m.id === id);
if (index === -1) throw new Error("Message not found");
return conv.messages.slice(0, index + 1);
} else {
// find the message with the right id then create the ancestor tree
const message = conv.messages.find((m) => m.id === id);
if (!message) throw new Error("Message not found");
return [
...(message.ancestors?.map((ancestorId) => {
const ancestor = conv.messages.find((m) => m.id === ancestorId);
if (!ancestor) throw new Error("Ancestor not found");
return ancestor;
}) ?? []),
message,
];
}
}
| chat-ui/src/lib/utils/tree/buildSubtree.ts/0 | {
"file_path": "chat-ui/src/lib/utils/tree/buildSubtree.ts",
"repo_id": "chat-ui",
"token_count": 329
} |
import { models } from "$lib/server/models";
export async function GET() {
const res = models
.filter((m) => m.unlisted == false)
.map((model) => ({
id: model.id,
name: model.name,
websiteUrl: model.websiteUrl ?? "https://huggingface.co",
modelUrl: model.modelUrl ?? "https://huggingface.co",
tokenizer: model.tokenizer,
datasetName: model.datasetName,
datasetUrl: model.datasetUrl,
displayName: model.displayName,
description: model.description ?? "",
logoUrl: model.logoUrl,
promptExamples: model.promptExamples ?? [],
preprompt: model.preprompt ?? "",
multimodal: model.multimodal ?? false,
unlisted: model.unlisted ?? false,
tools: model.tools ?? false,
hasInferenceAPI: model.hasInferenceAPI ?? false,
}));
return Response.json(res);
}
| chat-ui/src/routes/api/models/+server.ts/0 | {
"file_path": "chat-ui/src/routes/api/models/+server.ts",
"repo_id": "chat-ui",
"token_count": 311
} |
import { buildPrompt } from "$lib/buildPrompt";
import { authCondition } from "$lib/server/auth";
import { collections } from "$lib/server/database";
import { models } from "$lib/server/models";
import { buildSubtree } from "$lib/utils/tree/buildSubtree";
import { isMessageId } from "$lib/utils/tree/isMessageId";
import { error } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
export async function GET({ params, locals }) {
const conv =
params.id.length === 7
? await collections.sharedConversations.findOne({
_id: params.id,
})
: await collections.conversations.findOne({
_id: new ObjectId(params.id),
...authCondition(locals),
});
if (conv === null) {
error(404, "Conversation not found");
}
const messageId = params.messageId;
const messageIndex = conv.messages.findIndex((msg) => msg.id === messageId);
if (!isMessageId(messageId) || messageIndex === -1) {
error(404, "Message not found");
}
const model = models.find((m) => m.id === conv.model);
if (!model) {
error(404, "Conversation model not found");
}
const messagesUpTo = buildSubtree(conv, messageId);
const prompt = await buildPrompt({
preprompt: conv.preprompt,
messages: messagesUpTo,
model,
});
const userMessage = conv.messages[messageIndex];
const assistantMessage = conv.messages[messageIndex + 1];
return new Response(
JSON.stringify(
{
note: "This is a preview of the prompt that will be sent to the model when retrying the message. It may differ from what was sent in the past if the parameters have been updated since",
prompt,
model: model.name,
parameters: {
...model.parameters,
return_full_text: false,
},
userMessage,
...(assistantMessage ? { assistantMessage } : {}),
},
null,
2
),
{ headers: { "Content-Type": "application/json" } }
);
}
| chat-ui/src/routes/conversation/[id]/message/[messageId]/prompt/+server.ts/0 | {
"file_path": "chat-ui/src/routes/conversation/[id]/message/[messageId]/prompt/+server.ts",
"repo_id": "chat-ui",
"token_count": 654
} |
<script lang="ts">
import { env as envPublic } from "$env/dynamic/public";
import { isHuggingChat } from "$lib/utils/isHuggingChat";
import logo from "../../../../../static/huggingchat/logo.svg?raw";
interface Props {
name: string;
logoUrl: string | undefined;
}
let { name, logoUrl }: Props = $props();
</script>
<div class=" flex h-[648px] w-full flex-col items-center bg-white">
<div class="flex flex-1 flex-col items-center justify-center">
{#if logoUrl}
<img class="h-48 w-48" src={logoUrl} alt="avatar" />
{/if}
<h1 class="m-0 text-5xl font-bold text-black">
{name}
</h1>
</div>
<div
class="flex h-[200px] w-full flex-col items-center justify-center rounded-b-none bg-{envPublic.PUBLIC_APP_COLOR}-500/10 pb-10 pt-10 text-4xl text-gray-500"
style="border-radius: 100% 100% 0 0;"
>
Try it now
{#if isHuggingChat}
on
{/if}
{#if isHuggingChat}
<div class="flex flex-row pt-3 text-5xl font-bold text-black">
<div class="mr-5 flex items-center justify-center" id="logo">
<!-- eslint-disable-next-line -->
{@html logo}
</div>
<span>HuggingChat</span>
</div>
{/if}
</div>
</div>
| chat-ui/src/routes/models/[...model]/thumbnail.png/ModelThumbnail.svelte/0 | {
"file_path": "chat-ui/src/routes/models/[...model]/thumbnail.png/ModelThumbnail.svelte",
"repo_id": "chat-ui",
"token_count": 503
} |
<script lang="ts">
import type { ActionData, PageData } from "./$types";
import AssistantSettings from "$lib/components/AssistantSettings.svelte";
interface Props {
data: PageData;
form: ActionData;
}
let { data, form = $bindable() }: Props = $props();
</script>
<AssistantSettings bind:form models={data.models} />
| chat-ui/src/routes/settings/(nav)/assistants/new/[email protected]/0 | {
"file_path": "chat-ui/src/routes/settings/(nav)/assistants/new/[email protected]",
"repo_id": "chat-ui",
"token_count": 108
} |
import { sveltekit } from "@sveltejs/kit/vite";
import Icons from "unplugin-icons/vite";
import { promises } from "fs";
import { defineConfig } from "vitest/config";
// used to load fonts server side for thumbnail generation
function loadTTFAsArrayBuffer() {
return {
name: "load-ttf-as-array-buffer",
async transform(_src, id) {
if (id.endsWith(".ttf")) {
return `export default new Uint8Array([
${new Uint8Array(await promises.readFile(id))}
]).buffer`;
}
},
};
}
export default defineConfig({
plugins: [
sveltekit(),
Icons({
compiler: "svelte",
}),
loadTTFAsArrayBuffer(),
],
optimizeDeps: {
include: [
"browser-image-resizer",
"uuid",
"@huggingface/transformers",
"sharp",
"@gradio/client",
],
},
server: {
open: "/",
},
test: {
setupFiles: ["./scripts/setupTest.ts"],
deps: { inline: ["@sveltejs/kit"] },
globals: true,
testTimeout: 10000,
},
});
| chat-ui/vite.config.ts/0 | {
"file_path": "chat-ui/vite.config.ts",
"repo_id": "chat-ui",
"token_count": 393
} |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
with open(input_json_file, encoding="utf-8") as f:
results = json.load(f)
output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
for benchmark_name in sorted(results):
benchmark_res = results[benchmark_name]
benchmark_file_name = benchmark_name.split("/")[-1]
output_md.append(f"### Benchmark: {benchmark_file_name}")
title = "| metric |"
lines = "|--------|"
value = "| new / old (diff) |"
for metric_name in sorted(benchmark_res):
metric_vals = benchmark_res[metric_name]
new_val = metric_vals["new"]
old_val = metric_vals.get("old", None)
dif_val = metric_vals.get("diff", None)
val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
if old_val is not None:
val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
if dif_val is not None:
val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("</details>")
with open(output_md_file, "w", encoding="utf-8") as f:
f.writelines("\n".join(output_md))
if __name__ == "__main__":
input_json_file = sys.argv[1]
output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| datasets/benchmarks/format.py/0 | {
"file_path": "datasets/benchmarks/format.py",
"repo_id": "datasets",
"token_count": 746
} |
# Create an image dataset
There are two methods for creating and sharing an image dataset. This guide will show you how to:
* Create an image dataset from local files in Python with [`Dataset.push_to_hub`]. This is an easy way that requires only a few steps in Python.
* Create an image dataset with `ImageFolder` and some metadata. This is a no-code solution for quickly creating an image dataset with several thousand images.
<Tip>
You can control access to your dataset by requiring users to share their contact information first. Check out the [Gated datasets](https://huggingface.co/docs/hub/datasets-gated) guide for more information about how to enable this feature on the Hub.
</Tip>
## ImageFolder
The `ImageFolder` is a dataset builder designed to quickly load an image dataset with several thousand images without requiring you to write any code.
<Tip>
💡 Take a look at the [Split pattern hierarchy](repository_structure#split-pattern-hierarchy) to learn more about how `ImageFolder` creates dataset splits based on your dataset repository structure.
</Tip>
`ImageFolder` automatically infers the class labels of your dataset based on the directory name. Store your dataset in a directory structure like:
```
folder/train/dog/golden_retriever.png
folder/train/dog/german_shepherd.png
folder/train/dog/chihuahua.png
folder/train/cat/maine_coon.png
folder/train/cat/bengal.png
folder/train/cat/birman.png
```
Then users can load your dataset by specifying `imagefolder` in [`load_dataset`] and the directory in `data_dir`:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder")
```
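Each example then holds the decoded image and, when the directory structure provides one, an integer class label. For a quick sanity check (column names assume the default `imagefolder` behavior):

```py
>>> example = dataset["train"][0]
>>> example["image"]   # a PIL.Image.Image, decoded on access
>>> example["label"]   # an integer class id, e.g. 0 for "cat" and 1 for "dog"
```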
You can also use `imagefolder` to load datasets involving multiple splits. To do so, your dataset directory should have the following structure:
```
folder/train/dog/golden_retriever.png
folder/train/cat/maine_coon.png
folder/test/dog/german_shepherd.png
folder/test/cat/bengal.png
```
<Tip warning={true}>
If all image files are contained in a single directory or if they are not on the same level of directory structure, the `label` column won't be added automatically. If you need it, set `drop_labels=False` explicitly.
</Tip>
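For example, to keep the directory-based labels in that situation, you can pass the flag explicitly (the path is illustrative):

```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", drop_labels=False)
```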
If there is additional information you'd like to include about your dataset, like text captions or bounding boxes, add it as a `metadata.csv` file in your folder. This lets you quickly create datasets for different computer vision tasks like text captioning or object detection. You can also use a JSONL file `metadata.jsonl`.
```
folder/train/metadata.csv
folder/train/0001.png
folder/train/0002.png
folder/train/0003.png
```
You can also zip your images:
```
folder/metadata.csv
folder/train.zip
folder/test.zip
folder/valid.zip
```
Your `metadata.csv` file must have a `file_name` column which links image files with their metadata:
```csv
file_name,additional_feature
0001.png,This is a first value of a text feature you added to your images
0002.png,This is a second value of a text feature you added to your images
0003.png,This is a third value of a text feature you added to your images
```
or using `metadata.jsonl`:
```jsonl
{"file_name": "0001.png", "additional_feature": "This is a first value of a text feature you added to your images"}
{"file_name": "0002.png", "additional_feature": "This is a second value of a text feature you added to your images"}
{"file_name": "0003.png", "additional_feature": "This is a third value of a text feature you added to your images"}
```
<Tip>
If metadata files are present, the inferred labels based on the directory name are dropped by default. To include those labels, set `drop_labels=False` in `load_dataset`.
</Tip>
### Image captioning
Image captioning datasets have text describing an image. An example `metadata.csv` may look like:
```csv
file_name,text
0001.png,This is a golden retriever playing with a ball
0002.png,A german shepherd
0003.png,One chihuahua
```
Load the dataset with `ImageFolder`, and it will create a `text` column for the image captions:
```py
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", split="train")
>>> dataset[0]["text"]
"This is a golden retriever playing with a ball"
```
### Object detection
Object detection datasets have bounding boxes and categories identifying objects in an image. An example `metadata.jsonl` may look like:
```jsonl
{"file_name": "0001.png", "objects": {"bbox": [[302.0, 109.0, 73.0, 52.0]], "categories": [0]}}
{"file_name": "0002.png", "objects": {"bbox": [[810.0, 100.0, 57.0, 28.0]], "categories": [1]}}
{"file_name": "0003.png", "objects": {"bbox": [[160.0, 31.0, 248.0, 616.0], [741.0, 68.0, 202.0, 401.0]], "categories": [2, 2]}}
```
Load the dataset with `ImageFolder`, and it will create a `objects` column with the bounding boxes and the categories:
```py
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", split="train")
>>> dataset[0]["objects"]
{"bbox": [[302.0, 109.0, 73.0, 52.0]], "categories": [0]}
```
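To sanity-check the annotations, a small sketch like the one below can overlay the boxes on the image. It reuses the `dataset` loaded above and assumes the `[x_min, y_min, width, height]` bounding-box convention shown in the example metadata; the output file name is arbitrary:

```py
>>> from PIL import ImageDraw
>>> example = dataset[0]
>>> image = example["image"].convert("RGB")  # decoded as a PIL image by ImageFolder
>>> draw = ImageDraw.Draw(image)
>>> for x, y, w, h in example["objects"]["bbox"]:
...     draw.rectangle([x, y, x + w, y + h], outline="red", width=3)
>>> image.save("annotated_example.png")
```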
### Upload dataset to the Hub
Once you've created a dataset, you can share it to the Hub with the [`~datasets.DatasetDict.push_to_hub`] method. Make sure you have the [huggingface_hub](https://huggingface.co/docs/huggingface_hub/index) library installed and you're logged in to your Hugging Face account (see the [Upload with Python tutorial](upload_dataset#upload-with-python) for more details).
Upload your dataset with [`~datasets.DatasetDict.push_to_hub`]:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", split="train")
>>> dataset.push_to_hub("stevhliu/my-image-captioning-dataset")
```
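Once pushed, anyone with access to the repository can reload it from the Hub; the repository id below mirrors the example above:

```py
>>> dataset = load_dataset("stevhliu/my-image-captioning-dataset", split="train")
```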
## WebDataset
The [WebDataset](https://github.com/webdataset/webdataset) format is based on TAR archives and is suitable for big image datasets.
Indeed you can group your images in TAR archives (e.g. 1GB of images per TAR archive) and have thousands of TAR archives:
```
folder/train/00000.tar
folder/train/00001.tar
folder/train/00002.tar
...
```
In the archives, each example is made of files sharing the same prefix:
```
e39871fd9fd74f55.jpg
e39871fd9fd74f55.json
f18b91585c4d3f3e.jpg
f18b91585c4d3f3e.json
ede6e66b2fb59aab.jpg
ede6e66b2fb59aab.json
ed600d57fcee4f94.jpg
ed600d57fcee4f94.json
...
```
You can provide your image labels, captions, or bounding boxes as JSON or text files, for example.
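As a rough sketch of how such a shard can be assembled (this is not official tooling; the image paths and label contents below are placeholders), Python's standard `tarfile` module is enough:

```py
import io
import json
import tarfile

samples = {
    "e39871fd9fd74f55": {"path": "images/e39871fd9fd74f55.jpg", "label": {"categories": [0]}},
    "f18b91585c4d3f3e": {"path": "images/f18b91585c4d3f3e.jpg", "label": {"categories": [1]}},
}

with tarfile.open("folder/train/00000.tar", "w") as tar:
    for key, sample in samples.items():
        # the image and its metadata share the same prefix inside the archive
        tar.add(sample["path"], arcname=f"{key}.jpg")
        payload = json.dumps(sample["label"]).encode("utf-8")
        info = tarfile.TarInfo(name=f"{key}.json")
        info.size = len(payload)
        tar.addfile(info, io.BytesIO(payload))
```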
For more details on the WebDataset format and the python library, please check the [WebDataset documentation](https://webdataset.github.io/webdataset).
Load your WebDataset and it will create one column per file suffix (here "jpg" and "json"):
```python
>>> from datasets import load_dataset
>>> dataset = load_dataset("webdataset", data_dir="/path/to/folder", split="train")
>>> dataset[0]["json"]
{"bbox": [[302.0, 109.0, 73.0, 52.0]], "categories": [0]}
```
## (Legacy) Loading script
Write a dataset loading script to share a dataset. It defines a dataset's splits and configurations, and handles downloading and generating a dataset. The script is located in the same folder or repository as the dataset and should have the same name.
```
my_dataset/
├── README.md
├── my_dataset.py
└── data/ # optional, may contain your images or TAR archives
```
This structure allows your dataset to be loaded in one line:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("path/to/my_dataset")
```
This guide will show you how to create a dataset loading script for image datasets, which is a bit different from <a class="underline decoration-green-400 decoration-2 font-semibold" href="./dataset_script">creating a loading script for text datasets</a>. You'll learn how to:
* Create a dataset builder class.
* Create dataset configurations.
* Add dataset metadata.
* Download and define the dataset splits.
* Generate the dataset.
* Generate the dataset metadata (optional).
* Upload the dataset to the Hub.
The best way to learn is to open up an existing image dataset loading script, like [Food-101](https://huggingface.co/datasets/food101/blob/main/food101.py), and follow along!
<Tip>
To help you get started, we created a loading script [template](https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py) you can copy and use as a starting point!
</Tip>
### Create a dataset builder class
[`GeneratorBasedBuilder`] is the base class for datasets generated from a dictionary generator. Within this class, there are three methods to help create your dataset:
* `info` stores information about your dataset like its description, license, and features.
* `split_generators` downloads the dataset and defines its splits.
* `generate_examples` generates the images and labels for each split.
Start by creating your dataset class as a subclass of [`GeneratorBasedBuilder`] and add the three methods. Don't worry about filling in each of these methods yet, you'll develop those over the next few sections:
```py
class Food101(datasets.GeneratorBasedBuilder):
"""Food-101 Images dataset"""
    def _info(self):
        ...

    def _split_generators(self, dl_manager):
        ...

    def _generate_examples(self, images, metadata_path):
        ...
```
#### Multiple configurations
In some cases, a dataset may have more than one configuration. For example, if you check out the [Imagenette dataset](https://huggingface.co/datasets/frgfm/imagenette), you'll notice there are three subsets.
To create different configurations, use the [`BuilderConfig`] class to create a subclass for your dataset. Provide the links to download the images and labels in `data_url` and `metadata_urls`:
```py
class Food101Config(datasets.BuilderConfig):
"""Builder Config for Food-101"""
def __init__(self, data_url, metadata_urls, **kwargs):
"""BuilderConfig for Food-101.
Args:
data_url: `string`, url to download the zip file from.
metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
**kwargs: keyword arguments forwarded to super.
"""
super(Food101Config, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
self.data_url = data_url
self.metadata_urls = metadata_urls
```
Now you can define your subsets at the top of your [`GeneratorBasedBuilder`] subclass. Imagine you want to create two subsets of the Food-101 dataset based on whether a food is eaten at breakfast or dinner.
1. Define your subsets with `Food101Config` in a list in `BUILDER_CONFIGS`.
2. For each configuration, provide a name, description, and where to download the images and labels from.
```py
class Food101(datasets.GeneratorBasedBuilder):
"""Food-101 Images dataset"""
BUILDER_CONFIGS = [
Food101Config(
name="breakfast",
description="Food types commonly eaten during breakfast.",
data_url="https://link-to-breakfast-foods.zip",
metadata_urls={
"train": "https://link-to-breakfast-foods-train.txt",
"validation": "https://link-to-breakfast-foods-validation.txt"
},
        ),
Food101Config(
name="dinner",
description="Food types commonly eaten during dinner.",
data_url="https://link-to-dinner-foods.zip",
metadata_urls={
"train": "https://link-to-dinner-foods-train.txt",
"validation": "https://link-to-dinner-foods-validation.txt"
},
        ),
]
```
Now if users want to load the `breakfast` configuration, they can use the configuration name:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("food101", "breakfast", split="train")
```
### Add dataset metadata
Adding information about your dataset is useful for users to learn more about it. This information is stored in the [`DatasetInfo`] class which is returned by the `info` method. Users can access this information by:
```py
>>> from datasets import load_dataset_builder
>>> ds_builder = load_dataset_builder("food101")
>>> ds_builder.info
```
There is a lot of information you can specify about your dataset, but some important ones to include are:
1. `description` provides a concise description of the dataset.
2. `features` specifies the dataset column types. Since you're creating an image loading script, you'll need to include the [`Image`] feature.
3. `supervised_keys` specifies the input feature and the label.
4. `homepage` provides a link to the dataset homepage.
5. `citation` is a BibTeX citation of the dataset.
6. `license` states the dataset's license.
<Tip>
You'll notice a lot of the dataset information is defined earlier in the loading script, which makes it easier to read. There are also other [`Features`] you can specify, so be sure to check out the full list for more details.
</Tip>
```py
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"image": datasets.Image(),
"label": datasets.ClassLabel(names=_NAMES),
}
),
supervised_keys=("image", "label"),
homepage=_HOMEPAGE,
citation=_CITATION,
license=_LICENSE,
)
```
### Download and define the dataset splits
Now that you've added some information about your dataset, the next step is to download the dataset and generate the splits.
1. Use the [`DownloadManager.download`] method to download the dataset and any other metadata you'd like to associate with it. This method accepts:
* a relative path to a file inside a Hub dataset repository (for example, a file in its `data/` folder)
* a URL to a file hosted somewhere else
* a list or dictionary of file names or URLs
In the Food-101 loading script, you'll notice again the URLs are defined earlier in the script.
2. After you've downloaded the dataset, use the [`SplitGenerator`] to organize the images and labels in each split. Name each split with a standard name like: `Split.TRAIN`, `Split.TEST`, and `Split.VALIDATION`.
In the `gen_kwargs` parameter, specify the file paths to the `images` to iterate over and load. If necessary, you can use [`DownloadManager.iter_archive`] to iterate over images in TAR archives. You can also specify the associated labels in the `metadata_path`. The `images` and `metadata_path` are passed on to the next step, where you'll actually generate the dataset.
<Tip warning={true}>
To stream a TAR archive file, you need to use [`DownloadManager.iter_archive`]! The [`DownloadManager.download_and_extract`] function does not support TAR archives in streaming mode.
</Tip>
```py
def _split_generators(self, dl_manager):
archive_path = dl_manager.download(_BASE_URL)
split_metadata_paths = dl_manager.download(_METADATA_URLS)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"images": dl_manager.iter_archive(archive_path),
"metadata_path": split_metadata_paths["train"],
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"images": dl_manager.iter_archive(archive_path),
"metadata_path": split_metadata_paths["test"],
},
),
]
```
### Generate the dataset
The last method in the [`GeneratorBasedBuilder`] class actually generates the images and labels in the dataset. It yields a dataset according to the structure specified in `features` from the `info` method. As you can see, `generate_examples` accepts the `images` and `metadata_path` from the previous method as arguments.
<Tip warning={true}>
To stream a TAR archive file, the `metadata_path` needs to be opened and read first. TAR files are accessed and yielded sequentially. This means you need to have the metadata information in hand first so you can yield it with its corresponding image.
</Tip>
Now you can write a function for opening and loading examples from the dataset:
```py
def _generate_examples(self, images, metadata_path):
"""Generate images and labels for splits."""
with open(metadata_path, encoding="utf-8") as f:
files_to_keep = set(f.read().split("\n"))
for file_path, file_obj in images:
if file_path.startswith(_IMAGES_DIR):
if file_path[len(_IMAGES_DIR) : -len(".jpg")] in files_to_keep:
label = file_path.split("/")[2]
yield file_path, {
"image": {"path": file_path, "bytes": file_obj.read()},
"label": label,
}
```
### Generate the dataset metadata (optional)
The dataset metadata can be generated and stored in the dataset card (`README.md` file).
Run the following command to generate your dataset metadata in `README.md` and make sure your new loading script works correctly:
```bash
datasets-cli test path/to/<your-dataset-loading-script> --save_info --all_configs
```
If your loading script passes the test, you should now have the `dataset_info` YAML fields in the header of the `README.md` file in your dataset folder.
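You can also double-check the exported metadata programmatically, for example (a quick sanity check, assuming your script lives in `my_dataset/`):
```py
>>> from datasets import load_dataset_builder
>>> ds_builder = load_dataset_builder("path/to/my_dataset")
>>> ds_builder.info.splits
>>> ds_builder.info.features
```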
### Upload the dataset to the Hub
Once your script is ready, [create a dataset card](./dataset_card) and [upload it to the Hub](./share).
Congratulations, you can now load your dataset from the Hub! 🥳
```py
>>> from datasets import load_dataset
>>> load_dataset("<username>/my_dataset")
```
| datasets/docs/source/image_dataset.mdx/0 | {
"file_path": "datasets/docs/source/image_dataset.mdx",
"repo_id": "datasets",
"token_count": 5689
} |
# Utilities
## Configure logging
🤗 Datasets strives to be transparent and explicit about how it works, but this can be quite verbose at times. We have included a series of logging methods which allow you to easily adjust the level of verbosity of the entire library. Currently the default verbosity of the library is set to `WARNING`.
To change the level of verbosity, use one of the direct setters. For instance, here is how to change the verbosity to the `INFO` level:
```py
import datasets
datasets.logging.set_verbosity_info()
```
You can also use the environment variable `DATASETS_VERBOSITY` to override the default verbosity, and set it to one of the following: `debug`, `info`, `warning`, `error`, `critical`:
```bash
DATASETS_VERBOSITY=error ./myprogram.py
```
All the methods of this logging module are documented below. The main ones are:
- [`logging.get_verbosity`] to get the current level of verbosity in the logger
- [`logging.set_verbosity`] to set the verbosity to the level of your choice
In order from the least to the most verbose (with their corresponding `int` values):
1. `logging.CRITICAL` or `logging.FATAL` (int value, 50): only report the most critical errors.
2. `logging.ERROR` (int value, 40): only report errors.
3. `logging.WARNING` or `logging.WARN` (int value, 30): only reports errors and warnings. This is the default level used by the library.
4. `logging.INFO` (int value, 20): reports errors, warnings, and basic information.
5. `logging.DEBUG` (int value, 10): reports all information.
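For example, to only report errors and silence warnings (a minimal sketch using the setters documented below):
```py
import datasets

datasets.logging.set_verbosity_error()
assert datasets.logging.get_verbosity() == datasets.logging.ERROR
```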
[[autodoc]] datasets.logging.get_verbosity
[[autodoc]] datasets.logging.set_verbosity
[[autodoc]] datasets.logging.set_verbosity_info
[[autodoc]] datasets.logging.set_verbosity_warning
[[autodoc]] datasets.logging.set_verbosity_debug
[[autodoc]] datasets.logging.set_verbosity_error
[[autodoc]] datasets.logging.disable_propagation
[[autodoc]] datasets.logging.enable_propagation
## Configure progress bars
By default, `tqdm` progress bars will be displayed during dataset download and preprocessing. You can disable them globally by setting the `HF_DATASETS_DISABLE_PROGRESS_BARS`
environment variable. You can also enable/disable them using [`~utils.enable_progress_bars`] and [`~utils.disable_progress_bars`]. If set, the environment variable takes priority over the helpers.
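For example (a minimal sketch of the helpers documented below):
```py
from datasets.utils import are_progress_bars_disabled, disable_progress_bars, enable_progress_bars

disable_progress_bars()
assert are_progress_bars_disabled()
enable_progress_bars()
```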
[[autodoc]] datasets.utils.enable_progress_bars
[[autodoc]] datasets.utils.disable_progress_bars
[[autodoc]] datasets.utils.are_progress_bars_disabled | datasets/docs/source/package_reference/utilities.mdx/0 | {
"file_path": "datasets/docs/source/package_reference/utilities.mdx",
"repo_id": "datasets",
"token_count": 725
} |
# Use with PyArrow
This document is a quick introduction to using `datasets` with PyArrow, with a particular focus on how to process
datasets using Arrow compute functions, and how to convert a dataset to PyArrow or from PyArrow.
This is particularly useful as it allows fast zero-copy operations, since `datasets` uses PyArrow under the hood.
## Dataset format
By default, datasets return regular Python objects: integers, floats, strings, lists, etc.
To get PyArrow Tables or Arrays instead, you can set the format of the dataset to `arrow` using [`Dataset.with_format`]:
```py
>>> from datasets import Dataset
>>> data = {"col_0": ["a", "b", "c", "d"], "col_1": [0., 0., 1., 1.]}
>>> ds = Dataset.from_dict(data)
>>> ds = ds.with_format("arrow")
>>> ds[0] # pa.Table
pyarrow.Table
col_0: string
col_1: double
----
col_0: [["a"]]
col_1: [[0]]
>>> ds[:2] # pa.Table
pyarrow.Table
col_0: string
col_1: double
----
col_0: [["a","b"]]
col_1: [[0,0]]
>>> ds["data"] # pa.array
<pyarrow.lib.ChunkedArray object at 0x1394312a0>
[
[
"a",
"b",
"c",
"d"
]
]
```
This also works for `IterableDataset` objects obtained e.g. using `load_dataset(..., streaming=True)`:
```py
>>> ds = ds.with_format("arrow")
>>> for df in ds.iter(batch_size=2):
... print(df)
... break
pyarrow.Table
col_0: string
col_1: double
----
col_0: [["a","b"]]
col_1: [[0,0]]
```
## Process data
PyArrow functions are generally faster than regular hand-written python functions, and therefore they are a good option to optimize data processing. You can use Arrow compute functions to process a dataset in [`Dataset.map`] or [`Dataset.filter`]:
```python
>>> import pyarrow.compute as pc
>>> from datasets import Dataset
>>> data = {"col_0": ["a", "b", "c", "d"], "col_1": [0., 0., 1., 1.]}
>>> ds = Dataset.from_dict(data)
>>> ds = ds.with_format("arrow")
>>> ds = ds.map(lambda t: t.append_column("col_2", pc.add(t["col_1"], 1)), batched=True)
>>> ds[:2]
pyarrow.Table
col_0: string
col_1: double
col_2: double
----
col_0: [["a","b"]]
col_1: [[0,0]]
col_2: [[1,1]]
>>> ds = ds.filter(lambda t: pc.equal(t["col_0"], "b"), batched=True)
>>> ds[0]
pyarrow.Table
col_0: string
col_1: double
col_2: double
----
col_0: [["b"]]
col_1: [[0]]
col_2: [[1]]
```
We use `batched=True` because it is faster to process batches of data in PyArrow rather than row by row. It's also possible to use `batch_size=` in `map()` to set the size of each batch passed to your function.
This also works for [`IterableDataset.map`] and [`IterableDataset.filter`].
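For instance, you can pass `batch_size=` to control how many rows each PyArrow table handed to your function contains (a small sketch reusing the dataset above; `col_3` is just an illustrative column name):
```python
>>> ds = ds.map(lambda t: t.append_column("col_3", pc.multiply(t["col_1"], 2)), batched=True, batch_size=2)
```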
## Import or Export from PyArrow
A [`Dataset`] is a wrapper around a PyArrow Table, so you can instantiate a Dataset directly from a Table:
```python
ds = Dataset(table)
```
You can access the PyArrow Table of a dataset using [`Dataset.data`], which returns a [`MemoryMappedTable`], an [`InMemoryTable`], or a [`ConcatenationTable`], depending on the origin of the Arrow data and the operations that were applied.
Those objects wrap the underlying PyArrow table accessible at `Dataset.data.table`. This table contains all the data of the dataset, but there might also be an indices mapping at `Dataset._indices` which maps the dataset row indices to the PyArrow Table row indices. This can happen if the dataset has been shuffled with [`Dataset.shuffle`] or if only a subset of the rows is used (e.g. after a [`Dataset.select`]).
In the general case, you can export a dataset to a PyArrow Table using `table = ds.with_format("arrow")[:]`.
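Putting both directions together, a round trip between PyArrow and [`Dataset`] can look like this (a minimal sketch):
```python
>>> import pyarrow as pa
>>> from datasets import Dataset
>>> table = pa.table({"col_0": ["a", "b"], "col_1": [0.0, 1.0]})
>>> ds = Dataset(table)                    # import from PyArrow
>>> exported = ds.with_format("arrow")[:]  # export back to a pyarrow.Table
>>> exported.num_rows
2
```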
| datasets/docs/source/use_with_pyarrow.mdx/0 | {
"file_path": "datasets/docs/source/use_with_pyarrow.mdx",
"repo_id": "datasets",
"token_count": 1257
} |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"tfds\.core", r"datasets"),
(r"tf\.io\.gfile\.GFile", r"open"),
(r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
(r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
(r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
(r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
(r"tfds\.features\.FeaturesDict\(", r"dict("),
(r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(r"tfds\.", r"datasets."),
(r"dl_manager\.manual_dir", r"self.config.data_dir"),
(r"self\.builder_config", r"self.config"),
]
def convert_command_factory(args: Namespace):
"""
    Factory function used to convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.
Returns: ConvertCommand
"""
return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
"""
Register this command to argparse so it's available for the datasets-cli
Args:
parser: Root parser to register command-specific arguments
"""
train_parser = parser.add_parser(
"convert",
help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
)
train_parser.add_argument(
"--tfds_path",
type=str,
required=True,
help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
)
train_parser.add_argument(
"--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
)
train_parser.set_defaults(func=convert_command_factory)
def __init__(self, tfds_path: str, datasets_directory: str, *args):
self._logger = get_logger("datasets-cli/converting")
self._tfds_path = tfds_path
self._datasets_directory = datasets_directory
def run(self):
if os.path.isdir(self._tfds_path):
abs_tfds_path = os.path.abspath(self._tfds_path)
elif os.path.isfile(self._tfds_path):
abs_tfds_path = os.path.dirname(self._tfds_path)
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
abs_datasets_path = os.path.abspath(self._datasets_directory)
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
utils_files = []
with_manual_update = []
imports_to_builder_map = {}
if os.path.isdir(self._tfds_path):
file_names = os.listdir(abs_tfds_path)
else:
file_names = [os.path.basename(self._tfds_path)]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}")
input_file = os.path.join(abs_tfds_path, f_name)
output_file = os.path.join(abs_datasets_path, f_name)
if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file")
continue
with open(input_file, encoding="utf-8") as f:
lines = f.readlines()
out_lines = []
is_builder = False
needs_manual_update = False
tfds_imports = []
for line in lines:
out_line = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
out_line = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
out_line = ""
continue
elif "from absl import logging" in out_line:
out_line = "from datasets import logging\n"
elif "getLogger" in out_line:
out_line = out_line.replace("getLogger", "get_logger")
elif any(expression in out_line for expression in TO_HIGHLIGHT):
needs_manual_update = True
to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
out_lines.append(out_line)
out_lines.append(HIGHLIGHT_MESSAGE_POST)
continue
else:
for pattern, replacement in TO_CONVERT:
out_line = re.sub(pattern, replacement, out_line)
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
out_line = "from . import " + match.group(1)
                    # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}")
if "GeneratorBasedBuilder" in out_line:
is_builder = True
out_lines.append(out_line)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
dir_name = f_name.replace(".py", "")
output_dir = os.path.join(abs_datasets_path, dir_name)
output_file = os.path.join(output_dir, f_name)
os.makedirs(output_dir, exist_ok=True)
self._logger.info(f"Adding directory {output_dir}")
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
else:
# Utilities will be moved at the end
utils_files.append(output_file)
if needs_manual_update:
with_manual_update.append(output_file)
with open(output_file, "w", encoding="utf-8") as f:
f.writelines(out_lines)
self._logger.info(f"Converted in {output_file}")
for utils_file in utils_files:
try:
f_name = os.path.basename(utils_file)
dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
self._logger.info(f"Moving {dest_folder} to {utils_file}")
shutil.copy(utils_file, dest_folder)
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
)
| datasets/src/datasets/commands/convert.py/0 | {
"file_path": "datasets/src/datasets/commands/convert.py",
"repo_id": "datasets",
"token_count": 3811
} |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.download_config import DownloadConfig
from ..table import array_cast
from ..utils.file_utils import xopen, xsplitext
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Audio:
"""Audio [`Feature`] to extract audio data from an audio file.
Input: The Audio feature accepts as input:
- A `str`: Absolute path to the audio file (i.e. random access is allowed).
- A `dict` with the keys:
- `path`: String with relative path of the audio file to the archive file.
- `bytes`: Bytes content of the audio file.
This is useful for archived files with sequential access.
- A `dict` with the keys:
- `path`: String with relative path of the audio file to the archive file.
- `array`: Array containing the audio sample
- `sampling_rate`: Integer corresponding to the sampling rate of the audio sample.
This is useful for archived files with sequential access.
Args:
sampling_rate (`int`, *optional*):
Target sampling rate. If `None`, the native sampling rate is used.
mono (`bool`, defaults to `True`):
Whether to convert the audio signal to mono by averaging samples across
channels.
decode (`bool`, defaults to `True`):
Whether to decode the audio data. If `False`,
returns the underlying dictionary in the format `{"path": audio_path, "bytes": audio_bytes}`.
Example:
```py
>>> from datasets import load_dataset, Audio
>>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train")
>>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
>>> ds[0]["audio"]
{'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ...,
3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
'sampling_rate': 16000}
```
"""
sampling_rate: Optional[int] = None
mono: bool = True
decode: bool = True
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
_type: str = field(default="Audio", init=False, repr=False)
def __call__(self):
return self.pa_type
def encode_example(self, value: Union[str, bytes, dict]) -> dict:
"""Encode example into a format for Arrow.
Args:
value (`str` or `dict`):
Data passed as input to Audio feature.
Returns:
`dict`
"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
if isinstance(value, str):
return {"bytes": None, "path": value}
elif isinstance(value, bytes):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
buffer = BytesIO()
sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path") is not None and os.path.isfile(value["path"]):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm"):
# "PCM" only has raw audio bytes
if value.get("sampling_rate") is None:
                    # To convert PCM bytes to WAV bytes, the sampling rate must be known
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
if value.get("bytes"):
                    # If we already have the PCM bytes, we don't need to read the file again (just use them)
bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
else:
bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
buffer = BytesIO(bytes())
sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path")}
elif value.get("bytes") is not None or value.get("path") is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes"), "path": value.get("path")}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
)
def decode_example(
self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
) -> dict:
"""Decode example audio file into audio data.
Args:
value (`dict`):
A dictionary with keys:
- `path`: String with relative audio file path.
- `bytes`: Bytes of the audio file.
token_per_repo_id (`dict`, *optional*):
To access and decode
audio files from private repositories on the Hub, you can pass
a dictionary repo_id (`str`) -> token (`bool` or `str`)
Returns:
`dict`
"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
)
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
)
if file is None:
token_per_repo_id = token_per_repo_id or {}
source_url = path.split("::")[-1]
pattern = (
config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL
)
try:
repo_id = string_to_dict(source_url, pattern)["repo_id"]
token = token_per_repo_id[repo_id]
except (ValueError, KeyError):
token = None
download_config = DownloadConfig(token=token)
with xopen(path, "rb", download_config=download_config) as f:
array, sampling_rate = sf.read(f)
else:
array, sampling_rate = sf.read(file)
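        # soundfile returns audio as (num_frames, num_channels); transpose so that channels come first, as librosa expects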
array = array.T
if self.mono:
array = librosa.to_mono(array)
if self.sampling_rate and self.sampling_rate != sampling_rate:
array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""If in the decodable state, raise an error, otherwise flatten the feature into a dictionary."""
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature.")
return {
"bytes": Value("binary"),
"path": Value("string"),
}
def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
"""Cast an Arrow array to the Audio arrow storage type.
The Arrow types that can be converted to the Audio pyarrow storage type are:
- `pa.string()` - it must contain the "path" data
- `pa.binary()` - it must contain the audio bytes
- `pa.struct({"bytes": pa.binary()})`
- `pa.struct({"path": pa.string()})`
- `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
Args:
storage (`Union[pa.StringArray, pa.StructArray]`):
PyArrow array to cast.
Returns:
`pa.StructArray`: Array in the Audio arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`
"""
if pa.types.is_string(storage.type):
bytes_array = pa.array([None] * len(storage), type=pa.binary())
storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_binary(storage.type):
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index("bytes") >= 0:
bytes_array = storage.field("bytes")
else:
bytes_array = pa.array([None] * len(storage), type=pa.binary())
if storage.type.get_field_index("path") >= 0:
path_array = storage.field("path")
else:
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
return array_cast(storage, self.pa_type)
def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
"""Embed audio files into the Arrow array.
Args:
storage (`pa.StructArray`):
PyArrow array to embed.
Returns:
`pa.StructArray`: Array in the Audio arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
"""
@no_op_if_value_is_null
def path_to_bytes(path):
with xopen(path, "rb") as f:
bytes_ = f.read()
return bytes_
bytes_array = pa.array(
[
(path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],
type=pa.binary(),
)
path_array = pa.array(
[os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
type=pa.string(),
)
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
return array_cast(storage, self.pa_type)
| datasets/src/datasets/features/audio.py/0 | {
"file_path": "datasets/src/datasets/features/audio.py",
"repo_id": "datasets",
"token_count": 5332
} |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""DatasetInfo record information we know about a dataset.
This includes things that we know about the dataset statically, i.e.:
- description
- canonical location
- does it have validation and tests splits
- size
- etc.
This also includes the things that can and should be computed once we've
processed the dataset as well:
- number of examples (in each split)
- etc.
"""
import copy
import dataclasses
import json
import os
import posixpath
from dataclasses import dataclass
from pathlib import Path
from typing import ClassVar, Dict, List, Optional, Union
import fsspec
from fsspec.core import url_to_fs
from huggingface_hub import DatasetCard, DatasetCardData
from . import config
from .features import Features
from .splits import SplitDict
from .utils import Version
from .utils.logging import get_logger
from .utils.py_utils import asdict, unique_values
logger = get_logger(__name__)
@dataclass
class SupervisedKeysData:
input: str = ""
output: str = ""
@dataclass
class DownloadChecksumsEntryData:
key: str = ""
value: str = ""
class MissingCachedSizesConfigError(Exception):
"""The expected cached sizes of the download file are missing."""
class NonMatchingCachedSizesError(Exception):
"""The prepared split doesn't have expected sizes."""
@dataclass
class PostProcessedInfo:
features: Optional[Features] = None
resources_checksums: Optional[dict] = None
def __post_init__(self):
# Convert back to the correct classes when we reload from dict
if self.features is not None and not isinstance(self.features, Features):
self.features = Features.from_dict(self.features)
@classmethod
def from_dict(cls, post_processed_info_dict: dict) -> "PostProcessedInfo":
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in post_processed_info_dict.items() if k in field_names})
@dataclass
class DatasetInfo:
"""Information about a dataset.
`DatasetInfo` documents datasets, including its name, version, and features.
See the constructor arguments and properties for a full list.
Not all fields are known on construction and may be updated later.
Attributes:
description (`str`):
A description of the dataset.
citation (`str`):
A BibTeX citation of the dataset.
homepage (`str`):
A URL to the official homepage for the dataset.
license (`str`):
The dataset's license. It can be the name of the license or a paragraph containing the terms of the license.
features ([`Features`], *optional*):
The features used to specify the dataset's column types.
post_processed (`PostProcessedInfo`, *optional*):
Information regarding the resources of a possible post-processing of a dataset. For example, it can contain the information of an index.
supervised_keys (`SupervisedKeysData`, *optional*):
Specifies the input feature and the label for supervised learning if applicable for the dataset (legacy from TFDS).
builder_name (`str`, *optional*):
The name of the `GeneratorBasedBuilder` subclass used to create the dataset. Usually matched to the corresponding script name. It is also the snake_case version of the dataset builder class name.
config_name (`str`, *optional*):
The name of the configuration derived from [`BuilderConfig`].
version (`str` or [`Version`], *optional*):
The version of the dataset.
splits (`dict`, *optional*):
The mapping between split name and metadata.
download_checksums (`dict`, *optional*):
The mapping between the URL to download the dataset's checksums and corresponding metadata.
download_size (`int`, *optional*):
The size of the files to download to generate the dataset, in bytes.
post_processing_size (`int`, *optional*):
Size of the dataset in bytes after post-processing, if any.
dataset_size (`int`, *optional*):
The combined size in bytes of the Arrow tables for all splits.
size_in_bytes (`int`, *optional*):
The combined size in bytes of all files associated with the dataset (downloaded files + Arrow files).
**config_kwargs (additional keyword arguments):
Keyword arguments to be passed to the [`BuilderConfig`] and used in the [`DatasetBuilder`].
"""
# Set in the dataset scripts
description: str = dataclasses.field(default_factory=str)
citation: str = dataclasses.field(default_factory=str)
homepage: str = dataclasses.field(default_factory=str)
license: str = dataclasses.field(default_factory=str)
features: Optional[Features] = None
post_processed: Optional[PostProcessedInfo] = None
supervised_keys: Optional[SupervisedKeysData] = None
# Set later by the builder
builder_name: Optional[str] = None
dataset_name: Optional[str] = None # for packaged builders, to be different from builder_name
config_name: Optional[str] = None
version: Optional[Union[str, Version]] = None
# Set later by `download_and_prepare`
splits: Optional[dict] = None
download_checksums: Optional[dict] = None
download_size: Optional[int] = None
post_processing_size: Optional[int] = None
dataset_size: Optional[int] = None
size_in_bytes: Optional[int] = None
_INCLUDED_INFO_IN_YAML: ClassVar[List[str]] = [
"config_name",
"download_size",
"dataset_size",
"features",
"splits",
]
def __post_init__(self):
# Convert back to the correct classes when we reload from dict
if self.features is not None and not isinstance(self.features, Features):
self.features = Features.from_dict(self.features)
if self.post_processed is not None and not isinstance(self.post_processed, PostProcessedInfo):
self.post_processed = PostProcessedInfo.from_dict(self.post_processed)
if self.version is not None and not isinstance(self.version, Version):
if isinstance(self.version, str):
self.version = Version(self.version)
else:
self.version = Version.from_dict(self.version)
if self.splits is not None and not isinstance(self.splits, SplitDict):
self.splits = SplitDict.from_split_dict(self.splits)
if self.supervised_keys is not None and not isinstance(self.supervised_keys, SupervisedKeysData):
if isinstance(self.supervised_keys, (tuple, list)):
self.supervised_keys = SupervisedKeysData(*self.supervised_keys)
else:
self.supervised_keys = SupervisedKeysData(**self.supervised_keys)
def write_to_directory(self, dataset_info_dir, pretty_print=False, storage_options: Optional[dict] = None):
"""Write `DatasetInfo` and license (if present) as JSON files to `dataset_info_dir`.
Args:
dataset_info_dir (`str`):
Destination directory.
pretty_print (`bool`, defaults to `False`):
If `True`, the JSON will be pretty-printed with the indent level of 4.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.9.0"/>
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.info.write_to_directory("/path/to/directory/")
```
"""
fs: fsspec.AbstractFileSystem
fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {}))
with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "wb") as f:
self._dump_info(f, pretty_print=pretty_print)
if self.license:
with fs.open(posixpath.join(dataset_info_dir, config.LICENSE_FILENAME), "wb") as f:
self._dump_license(f)
def _dump_info(self, file, pretty_print=False):
"""Dump info in `file` file-like object open in bytes mode (to support remote files)"""
file.write(json.dumps(asdict(self), indent=4 if pretty_print else None).encode("utf-8"))
def _dump_license(self, file):
"""Dump license in `file` file-like object open in bytes mode (to support remote files)"""
file.write(self.license.encode("utf-8"))
@classmethod
def from_merge(cls, dataset_infos: List["DatasetInfo"]):
dataset_infos = [dset_info.copy() for dset_info in dataset_infos if dset_info is not None]
if len(dataset_infos) > 0 and all(dataset_infos[0] == dset_info for dset_info in dataset_infos):
# if all dataset_infos are equal we don't need to merge. Just return the first.
return dataset_infos[0]
description = "\n\n".join(unique_values(info.description for info in dataset_infos)).strip()
citation = "\n\n".join(unique_values(info.citation for info in dataset_infos)).strip()
homepage = "\n\n".join(unique_values(info.homepage for info in dataset_infos)).strip()
license = "\n\n".join(unique_values(info.license for info in dataset_infos)).strip()
features = None
supervised_keys = None
return cls(
description=description,
citation=citation,
homepage=homepage,
license=license,
features=features,
supervised_keys=supervised_keys,
)
@classmethod
def from_directory(cls, dataset_info_dir: str, storage_options: Optional[dict] = None) -> "DatasetInfo":
"""Create [`DatasetInfo`] from the JSON file in `dataset_info_dir`.
This function updates all the dynamically generated fields (num_examples,
hash, time of creation,...) of the [`DatasetInfo`].
This will overwrite all previous metadata.
Args:
dataset_info_dir (`str`):
The directory containing the metadata file. This
should be the root directory of a specific dataset version.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.9.0"/>
Example:
```py
>>> from datasets import DatasetInfo
>>> ds_info = DatasetInfo.from_directory("/path/to/directory/")
```
"""
fs: fsspec.AbstractFileSystem
fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {}))
logger.info(f"Loading Dataset info from {dataset_info_dir}")
if not dataset_info_dir:
raise ValueError("Calling DatasetInfo.from_directory() with undefined dataset_info_dir.")
with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "r", encoding="utf-8") as f:
dataset_info_dict = json.load(f)
return cls.from_dict(dataset_info_dict)
@classmethod
def from_dict(cls, dataset_info_dict: dict) -> "DatasetInfo":
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dataset_info_dict.items() if k in field_names})
def update(self, other_dataset_info: "DatasetInfo", ignore_none=True):
self_dict = self.__dict__
self_dict.update(
**{
k: copy.deepcopy(v)
for k, v in other_dataset_info.__dict__.items()
if (v is not None or not ignore_none)
}
)
def copy(self) -> "DatasetInfo":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
def _to_yaml_dict(self) -> dict:
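        # Export only the fields listed in _INCLUDED_INFO_IN_YAML, converting Features/SplitDict/Version objects to YAML-friendly structures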
yaml_dict = {}
dataset_info_dict = asdict(self)
for key in dataset_info_dict:
if key in self._INCLUDED_INFO_IN_YAML:
value = getattr(self, key)
if hasattr(value, "_to_yaml_list"): # Features, SplitDict
yaml_dict[key] = value._to_yaml_list()
elif hasattr(value, "_to_yaml_string"): # Version
yaml_dict[key] = value._to_yaml_string()
else:
yaml_dict[key] = value
return yaml_dict
@classmethod
def _from_yaml_dict(cls, yaml_data: dict) -> "DatasetInfo":
yaml_data = copy.deepcopy(yaml_data)
if yaml_data.get("features") is not None:
yaml_data["features"] = Features._from_yaml_list(yaml_data["features"])
if yaml_data.get("splits") is not None:
yaml_data["splits"] = SplitDict._from_yaml_list(yaml_data["splits"])
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in yaml_data.items() if k in field_names})
class DatasetInfosDict(Dict[str, DatasetInfo]):
def write_to_directory(self, dataset_infos_dir, overwrite=False, pretty_print=False) -> None:
total_dataset_infos = {}
dataset_infos_path = os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)
dataset_readme_path = os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)
if not overwrite:
total_dataset_infos = self.from_directory(dataset_infos_dir)
total_dataset_infos.update(self)
if os.path.exists(dataset_infos_path):
# for backward compatibility, let's update the JSON file if it exists
with open(dataset_infos_path, "w", encoding="utf-8") as f:
dataset_infos_dict = {
config_name: asdict(dset_info) for config_name, dset_info in total_dataset_infos.items()
}
json.dump(dataset_infos_dict, f, indent=4 if pretty_print else None)
# Dump the infos in the YAML part of the README.md file
if os.path.exists(dataset_readme_path):
dataset_card = DatasetCard.load(dataset_readme_path)
dataset_card_data = dataset_card.data
else:
dataset_card = None
dataset_card_data = DatasetCardData()
if total_dataset_infos:
total_dataset_infos.to_dataset_card_data(dataset_card_data)
dataset_card = (
DatasetCard("---\n" + str(dataset_card_data) + "\n---\n") if dataset_card is None else dataset_card
)
dataset_card.save(Path(dataset_readme_path))
@classmethod
def from_directory(cls, dataset_infos_dir) -> "DatasetInfosDict":
logger.info(f"Loading Dataset Infos from {dataset_infos_dir}")
# Load the info from the YAML part of README.md
if os.path.exists(os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)):
dataset_card_data = DatasetCard.load(Path(dataset_infos_dir) / config.REPOCARD_FILENAME).data
if "dataset_info" in dataset_card_data:
return cls.from_dataset_card_data(dataset_card_data)
if os.path.exists(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)):
# this is just to have backward compatibility with dataset_infos.json files
with open(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f:
return cls(
{
config_name: DatasetInfo.from_dict(dataset_info_dict)
for config_name, dataset_info_dict in json.load(f).items()
}
)
else:
return cls()
@classmethod
def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "DatasetInfosDict":
if isinstance(dataset_card_data.get("dataset_info"), (list, dict)):
if isinstance(dataset_card_data["dataset_info"], list):
return cls(
{
dataset_info_yaml_dict.get("config_name", "default"): DatasetInfo._from_yaml_dict(
dataset_info_yaml_dict
)
for dataset_info_yaml_dict in dataset_card_data["dataset_info"]
}
)
else:
dataset_info = DatasetInfo._from_yaml_dict(dataset_card_data["dataset_info"])
dataset_info.config_name = dataset_card_data["dataset_info"].get("config_name", "default")
return cls({dataset_info.config_name: dataset_info})
else:
return cls()
def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None:
if self:
# first get existing metadata info
if "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], dict):
dataset_metadata_infos = {
dataset_card_data["dataset_info"].get("config_name", "default"): dataset_card_data["dataset_info"]
}
elif "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], list):
dataset_metadata_infos = {
config_metadata["config_name"]: config_metadata
for config_metadata in dataset_card_data["dataset_info"]
}
else:
dataset_metadata_infos = {}
# update/rewrite existing metadata info with the one to dump
total_dataset_infos = {
**dataset_metadata_infos,
**{config_name: dset_info._to_yaml_dict() for config_name, dset_info in self.items()},
}
# the config_name from the dataset_infos_dict takes over the config_name of the DatasetInfo
for config_name, dset_info_yaml_dict in total_dataset_infos.items():
dset_info_yaml_dict["config_name"] = config_name
if len(total_dataset_infos) == 1:
# use a struct instead of a list of configurations, since there's only one
dataset_card_data["dataset_info"] = next(iter(total_dataset_infos.values()))
config_name = dataset_card_data["dataset_info"].pop("config_name", None)
if config_name != "default":
# if config_name is not "default" preserve it and put at the first position
dataset_card_data["dataset_info"] = {
"config_name": config_name,
**dataset_card_data["dataset_info"],
}
else:
dataset_card_data["dataset_info"] = []
for config_name, dataset_info_yaml_dict in sorted(total_dataset_infos.items()):
# add the config_name field in first position
dataset_info_yaml_dict.pop("config_name", None)
dataset_info_yaml_dict = {"config_name": config_name, **dataset_info_yaml_dict}
dataset_card_data["dataset_info"].append(dataset_info_yaml_dict)
| datasets/src/datasets/info.py/0 | {
"file_path": "datasets/src/datasets/info.py",
"repo_id": "datasets",
"token_count": 8410
} |
import itertools
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import datasets
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class XmlConfig(datasets.BuilderConfig):
"""BuilderConfig for xml files."""
features: Optional[datasets.Features] = None
encoding: str = "utf-8"
encoding_errors: Optional[str] = None
class Xml(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = XmlConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]].
If str or List[str], then the dataset returns only the 'train' split.
If dict, then keys should be from the `datasets.Split` enum.
"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa_table.cast(schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
else:
return pa_table.cast(pa.schema({"xml": pa.string()}))
def _generate_tables(self, files):
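        # Each XML file is read in full and yielded as a single row; without explicit features, the column is named "xml"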
pa_table_names = list(self.config.features) if self.config.features is not None else ["xml"]
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
# open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n"
with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
xml = f.read()
pa_table = pa.Table.from_arrays([pa.array([xml])], names=pa_table_names)
yield file_idx, self._cast_table(pa_table)
| datasets/src/datasets/packaged_modules/xml/xml.py/0 | {
"file_path": "datasets/src/datasets/packaged_modules/xml/xml.py",
"repo_id": "datasets",
"token_count": 1167
} |
# deprecated, please use the `filelock` package instead
from filelock import ( # noqa: F401 # imported for backward compatibility TODO: remove in 3.0.0
BaseFileLock,
SoftFileLock,
Timeout,
UnixFileLock,
WindowsFileLock,
)
from ._filelock import FileLock # noqa: F401 # imported for backward compatibility. TODO: remove in 3.0.0
| datasets/src/datasets/utils/filelock.py/0 | {
"file_path": "datasets/src/datasets/utils/filelock.py",
"repo_id": "datasets",
"token_count": 115
} |
"""Utility helpers to handle progress bars in `datasets`.
Example:
1. Use `datasets.utils.tqdm` as you would use `tqdm.tqdm` or `tqdm.auto.tqdm`.
2. To disable progress bars, either use `disable_progress_bars()` helper or set the
environment variable `HF_DATASETS_DISABLE_PROGRESS_BARS` to 1.
3. To re-enable progress bars, use `enable_progress_bars()`.
4. To check whether progress bars are disabled, use `are_progress_bars_disabled()`.
NOTE: The environment variable `HF_DATASETS_DISABLE_PROGRESS_BARS` has priority over the programmatic helpers.
Example:
```py
from datasets.utils import (
are_progress_bars_disabled,
disable_progress_bars,
enable_progress_bars,
tqdm,
)
# Disable progress bars globally
disable_progress_bars()
# Use as normal `tqdm`
for _ in tqdm(range(5)):
do_something()
# Still not showing progress bars, as `disable=False` is overwritten to `True`.
for _ in tqdm(range(5), disable=False):
do_something()
are_progress_bars_disabled() # True
# Re-enable progress bars globally
enable_progress_bars()
# Progress bar will be shown !
for _ in tqdm(range(5)):
do_something()
```
"""
import warnings
from tqdm.auto import tqdm as old_tqdm
from ..config import HF_DATASETS_DISABLE_PROGRESS_BARS
# `HF_DATASETS_DISABLE_PROGRESS_BARS` is `Optional[bool]` while `_hf_datasets_progress_bars_disabled`
# is a `bool`. If `HF_DATASETS_DISABLE_PROGRESS_BARS` is set to True or False, it has priority.
# If `HF_DATASETS_DISABLE_PROGRESS_BARS` is None, it means the user has not set the
# environment variable and is free to enable/disable progress bars programmatically.
# TL;DR: env variable has priority over code.
#
# By default, progress bars are enabled.
_hf_datasets_progress_bars_disabled: bool = HF_DATASETS_DISABLE_PROGRESS_BARS or False
def disable_progress_bars() -> None:
"""
    Globally disable progress bars used in `datasets`, unless the `HF_DATASETS_DISABLE_PROGRESS_BARS` environment
    variable has been set.
Use [`~utils.enable_progress_bars`] to re-enable them.
"""
if HF_DATASETS_DISABLE_PROGRESS_BARS is False:
warnings.warn(
"Cannot disable progress bars: environment variable `HF_DATASETS_DISABLE_PROGRESS_BAR=0` is set and has"
" priority."
)
return
global _hf_datasets_progress_bars_disabled
_hf_datasets_progress_bars_disabled = True
def enable_progress_bars() -> None:
"""
    Globally enable progress bars used in `datasets`, unless the `HF_DATASETS_DISABLE_PROGRESS_BARS` environment
    variable has been set.
Use [`~utils.disable_progress_bars`] to disable them.
"""
if HF_DATASETS_DISABLE_PROGRESS_BARS is True:
warnings.warn(
"Cannot enable progress bars: environment variable `HF_DATASETS_DISABLE_PROGRESS_BAR=1` is set and has"
" priority."
)
return
global _hf_datasets_progress_bars_disabled
_hf_datasets_progress_bars_disabled = False
def are_progress_bars_disabled() -> bool:
"""Return whether progress bars are globally disabled or not.
    Progress bars used in `datasets` can be enabled or disabled globally using [`~utils.enable_progress_bars`]
    and [`~utils.disable_progress_bars`], or by setting the `HF_DATASETS_DISABLE_PROGRESS_BARS` environment variable.
"""
global _hf_datasets_progress_bars_disabled
return _hf_datasets_progress_bars_disabled
class tqdm(old_tqdm):
"""
Class to override `disable` argument in case progress bars are globally disabled.
Taken from https://github.com/tqdm/tqdm/issues/619#issuecomment-619639324.
"""
def __init__(self, *args, **kwargs):
if are_progress_bars_disabled():
kwargs["disable"] = True
super().__init__(*args, **kwargs)
def __delattr__(self, attr: str) -> None:
"""Fix for https://github.com/huggingface/datasets/issues/6066"""
try:
super().__delattr__(attr)
except AttributeError:
if attr != "_lock":
raise
# backward compatibility
enable_progress_bar = enable_progress_bars
disable_progress_bar = disable_progress_bars
def is_progress_bar_enabled():
return not are_progress_bars_disabled()
| datasets/src/datasets/utils/tqdm.py/0 | {
"file_path": "datasets/src/datasets/utils/tqdm.py",
"repo_id": "datasets",
"token_count": 1662
} |
import os
import time
import uuid
from contextlib import contextmanager
from typing import Optional
import pytest
import requests
from huggingface_hub.hf_api import HfApi, RepositoryNotFoundError
from huggingface_hub.utils import hf_raise_for_status
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
)
@pytest.fixture
def ci_hub_config(monkeypatch):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, monkeypatch):
# Enable implicit token
monkeypatch.setattr("huggingface_hub.constants.HF_HUB_DISABLE_IMPLICIT_TOKEN", False)
old_environ = dict(os.environ)
os.environ["HF_TOKEN"] = CI_HUB_USER_TOKEN
yield
os.environ.clear()
os.environ.update(old_environ)
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token():
yield CI_HUB_USER_TOKEN
@pytest.fixture
def cleanup_repo(hf_api):
def _cleanup_repo(repo_id):
hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
@contextmanager
def _temporary_repo(repo_id: Optional[str] = None):
repo_id = repo_id or f"{CI_HUB_USER}/test-dataset-{uuid.uuid4().hex[:6]}-{int(time.time() * 10e3)}"
try:
yield repo_id
finally:
try:
cleanup_repo(repo_id)
except RepositoryNotFoundError:
pass
return _temporary_repo
@pytest.fixture(scope="session")
def _hf_gated_dataset_repo_txt_data(hf_api: HfApi, hf_token, text_file_content):
repo_name = f"repo_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
hf_api.upload_file(
token=hf_token,
path_or_fileobj=text_file_content.encode(),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
path = f"{hf_api.endpoint}/api/datasets/{repo_id}/settings"
repo_settings = {"gated": "auto"}
r = requests.put(
path,
headers={"authorization": f"Bearer {hf_token}"},
json=repo_settings,
)
hf_raise_for_status(r)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_gated_dataset_repo_txt_data(_hf_gated_dataset_repo_txt_data, ci_hub_config, ci_hfh_hf_hub_url):
return _hf_gated_dataset_repo_txt_data
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file_content):
repo_name = f"repo_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=text_file_content.encode(),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_with_dir_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
repo_name = f"repo_zipped_img_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_image_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_img_data_
| datasets/tests/fixtures/hub.py/0 | {
"file_path": "datasets/tests/fixtures/hub.py",
"repo_id": "datasets",
"token_count": 2900
} |
import shutil
import textwrap
import numpy as np
import pytest
from datasets import ClassLabel, Features, Image, Value
from datasets.builder import InvalidConfigName
from datasets.data_files import DataFilesDict, DataFilesList, get_data_patterns
from datasets.download.streaming_download_manager import StreamingDownloadManager
from datasets.packaged_modules.imagefolder.imagefolder import ImageFolder, ImageFolderConfig
from ..utils import require_pil
@pytest.fixture
def cache_dir(tmp_path):
return str(tmp_path / "imagefolder_cache_dir")
@pytest.fixture
def data_files_with_labels_no_metadata(tmp_path, image_file):
data_dir = tmp_path / "data_files_with_labels_no_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
subdir_class_0 = data_dir / "cat"
subdir_class_0.mkdir(parents=True, exist_ok=True)
subdir_class_1 = data_dir / "dog"
subdir_class_1.mkdir(parents=True, exist_ok=True)
image_filename = subdir_class_0 / "image_cat.jpg"
shutil.copyfile(image_file, image_filename)
image_filename2 = subdir_class_1 / "image_dog.jpg"
shutil.copyfile(image_file, image_filename2)
data_files_with_labels_no_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
return data_files_with_labels_no_metadata
@pytest.fixture
def image_files_with_labels_and_duplicated_label_key_in_metadata(tmp_path, image_file):
data_dir = tmp_path / "image_files_with_labels_and_label_key_in_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
subdir_class_0 = data_dir / "cat"
subdir_class_0.mkdir(parents=True, exist_ok=True)
subdir_class_1 = data_dir / "dog"
subdir_class_1.mkdir(parents=True, exist_ok=True)
image_filename = subdir_class_0 / "image_cat.jpg"
shutil.copyfile(image_file, image_filename)
image_filename2 = subdir_class_1 / "image_dog.jpg"
shutil.copyfile(image_file, image_filename2)
image_metadata_filename = tmp_path / data_dir / "metadata.jsonl"
image_metadata = textwrap.dedent(
"""\
{"file_name": "cat/image_cat.jpg", "caption": "Nice image of a cat", "label": "Cat"}
{"file_name": "dog/image_dog.jpg", "caption": "Nice image of a dog", "label": "Dog"}
"""
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
return str(image_filename), str(image_filename2), str(image_metadata_filename)
@pytest.fixture
def image_file_with_metadata(tmp_path, image_file):
image_filename = tmp_path / "image_rgb.jpg"
shutil.copyfile(image_file, image_filename)
image_metadata_filename = tmp_path / "metadata.jsonl"
image_metadata = textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice image"}
"""
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
return str(image_filename), str(image_metadata_filename)
@pytest.fixture
def image_files_with_metadata_that_misses_one_image(tmp_path, image_file):
image_filename = tmp_path / "image_rgb.jpg"
shutil.copyfile(image_file, image_filename)
image_filename2 = tmp_path / "image_rgb2.jpg"
shutil.copyfile(image_file, image_filename2)
image_metadata_filename = tmp_path / "metadata.jsonl"
image_metadata = textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice image"}
"""
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
return str(image_filename), str(image_filename2), str(image_metadata_filename)
@pytest.fixture(params=["jsonl", "csv"])
def data_files_with_one_split_and_metadata(request, tmp_path, image_file):
data_dir = tmp_path / "imagefolder_data_dir_with_metadata_one_split"
data_dir.mkdir(parents=True, exist_ok=True)
subdir = data_dir / "subdir"
subdir.mkdir(parents=True, exist_ok=True)
image_filename = data_dir / "image_rgb.jpg"
shutil.copyfile(image_file, image_filename)
image_filename2 = data_dir / "image_rgb2.jpg"
shutil.copyfile(image_file, image_filename2)
image_filename3 = subdir / "image_rgb3.jpg" # in subdir
shutil.copyfile(image_file, image_filename3)
image_metadata_filename = data_dir / f"metadata.{request.param}"
image_metadata = (
textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice image"}
{"file_name": "image_rgb2.jpg", "caption": "Nice second image"}
{"file_name": "subdir/image_rgb3.jpg", "caption": "Nice third image"}
"""
)
if request.param == "jsonl"
else textwrap.dedent(
"""\
file_name,caption
image_rgb.jpg,Nice image
image_rgb2.jpg,Nice second image
subdir/image_rgb3.jpg,Nice third image
"""
)
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
data_files_with_one_split_and_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
assert len(data_files_with_one_split_and_metadata) == 1
assert len(data_files_with_one_split_and_metadata["train"]) == 4
return data_files_with_one_split_and_metadata
@pytest.fixture(params=["jsonl", "csv"])
def data_files_with_two_splits_and_metadata(request, tmp_path, image_file):
data_dir = tmp_path / "imagefolder_data_dir_with_metadata_two_splits"
data_dir.mkdir(parents=True, exist_ok=True)
train_dir = data_dir / "train"
train_dir.mkdir(parents=True, exist_ok=True)
test_dir = data_dir / "test"
test_dir.mkdir(parents=True, exist_ok=True)
image_filename = train_dir / "image_rgb.jpg" # train image
shutil.copyfile(image_file, image_filename)
image_filename2 = train_dir / "image_rgb2.jpg" # train image
shutil.copyfile(image_file, image_filename2)
image_filename3 = test_dir / "image_rgb3.jpg" # test image
shutil.copyfile(image_file, image_filename3)
train_image_metadata_filename = train_dir / f"metadata.{request.param}"
image_metadata = (
textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice train image"}
{"file_name": "image_rgb2.jpg", "caption": "Nice second train image"}
"""
)
if request.param == "jsonl"
else textwrap.dedent(
"""\
file_name,caption
image_rgb.jpg,Nice train image
image_rgb2.jpg,Nice second train image
"""
)
)
with open(train_image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
test_image_metadata_filename = test_dir / f"metadata.{request.param}"
image_metadata = (
textwrap.dedent(
"""\
{"file_name": "image_rgb3.jpg", "caption": "Nice test image"}
"""
)
if request.param == "jsonl"
else textwrap.dedent(
"""\
file_name,caption
image_rgb3.jpg,Nice test image
"""
)
)
with open(test_image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
data_files_with_two_splits_and_metadata = DataFilesDict.from_patterns(
get_data_patterns(str(data_dir)), data_dir.as_posix()
)
assert len(data_files_with_two_splits_and_metadata) == 2
assert len(data_files_with_two_splits_and_metadata["train"]) == 3
assert len(data_files_with_two_splits_and_metadata["test"]) == 2
return data_files_with_two_splits_and_metadata
@pytest.fixture
def data_files_with_zip_archives(tmp_path, image_file):
from PIL import Image, ImageOps
data_dir = tmp_path / "imagefolder_data_dir_with_zip_archives"
data_dir.mkdir(parents=True, exist_ok=True)
archive_dir = data_dir / "archive"
archive_dir.mkdir(parents=True, exist_ok=True)
subdir = archive_dir / "subdir"
subdir.mkdir(parents=True, exist_ok=True)
image_filename = archive_dir / "image_rgb.jpg"
shutil.copyfile(image_file, image_filename)
image_filename2 = subdir / "image_rgb2.jpg" # in subdir
# make sure they're two different images
# Indeed we won't be able to compare the image.filename, since the archive is not extracted in streaming mode
ImageOps.flip(Image.open(image_file)).save(image_filename2)
image_metadata_filename = archive_dir / "metadata.jsonl"
image_metadata = textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice image"}
{"file_name": "subdir/image_rgb2.jpg", "caption": "Nice second image"}
"""
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
shutil.make_archive(archive_dir, "zip", archive_dir)
shutil.rmtree(str(archive_dir))
data_files_with_zip_archives = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
assert len(data_files_with_zip_archives) == 1
assert len(data_files_with_zip_archives["train"]) == 1
return data_files_with_zip_archives
def test_config_raises_when_invalid_name() -> None:
with pytest.raises(InvalidConfigName, match="Bad characters"):
_ = ImageFolderConfig(name="name-with-*-invalid-character")
@pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])])
def test_config_raises_when_invalid_data_files(data_files) -> None:
with pytest.raises(ValueError, match="Expected a DataFilesDict"):
_ = ImageFolderConfig(name="name", data_files=data_files)
@require_pil
# check that labels are inferred correctly from dir names
def test_generate_examples_with_labels(data_files_with_labels_no_metadata, cache_dir):
# there are no metadata.jsonl files in this test case
imagefolder = ImageFolder(data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, drop_labels=False)
imagefolder.download_and_prepare()
assert imagefolder.info.features == Features({"image": Image(), "label": ClassLabel(names=["cat", "dog"])})
dataset = list(imagefolder.as_dataset()["train"])
label_feature = imagefolder.info.features["label"]
assert dataset[0]["label"] == label_feature._str2int["cat"]
assert dataset[1]["label"] == label_feature._str2int["dog"]
@require_pil
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_duplicated_label_key(
image_files_with_labels_and_duplicated_label_key_in_metadata, drop_metadata, drop_labels, cache_dir, caplog
):
cat_image_file, dog_image_file, image_metadata_file = image_files_with_labels_and_duplicated_label_key_in_metadata
imagefolder = ImageFolder(
drop_metadata=drop_metadata,
drop_labels=drop_labels,
data_files=[cat_image_file, dog_image_file, image_metadata_file],
cache_dir=cache_dir,
)
if drop_labels is False:
# infer labels from directories even if metadata files are found
imagefolder.download_and_prepare()
warning_in_logs = any("ignoring metadata columns" in record.msg.lower() for record in caplog.records)
assert warning_in_logs if drop_metadata is not True else not warning_in_logs
dataset = imagefolder.as_dataset()["train"]
assert imagefolder.info.features["label"] == ClassLabel(names=["cat", "dog"])
assert all(example["label"] in imagefolder.info.features["label"]._str2int.values() for example in dataset)
else:
imagefolder.download_and_prepare()
dataset = imagefolder.as_dataset()["train"]
if drop_metadata is not True:
# labels are from metadata
assert imagefolder.info.features["label"] == Value("string")
assert all(example["label"] in ["Cat", "Dog"] for example in dataset)
else:
# drop both labels and metadata
assert imagefolder.info.features == Features({"image": Image()})
assert all(example.keys() == {"image"} for example in dataset)
@require_pil
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_drop_labels(data_files_with_labels_no_metadata, drop_metadata, drop_labels):
imagefolder = ImageFolder(
drop_metadata=drop_metadata, drop_labels=drop_labels, data_files=data_files_with_labels_no_metadata
)
gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
# removing the labels explicitly requires drop_labels=True
assert gen_kwargs["add_labels"] is not bool(drop_labels)
assert gen_kwargs["add_metadata"] is False
generator = imagefolder._generate_examples(**gen_kwargs)
if not drop_labels:
assert all(
example.keys() == {"image", "label"} and all(val is not None for val in example.values())
for _, example in generator
)
else:
assert all(
example.keys() == {"image"} and all(val is not None for val in example.values())
for _, example in generator
)
@require_pil
@pytest.mark.parametrize("drop_metadata", [None, True, False])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_generate_examples_drop_metadata(image_file_with_metadata, drop_metadata, drop_labels):
image_file, image_metadata_file = image_file_with_metadata
imagefolder = ImageFolder(
drop_metadata=drop_metadata, drop_labels=drop_labels, data_files={"train": [image_file, image_metadata_file]}
)
gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
# since the dataset has metadata, removing the metadata explicitly requires drop_metadata=True
assert gen_kwargs["add_metadata"] is not bool(drop_metadata)
# since the dataset has metadata, adding the labels explicitly requires drop_labels=False
assert gen_kwargs["add_labels"] is (drop_labels is False)
generator = imagefolder._generate_examples(**gen_kwargs)
expected_columns = {"image"}
if gen_kwargs["add_metadata"]:
expected_columns.add("caption")
if gen_kwargs["add_labels"]:
expected_columns.add("label")
result = [example for _, example in generator]
assert len(result) == 1
example = result[0]
assert example.keys() == expected_columns
for column in expected_columns:
assert example[column] is not None
@require_pil
@pytest.mark.parametrize("drop_metadata", [None, True, False])
def test_generate_examples_with_metadata_in_wrong_location(image_file, image_file_with_metadata, drop_metadata):
_, image_metadata_file = image_file_with_metadata
imagefolder = ImageFolder(drop_metadata=drop_metadata, data_files={"train": [image_file, image_metadata_file]})
gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = imagefolder._generate_examples(**gen_kwargs)
if not drop_metadata:
with pytest.raises(ValueError):
list(generator)
else:
assert all(
example.keys() == {"image"} and all(val is not None for val in example.values())
for _, example in generator
)
@require_pil
@pytest.mark.parametrize("drop_metadata", [None, True, False])
def test_generate_examples_with_metadata_that_misses_one_image(
image_files_with_metadata_that_misses_one_image, drop_metadata
):
image_file, image_file2, image_metadata_file = image_files_with_metadata_that_misses_one_image
if not drop_metadata:
features = Features({"image": Image(), "caption": Value("string")})
else:
features = Features({"image": Image()})
imagefolder = ImageFolder(
drop_metadata=drop_metadata,
features=features,
data_files={"train": [image_file, image_file2, image_metadata_file]},
)
gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs
generator = imagefolder._generate_examples(**gen_kwargs)
if not drop_metadata:
with pytest.raises(ValueError):
list(generator)
else:
assert all(
example.keys() == {"image"} and all(val is not None for val in example.values())
for _, example in generator
)
@require_pil
@pytest.mark.parametrize("streaming", [False, True])
def test_data_files_with_metadata_and_single_split(streaming, cache_dir, data_files_with_one_split_and_metadata):
data_files = data_files_with_one_split_and_metadata
imagefolder = ImageFolder(data_files=data_files, cache_dir=cache_dir)
imagefolder.download_and_prepare()
datasets = imagefolder.as_streaming_dataset() if streaming else imagefolder.as_dataset()
for split, data_files in data_files.items():
expected_num_of_images = len(data_files) - 1 # don't count the metadata file
assert split in datasets
dataset = list(datasets[split])
assert len(dataset) == expected_num_of_images
# make sure each sample has its own image and metadata
assert len({example["image"].filename for example in dataset}) == expected_num_of_images
assert len({example["caption"] for example in dataset}) == expected_num_of_images
assert all(example["caption"] is not None for example in dataset)
@require_pil
@pytest.mark.parametrize("streaming", [False, True])
def test_data_files_with_metadata_and_multiple_splits(streaming, cache_dir, data_files_with_two_splits_and_metadata):
data_files = data_files_with_two_splits_and_metadata
imagefolder = ImageFolder(data_files=data_files, cache_dir=cache_dir)
imagefolder.download_and_prepare()
datasets = imagefolder.as_streaming_dataset() if streaming else imagefolder.as_dataset()
for split, data_files in data_files.items():
expected_num_of_images = len(data_files) - 1 # don't count the metadata file
assert split in datasets
dataset = list(datasets[split])
assert len(dataset) == expected_num_of_images
# make sure each sample has its own image and metadata
assert len({example["image"].filename for example in dataset}) == expected_num_of_images
assert len({example["caption"] for example in dataset}) == expected_num_of_images
assert all(example["caption"] is not None for example in dataset)
@require_pil
@pytest.mark.parametrize("streaming", [False, True])
def test_data_files_with_metadata_and_archives(streaming, cache_dir, data_files_with_zip_archives):
imagefolder = ImageFolder(data_files=data_files_with_zip_archives, cache_dir=cache_dir)
imagefolder.download_and_prepare()
datasets = imagefolder.as_streaming_dataset() if streaming else imagefolder.as_dataset()
for split, data_files in data_files_with_zip_archives.items():
num_of_archives = len(data_files) # the metadata file is inside the archive
expected_num_of_images = 2 * num_of_archives
assert split in datasets
dataset = list(datasets[split])
assert len(dataset) == expected_num_of_images
# make sure each sample has its own image and metadata
assert len({np.array(example["image"])[0, 0, 0] for example in dataset}) == expected_num_of_images
assert len({example["caption"] for example in dataset}) == expected_num_of_images
assert all(example["caption"] is not None for example in dataset)
@require_pil
def test_data_files_with_wrong_metadata_file_name(cache_dir, tmp_path, image_file):
data_dir = tmp_path / "data_dir_with_bad_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(image_file, data_dir / "image_rgb.jpg")
image_metadata_filename = data_dir / "bad_metadata.jsonl" # bad file
image_metadata = textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice image"}
"""
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
imagefolder = ImageFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
imagefolder.download_and_prepare()
dataset = imagefolder.as_dataset(split="train")
# check that there are no metadata, since the metadata file name doesn't have the right name
assert "caption" not in dataset.column_names
@require_pil
def test_data_files_with_wrong_image_file_name_column_in_metadata_file(cache_dir, tmp_path, image_file):
data_dir = tmp_path / "data_dir_with_bad_metadata"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(image_file, data_dir / "image_rgb.jpg")
image_metadata_filename = data_dir / "metadata.jsonl"
image_metadata = textwrap.dedent( # with bad column "bad_file_name" instead of "file_name"
"""\
{"bad_file_name": "image_rgb.jpg", "caption": "Nice image"}
"""
)
with open(image_metadata_filename, "w", encoding="utf-8") as f:
f.write(image_metadata)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
imagefolder = ImageFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
with pytest.raises(ValueError) as exc_info:
imagefolder.download_and_prepare()
assert "`file_name` must be present" in str(exc_info.value)
@require_pil
def test_data_files_with_metadata_in_different_formats(cache_dir, tmp_path, image_file):
data_dir = tmp_path / "data_dir_with_metadata_in_different_format"
data_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(image_file, data_dir / "image_rgb.jpg")
image_metadata_filename_jsonl = data_dir / "metadata.jsonl"
image_metadata_jsonl = textwrap.dedent(
"""\
{"file_name": "image_rgb.jpg", "caption": "Nice image"}
"""
)
with open(image_metadata_filename_jsonl, "w", encoding="utf-8") as f:
f.write(image_metadata_jsonl)
image_metadata_filename_csv = data_dir / "metadata.csv"
image_metadata_csv = textwrap.dedent(
"""\
file_name,caption
image_rgb.jpg,Nice image
"""
)
with open(image_metadata_filename_csv, "w", encoding="utf-8") as f:
f.write(image_metadata_csv)
data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix())
imagefolder = ImageFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir)
with pytest.raises(ValueError) as exc_info:
imagefolder.download_and_prepare()
assert "metadata files with different extensions" in str(exc_info.value)
| datasets/tests/packaged_modules/test_imagefolder.py/0 | {
"file_path": "datasets/tests/packaged_modules/test_imagefolder.py",
"repo_id": "datasets",
"token_count": 8893
} |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.download.streaming_download_manager import StreamingDownloadManager
from datasets.utils.file_utils import hash_url_to_filename, xopen
from datasets.utils.py_utils import NestedDataStructure
URL = "tmp://file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "ce0516943c3a4f9af269cf40fa658d615fa0f00d2dd9ef3f0ac5a3b35be0b719"
class MockResponse:
status_code = 200
headers = {"Content-Length": "100"}
cookies = {}
def iter_content(self, **kwargs):
return [bytes(CONTENT, "utf-8")]
def mock_request(*args, **kwargs):
return MockResponse()
@pytest.mark.parametrize("urls_type", ["str", "list", "dict", "dict_of_dict"])
def test_download_manager_download(urls_type, tmp_path, tmpfs):
url = URL
with tmpfs.open(url, "w") as f:
f.write(CONTENT)
urls_types = {"str": url, "list": [url], "dict": {"train": url}, "dict_of_dict": {"train": {"en": url}}}
urls = urls_types[urls_type]
dataset_name = "dummy"
cache_subdir = "downloads"
cache_dir_root = tmp_path
download_config = DownloadConfig(
cache_dir=os.path.join(cache_dir_root, cache_subdir),
use_etag=False,
)
dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
downloaded_paths = dl_manager.download(urls)
assert isinstance(downloaded_paths, type(urls))
if "urls_type".startswith("list"):
assert len(downloaded_paths) == len(urls)
elif "urls_type".startswith("dict"):
assert downloaded_paths.keys() == urls.keys()
if "urls_type" == "dict_of_dict":
key = list(urls.keys())[0]
assert isinstance(downloaded_paths[key], dict)
assert downloaded_paths[key].keys() == urls[key].keys()
for downloaded_path, url in zip(
NestedDataStructure(downloaded_paths).flatten(), NestedDataStructure(urls).flatten()
):
downloaded_path = Path(downloaded_path)
parts = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
content = downloaded_path.read_text()
assert content == CONTENT
metadata_downloaded_path = downloaded_path.with_suffix(".json")
assert metadata_downloaded_path.exists()
metadata_content = json.loads(metadata_downloaded_path.read_text())
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
@pytest.mark.parametrize("extract_on_the_fly", [False, True])
def test_download_manager_extract(paths_type, xz_file, text_file, extract_on_the_fly):
filename = str(xz_file)
if issubclass(paths_type, str):
paths = filename
elif issubclass(paths_type, list):
paths = [filename]
elif issubclass(paths_type, dict):
paths = {"train": filename}
dataset_name = "dummy"
cache_dir = xz_file.parent
extracted_subdir = "extracted"
download_config = DownloadConfig(
cache_dir=cache_dir,
use_etag=False,
extract_on_the_fly=extract_on_the_fly,
)
dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
extracted_paths = dl_manager.extract(paths)
input_paths = paths
for extracted_paths in [extracted_paths]:
if isinstance(paths, str):
extracted_paths = [extracted_paths]
input_paths = [paths]
elif isinstance(paths, dict):
assert "train" in extracted_paths.keys()
extracted_paths = extracted_paths.values()
input_paths = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(extracted_paths, input_paths):
assert extracted_path == dl_manager.extracted_paths[input_path]
if not extract_on_the_fly:
extracted_path = Path(extracted_path)
parts = extracted_path.parts
assert parts[-1] == hash_url_to_filename(input_path, etag=None)
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
extracted_file_content = extracted_path.read_text()
expected_file_content = text_file.read_text()
assert extracted_file_content == expected_file_content
else:
assert extracted_path == StreamingDownloadManager(
dataset_name=dataset_name, download_config=download_config
).extract(xz_file)
assert xopen(extracted_path).read() == text_file.read_text()
def test_download_manager_delete_extracted_files(xz_file):
dataset_name = "dummy"
cache_dir = xz_file.parent
extracted_subdir = "extracted"
download_config = DownloadConfig(
cache_dir=cache_dir,
use_etag=False,
)
dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
extracted_path = dl_manager.extract(xz_file)
assert extracted_path == dl_manager.extracted_paths[xz_file]
extracted_path = Path(extracted_path)
parts = extracted_path.parts
assert parts[-1] == hash_url_to_filename(str(xz_file), etag=None)
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
dl_manager.delete_extracted_files()
assert not extracted_path.exists()
def _test_jsonl(path, file):
assert path.endswith(".jsonl")
for num_items, line in enumerate(file, start=1):
item = json.loads(line.decode("utf-8"))
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
archive_jsonl_path = request.getfixturevalue(archive_jsonl)
dl_manager = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
_test_jsonl(path, file)
assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
dl_manager = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
_test_jsonl(subpath, subfile)
assert num_tar == 1
assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
dl_manager = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| datasets/tests/test_download_manager.py/0 | {
"file_path": "datasets/tests/test_download_manager.py",
"repo_id": "datasets",
"token_count": 2945
} |
from tempfile import NamedTemporaryFile
import pytest
import requests
from datasets.utils.file_utils import fsspec_get, fsspec_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline, require_not_windows
@pytest.mark.integration
@require_not_windows # fsspec get keeps a file handle on windows that raises PermissionError
def test_offline_with_timeout():
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
with pytest.raises(RequestWouldHangIndefinitelyError):
requests.request("GET", "https://huggingface.co")
with pytest.raises(requests.exceptions.Timeout):
requests.request("GET", "https://huggingface.co", timeout=1.0)
with pytest.raises(requests.exceptions.Timeout), NamedTemporaryFile() as temp_file:
fsspec_get("hf://dummy", temp_file=temp_file)
@pytest.mark.integration
@require_not_windows # fsspec get keeps a file handle on windows that raises PermissionError
def test_offline_with_connection_error():
with offline(OfflineSimulationMode.CONNECTION_FAILS):
with pytest.raises(requests.exceptions.ConnectionError):
requests.request("GET", "https://huggingface.co")
with pytest.raises(requests.exceptions.ConnectionError), NamedTemporaryFile() as temp_file:
fsspec_get("hf://dummy", temp_file=temp_file)
def test_offline_with_datasets_offline_mode_enabled():
with offline(OfflineSimulationMode.HF_HUB_OFFLINE_SET_TO_1):
with pytest.raises(ConnectionError):
fsspec_head("hf://dummy")
with pytest.raises(ConnectionError), NamedTemporaryFile() as temp_file:
fsspec_get("hf://dummy", temp_file=temp_file)
| datasets/tests/test_offline_util.py/0 | {
"file_path": "datasets/tests/test_offline_util.py",
"repo_id": "datasets",
"token_count": 641
} |
.PHONY: deps_table_update modified_only_fixup extra_style_checks quality style fixup fix-copies test test-examples
# make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!)
export PYTHONPATH = src
check_dirs := examples scripts src tests utils benchmarks
modified_only_fixup:
$(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs)))
@if test -n "$(modified_py_files)"; then \
echo "Checking/fixing $(modified_py_files)"; \
ruff check $(modified_py_files) --fix; \
ruff format $(modified_py_files);\
else \
echo "No library .py files were modified"; \
fi
# Update src/diffusers/dependency_versions_table.py
deps_table_update:
@python setup.py deps_table_update
deps_table_check_updated:
@md5sum src/diffusers/dependency_versions_table.py > md5sum.saved
@python setup.py deps_table_update
@md5sum -c --quiet md5sum.saved || (printf "\nError: the version dependency table is outdated.\nPlease run 'make fixup' or 'make style' and commit the changes.\n\n" && exit 1)
@rm md5sum.saved
# autogenerating code
autogenerate_code: deps_table_update
# Check that the repo is in a good state
repo-consistency:
python utils/check_dummies.py
python utils/check_repo.py
python utils/check_inits.py
# this target runs checks on all files
quality:
ruff check $(check_dirs) setup.py
ruff format --check $(check_dirs) setup.py
doc-builder style src/diffusers docs/source --max_len 119 --check_only
python utils/check_doc_toc.py
# Format source code automatically and check if there are any problems left that need manual fixing
extra_style_checks:
python utils/custom_init_isort.py
python utils/check_doc_toc.py --fix_and_overwrite
# this target runs checks on all files and potentially modifies some of them
style:
ruff check $(check_dirs) setup.py --fix
ruff format $(check_dirs) setup.py
doc-builder style src/diffusers docs/source --max_len 119
${MAKE} autogenerate_code
${MAKE} extra_style_checks
# Super fast fix and check target that only works on relevant modified files since the branch was made
fixup: modified_only_fixup extra_style_checks autogenerate_code repo-consistency
# Make marked copies of snippets of codes conform to the original
fix-copies:
python utils/check_copies.py --fix_and_overwrite
python utils/check_dummies.py --fix_and_overwrite
# Run tests for the library
test:
python -m pytest -n auto --dist=loadfile -s -v ./tests/
# Run tests for examples
test-examples:
python -m pytest -n auto --dist=loadfile -s -v ./examples/
# Release stuff
pre-release:
python utils/release.py
pre-patch:
python utils/release.py --patch
post-release:
python utils/release.py --post_release
post-patch:
python utils/release.py --post_release --patch
| diffusers/Makefile/0 | {
"file_path": "diffusers/Makefile",
"repo_id": "diffusers",
"token_count": 929
} |
FROM ubuntu:20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get -y update \
&& apt-get install -y software-properties-common \
&& add-apt-repository ppa:deadsnakes/ppa
RUN apt install -y bash \
build-essential \
git \
git-lfs \
curl \
ca-certificates \
libsndfile1-dev \
libgl1 \
python3.10 \
python3-pip \
python3.10-venv && \
rm -rf /var/lib/apt/lists
# make sure to use venv
RUN python3.10 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"
# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
# follow the instructions here: https://cloud.google.com/tpu/docs/run-in-container#train_a_jax_model_in_a_docker_container
RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
python3 -m uv pip install --upgrade --no-cache-dir \
clu \
"jax[cpu]>=0.2.16,!=0.3.2" \
"flax>=0.4.1" \
"jaxlib>=0.1.65" && \
python3 -m uv pip install --no-cache-dir \
accelerate \
datasets \
hf-doc-builder \
huggingface-hub \
Jinja2 \
librosa \
numpy==1.26.4 \
scipy \
tensorboard \
transformers \
hf_transfer
CMD ["/bin/bash"] | diffusers/docker/diffusers-flax-cpu/Dockerfile/0 | {
"file_path": "diffusers/docker/diffusers-flax-cpu/Dockerfile",
"repo_id": "diffusers",
"token_count": 652
} |
<!-- Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->
# Caching methods
## Pyramid Attention Broadcast
[Pyramid Attention Broadcast](https://huggingface.co/papers/2408.12588) from Xuanlei Zhao, Xiaolong Jin, Kai Wang, Yang You.
Pyramid Attention Broadcast (PAB) is a method that speeds up inference in diffusion models by systematically skipping attention computations between successive inference steps and reusing cached attention states. The attention states are not very different between successive inference steps. The most prominent difference is in the spatial attention blocks, not as much in the temporal attention blocks, and finally the least in the cross attention blocks. Therefore, many cross attention computation blocks can be skipped, followed by the temporal and spatial attention blocks. By combining other techniques like sequence parallelism and classifier-free guidance parallelism, PAB achieves near real-time video generation.
Enable PAB with [`~PyramidAttentionBroadcastConfig`] on any pipeline. For some benchmarks, refer to [this](https://github.com/huggingface/diffusers/pull/9562) pull request.
```python
import torch
from diffusers import CogVideoXPipeline, PyramidAttentionBroadcastConfig
pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
pipe.to("cuda")
# Increasing the value of `spatial_attention_timestep_skip_range[0]` or decreasing the value of
# `spatial_attention_timestep_skip_range[1]` will decrease the interval in which pyramid attention
# broadcast is active, leading to slower inference speeds. However, large intervals can lead to
# poorer quality of generated videos.
config = PyramidAttentionBroadcastConfig(
spatial_attention_block_skip_range=2,
spatial_attention_timestep_skip_range=(100, 800),
current_timestep_callback=lambda: pipe.current_timestep,
)
pipe.transformer.enable_cache(config)
```
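Once the cache is enabled, the pipeline is called exactly as before; PAB only changes how attention states are reused internally during denoising. The sketch below is illustrative rather than part of the PAB API: the prompt, step count, and frame rate are arbitrary choices.
```python
from diffusers.utils import export_to_video
# Run inference as usual; the cached transformer transparently skips eligible attention blocks.
video = pipe(prompt="A panda playing a guitar in a bamboo forest", num_inference_steps=50).frames[0]
export_to_video(video, "output.mp4", fps=8)
```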
### CacheMixin
[[autodoc]] CacheMixin
### PyramidAttentionBroadcastConfig
[[autodoc]] PyramidAttentionBroadcastConfig
[[autodoc]] apply_pyramid_attention_broadcast
| diffusers/docs/source/en/api/cache.md/0 | {
"file_path": "diffusers/docs/source/en/api/cache.md",
"repo_id": "diffusers",
"token_count": 674
} |
<!-- Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->
# AutoencoderKLHunyuanVideo
The 3D variational autoencoder (VAE) model with KL loss used in [HunyuanVideo](https://github.com/Tencent/HunyuanVideo/), which was introduced in [HunyuanVideo: A Systematic Framework For Large Video Generative Models](https://huggingface.co/papers/2412.03603) by Tencent.
The model can be loaded with the following code snippet.
```python
import torch
from diffusers import AutoencoderKLHunyuanVideo
vae = AutoencoderKLHunyuanVideo.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder="vae", torch_dtype=torch.float16)
```
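As a quick sanity check, latents produced by `encode` can be passed straight back to `decode`. The sketch below follows the usual `(batch, channels, frames, height, width)` video layout used by diffusers VAEs; the tiny dummy shapes and the CUDA device are illustrative assumptions, not documented requirements.
```python
import torch
# Dummy clip kept small so the round trip runs quickly.
video = torch.randn(1, 3, 9, 64, 64, dtype=torch.float16, device="cuda")
vae = vae.to("cuda")
with torch.no_grad():
    latents = vae.encode(video).latent_dist.sample()
    reconstruction = vae.decode(latents).sample
print(latents.shape, reconstruction.shape)
```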
## AutoencoderKLHunyuanVideo
[[autodoc]] AutoencoderKLHunyuanVideo
- decode
- all
## DecoderOutput
[[autodoc]] models.autoencoders.vae.DecoderOutput
| diffusers/docs/source/en/api/models/autoencoder_kl_hunyuan_video.md/0 | {
"file_path": "diffusers/docs/source/en/api/models/autoencoder_kl_hunyuan_video.md",
"repo_id": "diffusers",
"token_count": 383
} |