# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import torch from datasets import Dataset from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer from trl import RLOOConfig, RLOOTrainer class RLOOTrainerTester(unittest.TestCase): def setUp(self): self.model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" self.policy_model = AutoModelForCausalLM.from_pretrained(self.model_id) self.reward_model = AutoModelForSequenceClassification.from_pretrained(self.model_id) self.policy_ref_model = AutoModelForCausalLM.from_pretrained(self.model_id) self.tokenizer = AutoTokenizer.from_pretrained(self.model_id, padding_side="left") self.tokenizer.add_special_tokens({"pad_token": "[PAD]"}) def test_rloo_checkpoint(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = RLOOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, total_episodes=1, report_to="none", ) dummy_text = [{"content": "Hello World!", "role": "user"}] dummy_data = self.tokenizer.apply_chat_template(dummy_text) dummy_dataset = Dataset.from_dict({"input_ids": dummy_data}) trainer = RLOOTrainer( config=training_args, policy=self.policy_model, reward_model=self.reward_model, ref_policy=self.policy_ref_model, processing_class=self.tokenizer, train_dataset=dummy_dataset, eval_dataset=dummy_dataset, ) trainer._save_checkpoint(trainer.model, trial=None) def test_rloo_reward(self): local_batch_size = 3 rloo_k = 4 sequence_length = 5 # Add sequence length for testing token-level rewards # fmt: off rlhf_reward = torch.tensor([ 1, 2, 3, # first rlhf reward for three prompts 2, 3, 4, # second rlhf reward for three prompts 5, 6, 7, # third rlhf reward for three prompts 8, 9, 10, # fourth rlhf reward for three prompts ]).float() # Create padding mask where 1 indicates valid token, 0 indicates padding padding_mask = torch.ones(local_batch_size * rloo_k, sequence_length) # Set padding based on sequence lengths sequence_lengths = torch.tensor([ 3, 4, 3, # lengths for first batch 4, 3, 4, # lengths for second batch 3, 4, 3, # lengths for third batch 4, 3, 4, # lengths for fourth batch ]) for i, length in enumerate(sequence_lengths): padding_mask[i, length:] = 0 # Add kl tensor for testing token-level rewards kl = torch.ones(local_batch_size * rloo_k, sequence_length) # Dummy KL values # fmt: on # Test token-level KL rewards following OpenRLHF implementation kl_coef = 0.1 kl_reward = -kl_coef * kl # Find last non-padded position eos_indices = padding_mask.size(1) - 1 - padding_mask.long().fliplr().argmax(dim=1, keepdim=True) # Create last reward tensor last_reward = torch.zeros_like(kl) last_reward.scatter_(dim=1, index=eos_indices, src=rlhf_reward.reshape(-1, 1)) # Test last_reward - should have rlhf_reward at the last non-padded position for i, (length, reward) in enumerate(zip(sequence_lengths, rlhf_reward)): # Check reward is at correct position self.assertEqual(last_reward[i, length - 1].item(), reward.item()) # 
Check zeros elsewhere self.assertTrue(torch.all(last_reward[i, : length - 1] == 0)) self.assertTrue(torch.all(last_reward[i, length:] == 0)) # Combine rewards reward = last_reward + kl_reward non_score_reward = kl_reward.sum(1) token_level_rlhf_reward = reward.sum(1) # Test reward components # KL reward should be -0.1 for each token in sequence length expected_kl_reward = -0.1 * sequence_length # Each position gets -0.1 KL reward torch.testing.assert_close(non_score_reward, torch.tensor(expected_kl_reward).expand_as(non_score_reward)) # Total reward should be rlhf_reward + kl_reward expected_total = rlhf_reward + expected_kl_reward torch.testing.assert_close(token_level_rlhf_reward, expected_total) # Test sequence-level rewards (existing test) baseline = (rlhf_reward.sum(0) - rlhf_reward) / (rloo_k - 1) advantages = torch.zeros_like(rlhf_reward) for i in range(0, len(advantages), local_batch_size): other_response_rlhf_rewards = [] for j in range(0, len(advantages), local_batch_size): if i != j: other_response_rlhf_rewards.append(rlhf_reward[j : j + local_batch_size]) advantages[i : i + local_batch_size] = rlhf_reward[i : i + local_batch_size] - torch.stack( other_response_rlhf_rewards ).mean(0) self.assertLess((1 - (2 + 5 + 8) / 3 - advantages[0].item()), 1e-6) self.assertLess((6 - (3 + 2 + 9) / 3 - advantages[7].item()), 1e-6) # Test vectorized implementation rlhf_reward = rlhf_reward.reshape(rloo_k, local_batch_size) baseline = (rlhf_reward.sum(0) - rlhf_reward) / (rloo_k - 1) vec_advantages = rlhf_reward - baseline torch.testing.assert_close(vec_advantages.flatten(), advantages) def test_rloo_training(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = RLOOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, per_device_eval_batch_size=2, total_episodes=1, num_train_epochs=1, max_steps=2, report_to="none", ) # Create a simple dataset dummy_text = [{"content": "Hello World!", "role": "user"}] dummy_data = self.tokenizer.apply_chat_template(dummy_text) dummy_dataset = Dataset.from_dict({"input_ids": [dummy_data, dummy_data]}) trainer = RLOOTrainer( config=training_args, policy=self.policy_model, reward_model=self.reward_model, ref_policy=self.policy_ref_model, processing_class=self.tokenizer, train_dataset=dummy_dataset, eval_dataset=dummy_dataset, ) # Test that training completes without errors trainer.train() # Check if objective/rlhf_reward is available self.assertIn("objective/rlhf_reward", trainer.state.log_history[-1]) def test_rloo_training_with_custom_reward(self): # dummy reward function def reward_function(texts): # based on length of text rewards = [len(text) for text in texts] return rewards with tempfile.TemporaryDirectory() as tmp_dir: training_args = RLOOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, per_device_eval_batch_size=2, total_episodes=1, num_train_epochs=1, max_steps=2, report_to="none", ) # Create a simple dataset dummy_text = [{"content": "Hello World!", "role": "user"}] dummy_data = self.tokenizer.apply_chat_template(dummy_text) dummy_dataset = Dataset.from_dict({"input_ids": [dummy_data, dummy_data]}) trainer = RLOOTrainer( config=training_args, policy=self.policy_model, reward_model=reward_function, ref_policy=self.policy_ref_model, processing_class=self.tokenizer, train_dataset=dummy_dataset, eval_dataset=dummy_dataset, ) # Test that training completes without errors trainer.train() # Check if objective/rlhf_reward is available self.assertIn("objective/rlhf_reward", trainer.state.log_history[-1])
trl/tests/test_rloo_trainer.py/0
{ "file_path": "trl/tests/test_rloo_trainer.py", "repo_id": "trl", "token_count": 4153 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import os from itertools import chain from types import ModuleType from typing import Any from transformers.utils.import_utils import _is_package_available # Use same as transformers.utils.import_utils _deepspeed_available = _is_package_available("deepspeed") _diffusers_available = _is_package_available("diffusers") _llm_blender_available = _is_package_available("llm_blender") _mergekit_available = _is_package_available("mergekit") _rich_available = _is_package_available("rich") _unsloth_available = _is_package_available("unsloth") _vllm_available = _is_package_available("vllm") def is_deepspeed_available() -> bool: return _deepspeed_available def is_diffusers_available() -> bool: return _diffusers_available def is_llm_blender_available() -> bool: return _llm_blender_available def is_mergekit_available() -> bool: return _mergekit_available def is_rich_available() -> bool: return _rich_available def is_unsloth_available() -> bool: return _unsloth_available def is_vllm_available() -> bool: return _vllm_available class _LazyModule(ModuleType): """ Module class that surfaces all objects but only performs associated imports when the objects are requested. """ # Very heavily inspired by optuna.integration._IntegrationModule # https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None): super().__init__(name) self._modules = set(import_structure.keys()) self._class_to_module = {} for key, values in import_structure.items(): for value in values: self._class_to_module[value] = key # Needed for autocompletion in an IDE self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values())) self.__file__ = module_file self.__spec__ = module_spec self.__path__ = [os.path.dirname(module_file)] self._objects = {} if extra_objects is None else extra_objects self._name = name self._import_structure = import_structure # Needed for autocompletion in an IDE def __dir__(self): result = super().__dir__() # The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether # they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir. for attr in self.__all__: if attr not in result: result.append(attr) return result def __getattr__(self, name: str) -> Any: if name in self._objects: return self._objects[name] if name in self._modules: value = self._get_module(name) elif name in self._class_to_module.keys(): module = self._get_module(self._class_to_module[name]) value = getattr(module, name) else: raise AttributeError(f"module {self.__name__} has no attribute {name}") setattr(self, name, value) return value def _get_module(self, module_name: str): try: return importlib.import_module("." 
+ module_name, self.__name__) except Exception as e: raise RuntimeError( f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its" f" traceback):\n{e}" ) from e def __reduce__(self): return (self.__class__, (self._name, self.__file__, self._import_structure)) class OptionalDependencyNotAvailable(BaseException): """Internally used error class for signalling an optional dependency was not found."""
trl/trl/import_utils.py/0
{ "file_path": "trl/trl/import_utils.py", "repo_id": "trl", "token_count": 1598 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import inspect import logging import os import subprocess import sys from dataclasses import dataclass, field from typing import Iterable, Optional, Union import yaml from transformers import HfArgumentParser from transformers.hf_argparser import DataClass, DataClassType logger = logging.getLogger(__name__) @dataclass class ScriptArguments: """ Arguments common to all scripts. Args: dataset_name (`str`): Dataset name. dataset_config (`str` or `None`, *optional*, defaults to `None`): Dataset configuration name. Corresponds to the `name` argument of the [`~datasets.load_dataset`] function. dataset_train_split (`str`, *optional*, defaults to `"train"`): Dataset split to use for training. dataset_test_split (`str`, *optional*, defaults to `"test"`): Dataset split to use for evaluation. gradient_checkpointing_use_reentrant (`bool`, *optional*, defaults to `False`): Whether to apply `use_reentrant` for gradient checkpointing. ignore_bias_buffers (`bool`, *optional*, defaults to `False`): Debug argument for distributed training. Fix for DDP issues with LM bias/mask buffers - invalid scalar type, inplace operation. See https://github.com/huggingface/transformers/issues/22482#issuecomment-1595790992. """ dataset_name: str = field(metadata={"help": "Dataset name."}) dataset_config: Optional[str] = field( default=None, metadata={ "help": "Dataset configuration name. Corresponds to the `name` argument of the `datasets.load_dataset` " "function." }, ) dataset_train_split: str = field(default="train", metadata={"help": "Dataset split to use for training."}) dataset_test_split: str = field(default="test", metadata={"help": "Dataset split to use for evaluation."}) gradient_checkpointing_use_reentrant: bool = field( default=False, metadata={"help": "Whether to apply `use_reentrant` for gradient checkpointing."}, ) ignore_bias_buffers: bool = field( default=False, metadata={ "help": "Debug argument for distributed training. Fix for DDP issues with LM bias/mask buffers - invalid " "scalar type, inplace operation. See " "https://github.com/huggingface/transformers/issues/22482#issuecomment-1595790992." 
}, ) def init_zero_verbose(): """ Perform zero verbose init - use this method on top of the CLI modules to make """ import logging import warnings from rich.logging import RichHandler FORMAT = "%(message)s" logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()], level=logging.ERROR) # Custom warning handler to redirect warnings to the logging system def warning_handler(message, category, filename, lineno, file=None, line=None): logging.warning(f"{filename}:{lineno}: {category.__name__}: {message}") # Add the custom warning handler - we need to do that before importing anything to make sure the loggers work well warnings.showwarning = warning_handler class TrlParser(HfArgumentParser): """ A subclass of [`transformers.HfArgumentParser`] designed for parsing command-line arguments with dataclass-backed configurations, while also supporting configuration file loading and environment variable management. Args: dataclass_types (`Union[DataClassType, Iterable[DataClassType]]` or `None`, *optional*, defaults to `None`): Dataclass types to use for argument parsing. **kwargs: Additional keyword arguments passed to the [`transformers.HfArgumentParser`] constructor. Examples: ```yaml # config.yaml env: VAR1: value1 arg1: 23 ``` ```python # main.py import os from dataclasses import dataclass from trl import TrlParser @dataclass class MyArguments: arg1: int arg2: str = "alpha" parser = TrlParser(dataclass_types=[MyArguments]) training_args = parser.parse_args_and_config() print(training_args, os.environ.get("VAR1")) ``` ```bash $ python main.py --config config.yaml (MyArguments(arg1=23, arg2='alpha'),) value1 $ python main.py --arg1 5 --arg2 beta (MyArguments(arg1=5, arg2='beta'),) None ``` """ def __init__( self, dataclass_types: Optional[Union[DataClassType, Iterable[DataClassType]]] = None, **kwargs, ): # Make sure dataclass_types is an iterable if dataclass_types is None: dataclass_types = [] elif not isinstance(dataclass_types, Iterable): dataclass_types = [dataclass_types] # Check that none of the dataclasses have the "config" field for dataclass_type in dataclass_types: if "config" in dataclass_type.__dataclass_fields__: raise ValueError( f"Dataclass {dataclass_type.__name__} has a field named 'config'. This field is reserved for the " f"config file path and should not be used in the dataclass." ) super().__init__(dataclass_types=dataclass_types, **kwargs) def parse_args_and_config( self, args: Optional[Iterable[str]] = None, return_remaining_strings: bool = False ) -> tuple[DataClass, ...]: """ Parse command-line args and config file into instances of the specified dataclass types. This method wraps [`transformers.HfArgumentParser.parse_args_into_dataclasses`] and also parses the config file specified with the `--config` flag. The config file (in YAML format) provides argument values that replace the default values in the dataclasses. Command line arguments can override values set by the config file. The method also sets any environment variables specified in the `env` field of the config file. 
""" args = list(args) if args is not None else sys.argv[1:] if "--config" in args: # Get the config file path from config_index = args.index("--config") args.pop(config_index) # remove the --config flag config_path = args.pop(config_index) # get the path to the config file with open(config_path) as yaml_file: config = yaml.safe_load(yaml_file) # Set the environment variables specified in the config file if "env" in config: env_vars = config.pop("env", {}) if not isinstance(env_vars, dict): raise ValueError("`env` field should be a dict in the YAML file.") for key, value in env_vars.items(): os.environ[key] = str(value) # Set the defaults from the config values config_remaining_strings = self.set_defaults_with_config(**config) else: config_remaining_strings = [] # Parse the arguments from the command line output = self.parse_args_into_dataclasses(args=args, return_remaining_strings=return_remaining_strings) # Merge remaining strings from the config file with the remaining strings from the command line if return_remaining_strings: args_remaining_strings = output[-1] return output[:-1] + (config_remaining_strings + args_remaining_strings,) else: return output def set_defaults_with_config(self, **kwargs) -> list[str]: """ Overrides the parser's default values with those provided via keyword arguments. Any argument with an updated default will also be marked as not required if it was previously required. Returns a list of strings that were not consumed by the parser. """ # If an argument is in the kwargs, update its default and set it as not required for action in self._actions: if action.dest in kwargs: action.default = kwargs.pop(action.dest) action.required = False remaining_strings = [item for key, value in kwargs.items() for item in [f"--{key}", str(value)]] return remaining_strings def get_git_commit_hash(package_name): try: # Import the package to locate its path package = importlib.import_module(package_name) # Get the path to the package using inspect package_path = os.path.dirname(inspect.getfile(package)) # Navigate up to the Git repository root if the package is inside a subdirectory git_repo_path = os.path.abspath(os.path.join(package_path, "..")) git_dir = os.path.join(git_repo_path, ".git") if os.path.isdir(git_dir): # Run the git command to get the current commit hash commit_hash = ( subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=git_repo_path).strip().decode("utf-8") ) return commit_hash else: return None except Exception as e: return f"Error: {str(e)}"
trl/trl/scripts/utils.py/0
{ "file_path": "trl/trl/scripts/utils.py", "repo_id": "trl", "token_count": 3753 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Optional from transformers import TrainingArguments @dataclass class GRPOConfig(TrainingArguments): r""" Configuration class for the [`GRPOTrainer`]. Only the parameters specific to GRPO training are listed here. For details on other parameters, refer to the [`~transformers.TrainingArguments`] documentation. Using [`~transformers.HfArgumentParser`] we can turn this class into [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the command line. Parameters: > Parameters that control the model and reference model model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`): Keyword arguments for [`~transformers.AutoModelForCausalLM.from_pretrained`], used when the `model` argument of the [`GRPOTrainer`] is provided as a string. > Parameters that control the data preprocessing remove_unused_columns (`bool`, *optional*, defaults to `False`): Whether to only keep the column `"prompt"` in the dataset. If you use a custom reward function that requires any column other than `"prompts"` and `"completions"`, you should keep this to `False`. max_prompt_length (`int` or `None`, *optional*, defaults to `512`): Maximum length of the prompt. If the prompt is longer than this value, it will be truncated left. num_generations (`int` or `None`, *optional*, defaults to `8`): Number of generations per prompt to sample. The global batch size (num_processes * per_device_batch_size) must be divisible by this value. temperature (`float`, *optional*, defaults to `0.9`): Temperature for sampling. The higher the temperature, the more random the completions. max_completion_length (`int` or `None`, *optional*, defaults to `256`): Maximum length of the generated completion. ds3_gather_for_generation (`bool`, *optional*, defaults to `True`): This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for generation, improving generation speed. However, disabling this option allows training models that exceed the VRAM capacity of a single GPU, albeit at the cost of slower generation. Disabling this option is not compatible with vLLM generation. > Parameters that control generation acceleration powered by vLLM use_vllm (`bool`, *optional*, defaults to `False`): Whether to use vLLM for generating completions. If set to `True`, ensure that a GPU is kept unused for training, as vLLM will require one for generation. vLLM must be installed (`pip install vllm`). vllm_device (`str`, *optional*, defaults to `"auto"`): Device where vLLM generation will run, e.g. `"cuda:1"`. If set to `"auto"` (default), the system will automatically select the next available GPU after the last one used for training. This assumes that training has not already occupied all available GPUs. 
vllm_gpu_memory_utilization (`float`, *optional*, defaults to `0.9`): Ratio (between 0 and 1) of GPU memory to reserve for the model weights, activations, and KV cache on the device dedicated to generation powered by vLLM. Higher values will increase the KV cache size and thus improve the model's throughput. However, if the value is too high, it may cause out-of-memory (OOM) errors during initialization. vllm_dtype (`str`, *optional*, defaults to `"auto"`): Data type to use for vLLM generation. If set to `"auto"`, the data type will be automatically determined based on the model configuration. Find the supported values in the vLLM documentation. vllm_max_model_len (`int` or `None`, *optional*, defaults to `None`): If set, the `max_model_len` to use for vLLM. This could be useful when running with reduced `vllm_gpu_memory_utilization`, leading to a reduced KV cache size. If not set, vLLM will use the model context size, which might be much larger than the KV cache, leading to inefficiencies. > Parameters that control the training learning_rate (`float`, *optional*, defaults to `1e-6`): Initial learning rate for [`AdamW`] optimizer. The default value replaces that of [`~transformers.TrainingArguments`]. beta (`float`, *optional*, defaults to `0.04`): KL coefficient. sync_ref_model (`bool`, *optional*, defaults to `False`): Whether to synchronize the reference model with the active model every `ref_model_sync_steps` steps, using the `ref_model_mixup_alpha` parameter. This synchronization originites from the [TR-DPO](https://huggingface.co/papers/2404.09656) paper. ref_model_mixup_alpha (`float`, *optional*, defaults to `0.9`): α parameter from the [TR-DPO](https://huggingface.co/papers/2404.09656) paper, which controls the mix between the current policy and the previous reference policy during updates. The reference policy is updated according to the equation: `π_ref = α * π_θ + (1 - α) * π_ref_prev`. To use this parameter, you must set `sync_ref_model=True`. ref_model_sync_steps (`int`, *optional*, defaults to `64`): τ parameter from the [TR-DPO](https://huggingface.co/papers/2404.09656) paper, which determines how frequently the current policy is synchronized with the reference policy. To use this parameter, you must set `sync_ref_model=True`. > Parameters that control the logging log_completions (`bool`, *optional*, defaults to `False`): Whether to log the completions during training. """ # Parameters that control the model and reference model model_init_kwargs: Optional[dict] = field( default=None, metadata={ "help": "Keyword arguments for `transformers.AutoModelForCausalLM.from_pretrained`, used when the `model` " "argument of the `GRPOTrainer` is provided as a string." }, ) # Parameters that control the data preprocessing # The default value remove_unused_columns is overwritten from the parent class, because in GRPO we usually rely on # additional columns to compute the reward remove_unused_columns: Optional[bool] = field( default=False, metadata={ "help": "Whether to only keep the column 'prompt' in the dataset. If you use a custom reward function " "that requires any column other than 'prompts' and 'completions', you should keep this to `False`." }, ) max_prompt_length: Optional[int] = field( default=512, metadata={ "help": "Maximum length of the prompt. If the prompt is longer than this value, it will be truncated left." }, ) num_generations: Optional[int] = field( default=8, metadata={ "help": "Number of generations to sample. 
The global batch size (num_processes * per_device_batch_size) " "must be divisible by this value." }, ) temperature: Optional[float] = field( default=0.9, metadata={"help": "Temperature for sampling. The higher the temperature, the more random the completions."}, ) max_completion_length: Optional[int] = field( default=256, metadata={"help": "Maximum length of the generated completion."}, ) ds3_gather_for_generation: bool = field( default=True, metadata={ "help": "This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for " "generation, improving generation speed. However, disabling this option allows training models that " "exceed the VRAM capacity of a single GPU, albeit at the cost of slower generation. Disabling this option " "is not compatible with vLLM generation." }, ) # Parameters that control generation acceleration powered by vLLM use_vllm: Optional[bool] = field( default=False, metadata={ "help": "Whether to use vLLM for generating completions. If set to `True`, ensure that a GPU is kept " "unused for training, as vLLM will require one for generation. vLLM must be installed " "(`pip install vllm`)." }, ) vllm_device: Optional[str] = field( default="auto", metadata={ "help": "Device where vLLM generation will run, e.g. 'cuda:1'. If set to 'auto' (default), the system " "will automatically select the next available GPU after the last one used for training. This assumes " "that training has not already occupied all available GPUs." }, ) vllm_gpu_memory_utilization: float = field( default=0.9, metadata={ "help": "Ratio (between 0 and 1) of GPU memory to reserve for the model weights, activations, and KV " "cache on the device dedicated to generation powered by vLLM. Higher values will increase the KV cache " "size and thus improve the model's throughput. However, if the value is too high, it may cause " "out-of-memory (OOM) errors during initialization." }, ) vllm_dtype: Optional[str] = field( default="auto", metadata={ "help": "Data type to use for vLLM generation. If set to 'auto', the data type will be automatically " "determined based on the model configuration. Find the supported values in the vLLM documentation." }, ) vllm_max_model_len: Optional[int] = field( default=None, metadata={ "help": "If set, the `max_model_len` to use for vLLM. This could be useful when running with reduced " "`vllm_gpu_memory_utilization`, leading to a reduced KV cache size. If not set, vLLM will use the model " "context size, which might be much larger than the KV cache, leading to inefficiencies." }, ) # Parameters that control the training learning_rate: float = field( default=1e-6, metadata={ "help": "Initial learning rate for `AdamW` optimizer. The default value replaces that of " "`transformers.TrainingArguments`." }, ) beta: float = field( default=0.04, metadata={"help": "KL coefficient."}, ) sync_ref_model: bool = field( default=False, metadata={ "help": "Whether to synchronize the reference model with the active model every `ref_model_sync_steps` " "steps, using the `ref_model_mixup_alpha` parameter." }, ) ref_model_mixup_alpha: float = field( default=0.9, metadata={ "help": "α parameter from the TR-DPO paper, which controls the mix between the current policy and the " "previous reference policy during updates. The reference policy is updated according to the equation: " "`π_ref = α * π_θ + (1 - α) * π_ref_prev`. To use this parameter, you must set `sync_ref_model=True`." 
}, ) ref_model_sync_steps: int = field( default=64, metadata={ "help": "τ parameter from the TR-DPO paper, which determines how frequently the current policy is " "synchronized with the reference policy. To use this parameter, you must set `sync_ref_model=True`." }, ) # Parameters that control the logging log_completions: bool = field( default=False, metadata={"help": "Whether to log the completions during training."}, )
trl/trl/trainer/grpo_config.py/0
{ "file_path": "trl/trl/trainer/grpo_config.py", "repo_id": "trl", "token_count": 4561 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import os import textwrap import warnings from itertools import chain from typing import Callable, Optional, Union import torch import torch.nn as nn from accelerate import PartialState from datasets import Dataset, features from transformers import ( BaseImageProcessor, DataCollator, DataCollatorForTokenClassification, FeatureExtractionMixin, PreTrainedModel, PreTrainedTokenizerBase, ProcessorMixin, Trainer, is_wandb_available, ) from transformers.trainer_callback import TrainerCallback from transformers.trainer_utils import EvalPrediction from transformers.utils import is_peft_available from .prm_config import PRMConfig from .utils import compute_accuracy, disable_dropout_in_model, generate_model_card if is_peft_available(): from peft import PeftModel, get_peft_model, prepare_model_for_kbit_training if is_wandb_available(): import wandb class PRMTrainer(Trainer): """ Initialize PRMTrainer. Args: model (`transformers.PreTrainedModel`): The model to train, preferably an `AutoModelForTokenClassification`. args (`PRMConfig`): The arguments to use for training. data_collator (`transformers.DataCollator`): The data collator to use for training. If None is specified, the default data collator (`DataCollatorForTokenClassification`) will be used which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences. train_dataset (`datasets.Dataset`): The dataset to use for training. eval_dataset (`datasets.Dataset`): The dataset to use for evaluation. processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*): Processing class used to process the data. If provided, will be used to automatically process the inputs for the model, and it will be saved along the model to make it easier to rerun an interrupted training or reuse the fine-tuned model. model_init (`Callable[[], transformers.PreTrainedModel]`): The model initializer to use for training. If None is specified, the default model initializer will be used. compute_metrics (`Callable[[transformers.EvalPrediction], dict]`, *optional* defaults to `compute_accuracy`): The metrics to use for evaluation. If no metrics are specified, the default metric (`compute_accuracy`) will be used. callbacks (`list[transformers.TrainerCallback]`): The callbacks to use for training. optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`): The optimizer and scheduler to use for training. preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`): The function to use to preprocess the logits before computing the metrics. peft_config (`dict`, defaults to `None`): The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model. 
""" _tag_names = ["trl", "prm"] def __init__( self, model: Optional[Union[PreTrainedModel, nn.Module]] = None, args: Optional[PRMConfig] = None, data_collator: Optional[DataCollator] = None, train_dataset: Optional[Dataset] = None, eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None, processing_class: Optional[ Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin] ] = None, model_init: Optional[Callable[[], PreTrainedModel]] = None, compute_metrics: Optional[Callable[[EvalPrediction], dict]] = None, callbacks: Optional[list[TrainerCallback]] = None, optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = ( None, None, ), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, peft_config: Optional[dict] = None, ): if not is_peft_available() and peft_config is not None: raise ValueError( "PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models" ) elif is_peft_available() and peft_config is not None: if not isinstance(model, PeftModel): if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_quantized", False): _supports_gc_kwargs = "gradient_checkpointing_kwargs" in list( inspect.signature(prepare_model_for_kbit_training).parameters ) prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing} if not _supports_gc_kwargs and args.gradient_checkpointing_kwargs is not None: warnings.warn( "You passed `gradient_checkpointing_kwargs` in the trainer's kwargs, but your peft version does not support it. " "please update to the latest version of peft to use `gradient_checkpointing_kwargs`." ) elif _supports_gc_kwargs and args.gradient_checkpointing_kwargs is not None: prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs model = prepare_model_for_kbit_training(model, **prepare_model_kwargs) model = get_peft_model(model, peft_config) # Disable dropout in the model if args.disable_dropout: disable_dropout_in_model(model) if compute_metrics is None: compute_metrics = compute_accuracy if data_collator is None: if processing_class is None: raise ValueError( "A processing_class must be specified when using the default DataCollatorForTokenClassification" ) data_collator = DataCollatorForTokenClassification(processing_class, max_length=args.max_length) if "input_ids" not in train_dataset.column_names: with PartialState().local_main_process_first(): fn_kwargs = { "tokenizer": processing_class, "step_separator": args.step_separator, "max_length": args.max_length, "max_prompt_length": args.max_prompt_length, "max_completion_length": args.max_completion_length, "train_on_last_step_only": args.train_on_last_step_only, } train_fn_kwargs = {**fn_kwargs, "is_eval": False} train_dataset = train_dataset.map( self.tokenize_row, fn_kwargs=train_fn_kwargs, num_proc=args.dataset_num_proc, remove_columns=train_dataset.features, desc="Tokenizing train dataset", features=features.Features( # needed to avoid map to cast labels to bool { "labels": features.Sequence(features.Value("int64")), "input_ids": features.Sequence(features.Value("int64")), } ), ) eval_fn_kwargs = {**fn_kwargs, "is_eval": True} if eval_dataset is not None: eval_dataset = eval_dataset.map( self.tokenize_row, fn_kwargs=eval_fn_kwargs, num_proc=args.dataset_num_proc, remove_columns=eval_dataset.features, desc="Tokenizing eval dataset", features=features.Features( # needed to avoid map to cast labels to bool 
{ "labels": features.Sequence(features.Value("int64")), "input_ids": features.Sequence(features.Value("int64")), } ), ) super().__init__( model=model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, processing_class=processing_class, model_init=model_init, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics, ) # Add tags for models that have been loaded with the correct transformers version if hasattr(self.model, "add_model_tags"): self.model.add_model_tags(self._tag_names) @staticmethod def tokenize_row( features, tokenizer, step_separator, max_length, max_prompt_length, max_completion_length, train_on_last_step_only, is_eval, ): r""" Tokenize a row of the dataset. Args: features (`dict[str, str]`): Row of the dataset, should contain the keys `"prompt"`, `"completions"`, and `"labels"`. tokenizer (`PreTrainedTokenizerBase`): Tokenizer used to process the data. step_separator (`str`): Separator between steps in the completion. max_length (`int` or `None`): Maximum length of the sequences (prompt + completion). If `None`, the sequences are not truncated. max_prompt_length (`int` or `None`): Maximum length of the prompt. If `None`, the prompt is not truncated. max_completion_length (`int` or `None`): Maximum length of the completion sequences. If `None`, the completion sequences are not truncated. train_on_last_step_only (`bool`): Whether to train only on the last step. If `True`, the labels are `-100` for all tokens except the last token of the completion. is_eval (`bool`): Whether the function is used to tokenize samples from a training or an evaluation dataset. Used only if `train_on_last_step_only` is set to `True`. Returns: `dict[str, list[int]]`: Tokenized sequences with the keys `"input_ids"`, and `"labels". Example: ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B") >>> features = {"prompt": "Which number is larger, 9.8 or 9.11?", ... "completions": ["11 is greater than 8.", ... "Hence, 9.11 > 9.8."], ... 
"labels": [True, False]} >>> PRMTrainer.tokenize_row(features, tokenizer, "\n", max_completion_length=None, train_on_last_step_only=False, is_eval=False) {'input_ids': [23085, 1372, 374, 8131, 11, 220, 24, 13, 23, 476, 220, 24, 13, 16, 16, 30, 16, 16, 374, 7046, 1091, 220, 23, 13, 198, 39, 763, 11, 220, 24, 13, 16, 16, 861, 220, 24, 13, 23, 13, 198], 'labels': [-100, -100, -100, -100, -100, -100, -100, -100, 1, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 0]} ``` """ # Tokenize the prompt and completions prompt_ids = tokenizer(features["prompt"], add_special_tokens=False)["input_ids"] completions_ids = [ tokenizer(completion, add_special_tokens=False)["input_ids"] for completion in features["completions"] ] if train_on_last_step_only and not is_eval: labels = [-100] * (len(features["labels"]) - 1) + [int(features["labels"][-1])] else: labels = [int(label) for label in features["labels"]] # Get the ID of the separator token and add it to the completions separator_ids = tokenizer.encode(step_separator, add_special_tokens=False) completions_ids = [completion + separator_ids for completion in completions_ids] # Create the label labels = [[-100] * (len(completion) - 1) + [label] for completion, label in zip(completions_ids, labels)] # Join the completions and labels steps completion_ids = list(chain(*completions_ids)) labels = list(chain(*labels)) if tokenizer.bos_token_id is not None: prompt_ids = [tokenizer.bos_token_id] + prompt_ids # Truncate prompt and completion sequences if max_prompt_length is not None: prompt_ids = prompt_ids[-max_prompt_length:] if max_completion_length is not None: completion_ids = completion_ids[:max_completion_length] labels = labels[:max_completion_length] input_ids = prompt_ids + completion_ids labels = [-100] * len(prompt_ids) + labels if max_length is not None: input_ids = input_ids[:max_length] labels = labels[:max_length] return {"input_ids": input_ids, "labels": labels} def create_model_card( self, model_name: Optional[str] = None, dataset_name: Optional[str] = None, tags: Union[str, list[str], None] = None, ): """ Creates a draft of a model card using the information available to the `Trainer`. Args: model_name (`str` or `None`, *optional*, defaults to `None`): Name of the model. dataset_name (`str` or `None`, *optional*, defaults to `None`): Name of the dataset used for training. tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`): Tags to be associated with the model card. 
""" if not self.is_world_process_zero(): return if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path): base_model = self.model.config._name_or_path else: base_model = None tags = tags or [] if isinstance(tags, str): tags = [tags] if hasattr(self.model.config, "unsloth_version"): tags.append("unsloth") citation = textwrap.dedent("""\ @article{uesato2022solving, title = {{Solving Math Word Problems With Process- and Outcome-Based Feedback}}, author = {Uesato, Jonathan and Kushman, Nate and Kumar, Ramana and Song, Francis and Siegel, Noah and Wang, Lisa and Creswell, Antonia and Irving, Geoffrey and Higgins, Irina}, year = 2022, journal = {arXiv preprint arXiv:2211.14275} }""") model_card = generate_model_card( base_model=base_model, model_name=model_name, hub_model_id=self.hub_model_id, dataset_name=dataset_name, tags=tags, wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None, trainer_name="PRM", trainer_citation=citation, paper_title="Solving math word problems with process-and outcome-based feedback", ) model_card.save(os.path.join(self.args.output_dir, "README.md"))
trl/trl/trainer/prm_trainer.py/0
{ "file_path": "trl/trl/trainer/prm_trainer.py", "repo_id": "trl", "token_count": 7231 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Launching distributed training from Jupyter Notebooks This tutorial teaches you how to fine tune a computer vision model with 🤗 Accelerate from a Jupyter Notebook on a distributed system. You will also learn how to setup a few requirements needed for ensuring your environment is configured properly, your data has been prepared properly, and finally how to launch training. <Tip> This tutorial is also available as a Jupyter Notebook [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_cv_example.ipynb) </Tip> ## Configuring the Environment Before any training can be performed, a Accelerate config file must exist in the system. Usually this can be done by running the following in a terminal and answering the prompts: ```bash accelerate config ``` However, if general defaults are fine and you are *not* running on a TPU, Accelerate has a utility to quickly write your GPU configuration into a config file via [`utils.write_basic_config`]. The following code will restart Jupyter after writing the configuration, as CUDA code was called to perform this. <Tip warning={true}> CUDA can't be initialized more than once on a multi-GPU system. It's fine to debug in the notebook and have calls to CUDA, but in order to finally train a full cleanup and restart will need to be performed. </Tip> ```python import os from accelerate.utils import write_basic_config write_basic_config() # Write a config file os._exit(00) # Restart the notebook ``` ## Preparing the Dataset and Model Next you should prepare your dataset. As mentioned at earlier, great care should be taken when preparing the `DataLoaders` and model to make sure that **nothing** is put on *any* GPU. If you do, it is recommended to put that specific code into a function and call that from within the notebook launcher interface, which will be shown later. Make sure the dataset is downloaded based on the directions [here](https://github.com/huggingface/accelerate/tree/main/examples#simple-vision-example) ```python import os, re, torch, PIL import numpy as np from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator from accelerate.utils import set_seed from timm import create_model ``` First you need to create a function to extract the class name based on a filename: ```python import os data_dir = "../../images" fnames = os.listdir(data_dir) fname = fnames[0] print(fname) ``` ```python out beagle_32.jpg ``` In the case here, the label is `beagle`. 
Using regex you can extract the label from the filename: ```python import re def extract_label(fname): stem = fname.split(os.path.sep)[-1] return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0] ``` ```python extract_label(fname) ``` And you can see it properly returned the right name for our file: ```python out "beagle" ``` Next a `Dataset` class should be made to handle grabbing the image and the label: ```python class PetsDataset(Dataset): def __init__(self, file_names, image_transform=None, label_to_id=None): self.file_names = file_names self.image_transform = image_transform self.label_to_id = label_to_id def __len__(self): return len(self.file_names) def __getitem__(self, idx): fname = self.file_names[idx] raw_image = PIL.Image.open(fname) image = raw_image.convert("RGB") if self.image_transform is not None: image = self.image_transform(image) label = extract_label(fname) if self.label_to_id is not None: label = self.label_to_id[label] return {"image": image, "label": label} ``` Now to build the dataset. Outside the training function you can find and declare all the filenames and labels and use them as references inside the launched function: ```python fnames = [os.path.join("../../images", fname) for fname in fnames if fname.endswith(".jpg")] ``` Next gather all the labels: ```python all_labels = [extract_label(fname) for fname in fnames] id_to_label = list(set(all_labels)) id_to_label.sort() label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)} ``` Next, you should make a `get_dataloaders` function that will return your built dataloaders for you. As mentioned earlier, if data is automatically sent to the GPU or a TPU device when building your `DataLoaders`, they must be built using this method. ```python def get_dataloaders(batch_size: int = 64): "Builds a set of dataloaders with a batch_size" random_perm = np.random.permutation(len(fnames)) cut = int(0.8 * len(fnames)) train_split = random_perm[:cut] eval_split = random_perm[cut:] # For training a simple RandomResizedCrop will be used train_tfm = Compose([RandomResizedCrop((224, 224), scale=(0.5, 1.0)), ToTensor()]) train_dataset = PetsDataset([fnames[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id) # For evaluation a deterministic Resize will be used eval_tfm = Compose([Resize((224, 224)), ToTensor()]) eval_dataset = PetsDataset([fnames[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id) # Instantiate dataloaders train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4) eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size * 2, num_workers=4) return train_dataloader, eval_dataloader ``` Finally, you should import the scheduler to be used later: ```python from torch.optim.lr_scheduler import CosineAnnealingLR ``` ## Writing the Training Function Now you can build the training loop. [`notebook_launcher`] works by passing in a function to call that will be ran across the distributed system. Here is a basic training loop for the animal classification problem: <Tip> The code has been split up to allow for explanations on each section. A full version that can be copy and pasted will be available at the end </Tip> ```python def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64): set_seed(seed) accelerator = Accelerator(mixed_precision=mixed_precision) ``` First you should set the seed and create an [`Accelerator`] object as early in the training loop as possible. 
<Tip warning={true}> If training on the TPU, your training loop should take in the model as a parameter and it should be instantiated outside of the training loop function. See the [TPU best practices](../concept_guides/training_tpu) to learn why </Tip> Next you should build your dataloaders and create your model: ```python train_dataloader, eval_dataloader = get_dataloaders(batch_size) model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) ``` <Tip> You build the model here so that the seed also controls the new weight initialization </Tip> As you are performing transfer learning in this example, the encoder of the model starts out frozen so the head of the model can be trained only initially: ```python for param in model.parameters(): param.requires_grad = False for param in model.get_classifier().parameters(): param.requires_grad = True ``` Normalizing the batches of images will make training a little faster: ```python mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None] std = torch.tensor(model.default_cfg["std"])[None, :, None, None] ``` To make these constants available on the active device, you should set it to the Accelerator's device: ```python mean = mean.to(accelerator.device) std = std.to(accelerator.device) ``` Next instantiate the rest of the PyTorch classes used for training: ```python optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25) lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=3e-2, epochs=5, steps_per_epoch=len(train_dataloader)) ``` Before passing everything to [`~Accelerator.prepare`]. <Tip> There is no specific order to remember, you just need to unpack the objects in the same order you gave them to the prepare method. </Tip> ```python model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) ``` Now train the model: ```python for epoch in range(5): model.train() for batch in train_dataloader: inputs = (batch["image"] - mean) / std outputs = model(inputs) loss = torch.nn.functional.cross_entropy(outputs, batch["label"]) accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() ``` The evaluation loop will look slightly different compared to the training loop. The number of elements passed as well as the overall total accuracy of each batch will be added to two constants: ```python model.eval() accurate = 0 num_elems = 0 ``` Next you have the rest of your standard PyTorch loop: ```python for batch in eval_dataloader: inputs = (batch["image"] - mean) / std with torch.no_grad(): outputs = model(inputs) predictions = outputs.argmax(dim=-1) ``` Before finally the last major difference. 
When performing distributed evaluation, the predictions and labels need to be passed through [`~Accelerator.gather`] so that all of the data is available on the current device and a properly calculated metric can be achieved: ```python accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch["label"]) num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() ``` Now you just need to calculate the actual metric for this problem, and you can print it on the main process using [`~Accelerator.print`]: ```python eval_metric = accurate.item() / num_elems accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}") ``` A full version of this training loop is available below: ```python def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64): set_seed(seed) # Initialize accelerator accelerator = Accelerator(mixed_precision=mixed_precision) # Build dataloaders train_dataloader, eval_dataloader = get_dataloaders(batch_size) # Instantiate the model (you build the model here so that the seed also controls new weight initializations) model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) # Freeze the base model for param in model.parameters(): param.requires_grad = False for param in model.get_classifier().parameters(): param.requires_grad = True # You can normalize the batches of images to be a bit faster mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None] std = torch.tensor(model.default_cfg["std"])[None, :, None, None] # To make these constants available on the active device, set it to the accelerator device mean = mean.to(accelerator.device) std = std.to(accelerator.device) # Instantiate the optimizer optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25) # Instantiate the learning rate scheduler lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=3e-2, epochs=5, steps_per_epoch=len(train_dataloader)) # Prepare everything # There is no specific order to remember, you just need to unpack the objects in the same order you gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now you train the model for epoch in range(5): model.train() for batch in train_dataloader: inputs = (batch["image"] - mean) / std outputs = model(inputs) loss = torch.nn.functional.cross_entropy(outputs, batch["label"]) accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() accurate = 0 num_elems = 0 for batch in eval_dataloader: inputs = (batch["image"] - mean) / std with torch.no_grad(): outputs = model(inputs) predictions = outputs.argmax(dim=-1) accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch["label"]) num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() eval_metric = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}") ``` ## Using the notebook_launcher All that's left is to use the [`notebook_launcher`]. You pass in the function, the arguments (as a tuple), and the number of processes to train on. 
(See the [documentation](../package_reference/launchers) for more information)

```python
from accelerate import notebook_launcher
```

```python
args = ("fp16", 42, 64)
notebook_launcher(training_loop, args, num_processes=2)
```

In the case of running on multiple nodes, you need to set up a Jupyter session at each node and run the launching cell at the same time.

For an environment containing 2 nodes (computers) with 8 GPUs each and the main computer with an IP address of "172.31.43.8", it would look like so:

```python
notebook_launcher(training_loop, args, master_addr="172.31.43.8", node_rank=0, num_nodes=2, num_processes=8)
```

And in the second Jupyter session on the other machine:

<Tip>

Notice how the `node_rank` has changed

</Tip>

```python
notebook_launcher(training_loop, args, master_addr="172.31.43.8", node_rank=1, num_nodes=2, num_processes=8)
```

In the case of running on the TPU, it would look like so:

```python
model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
args = (model, "fp16", 42, 64)
notebook_launcher(training_loop, args, num_processes=8)
```

To launch the training process with elasticity, enabling fault tolerance, you can use the `elastic_launch` feature provided by PyTorch. This requires setting additional parameters such as `rdzv_backend` and `max_restarts`. Here is an example of how to use `notebook_launcher` with elastic capabilities:

```python
notebook_launcher(
    training_loop,
    args,
    num_processes=2,
    max_restarts=3
)
```

As it runs, it will print the progress as well as how many devices you ran on. This tutorial was run with two GPUs:

```python out
Launching training on 2 GPUs.
epoch 0: 88.12
epoch 1: 91.73
epoch 2: 92.58
epoch 3: 93.90
epoch 4: 94.71
```

And that's it! Please note that [`notebook_launcher`] ignores the Accelerate config file; to launch based on the config, use:

```bash
accelerate launch
```

## Debugging

A common issue when running `notebook_launcher` is receiving a "CUDA has already been initialized" error. This usually stems from an import or prior code in the notebook that makes a call to the PyTorch `torch.cuda` sublibrary. To help narrow down what went wrong, you can launch the `notebook_launcher` with `ACCELERATE_DEBUG_MODE=yes` in your environment, and an additional check will be made when spawning that a regular process can be created and utilize CUDA without issue. (Your CUDA code can still be run afterwards.)

## Conclusion

This notebook showed how to perform distributed training from inside of a Jupyter Notebook. Some key notes to remember:

- Make sure to save any code that uses CUDA (or CUDA imports) for the function passed to [`notebook_launcher`]
- Set the `num_processes` to be the number of devices used for training (such as number of GPUs, CPUs, TPUs, etc.)
- If using the TPU, declare your model outside the training loop function
accelerate/docs/source/basic_tutorials/notebook.md/0
{ "file_path": "accelerate/docs/source/basic_tutorials/notebook.md", "repo_id": "accelerate", "token_count": 5694 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Accelerator

The [`Accelerator`] is the main class for enabling distributed training on any type of training setup. Read the [Add Accelerator to your code](../basic_tutorials/migration) tutorial to learn more about how to add the [`Accelerator`] to your script.

## Accelerator[[api]]

[[autodoc]] Accelerator

## Utilities

[[autodoc]] accelerate.utils.gather_object
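To give a sense of what the utility above does, here is a minimal usage sketch (not part of the generated reference): `gather_object` collects arbitrary picklable Python objects from every process.

```python
from accelerate import Accelerator
from accelerate.utils import gather_object

accelerator = Accelerator()

# Each process contributes its own Python objects; after gathering, every
# process holds the combined list from all ranks.
local_results = [f"result from rank {accelerator.process_index}"]
all_results = gather_object(local_results)

accelerator.print(all_results)
```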
accelerate/docs/source/package_reference/accelerator.md/0
{ "file_path": "accelerate/docs/source/package_reference/accelerator.md", "repo_id": "accelerate", "token_count": 289 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Big Model Inference

One of the biggest advancements Accelerate provides is [Big Model Inference](../concept_guides/big_model_inference), which allows you to perform inference with models that don't fully fit on your graphics card.

This tutorial will show you how to use Big Model Inference in Accelerate and the Hugging Face ecosystem.

## Accelerate

A typical workflow for loading a PyTorch model is shown below. `ModelClass` is a model that exceeds the GPU memory of your device (mps, cuda, or xpu).

```py
import torch

my_model = ModelClass(...)
state_dict = torch.load(checkpoint_file)
my_model.load_state_dict(state_dict)
```

With Big Model Inference, the first step is to init an empty skeleton of the model with the `init_empty_weights` context manager. This doesn't require any memory because `my_model` is "parameterless".

```py
from accelerate import init_empty_weights

with init_empty_weights():
    my_model = ModelClass(...)
```

Next, the weights are loaded into the model for inference.

The [`load_checkpoint_and_dispatch`] method loads a checkpoint inside your empty model and dispatches the weights for each layer across all available devices, starting with the fastest devices (GPU, MPS, XPU, NPU, MLU, MUSA) before moving to the slower ones (CPU and hard drive).

Setting `device_map="auto"` automatically fills all available space on the GPU(s) first, then the CPU, and finally, the hard drive (the absolute slowest option) if there is still not enough memory.

> [!TIP]
> Refer to the [Designing a device map](../concept_guides/big_model_inference#designing-a-device-map) guide for more details on how to design your own device map.

```py
from accelerate import load_checkpoint_and_dispatch

model = load_checkpoint_and_dispatch(
    model, checkpoint=checkpoint_file, device_map="auto"
)
```

If there are certain “chunks” of layers that shouldn’t be split, pass them to `no_split_module_classes` (see [here](../concept_guides/big_model_inference#loading-weights) for more details); a short sketch of this appears below.

A model's weights can also be sharded into multiple checkpoints to save memory, such as when the `state_dict` doesn't fit in memory (see [here](../concept_guides/big_model_inference#sharded-checkpoints) for more details).

Now that the model is fully dispatched, you can perform inference.

```py
input = torch.randn(2,3)
device_type = next(iter(model.parameters())).device.type
input = input.to(device_type)
output = model(input)
```

Each time an input is passed through a layer, it is sent from the CPU to the GPU (or disk to CPU to GPU), the output is calculated, and the layer is removed from the GPU going back down the line. While this adds some overhead to inference, it enables you to run any size model on your system, as long as the largest layer fits on your GPU.
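If you need finer control over placement, `load_checkpoint_and_dispatch` also accepts a per-device memory budget via `max_memory` in addition to `no_split_module_classes`. The snippet below is a minimal sketch that reuses the `model` and `checkpoint_file` from above; `"Block"` is a placeholder for whatever your model's residual block class is actually called.

```py
from accelerate import load_checkpoint_and_dispatch

model = load_checkpoint_and_dispatch(
    model,
    checkpoint=checkpoint_file,
    device_map="auto",
    # Cap how much of GPU 0 and the CPU may be used; whatever doesn't fit
    # is offloaded further down the line (e.g. to disk).
    max_memory={0: "10GiB", "cpu": "30GiB"},
    # Keep each block on a single device instead of splitting it across devices.
    no_split_module_classes=["Block"],
)
```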
Multiple GPUs, or "model parallelism", can be utilized, but only one GPU will be active at any given moment. This forces each GPU to wait for the previous GPU to send it the output. You should launch your script normally with Python, rather than with tools like torchrun or accelerate launch.

> [!TIP]
> You may also be interested in *pipeline parallelism*, which utilizes all available GPUs at once instead of only having one GPU active at a time. This approach is less flexible though. For more details, refer to the [Memory-efficient pipeline parallelism](./distributed_inference#memory-efficient-pipeline-parallelism-experimental) guide.

<Youtube id="MWCSGj9jEAo"/>

Take a look at a full example of Big Model Inference below.

```py
import torch
from accelerate import init_empty_weights, load_checkpoint_and_dispatch

with init_empty_weights():
    model = MyModel(...)

model = load_checkpoint_and_dispatch(
    model, checkpoint=checkpoint_file, device_map="auto"
)

input = torch.randn(2,3)
device_type = next(iter(model.parameters())).device.type
input = input.to(device_type)
output = model(input)
```

## Hugging Face ecosystem

Other libraries in the Hugging Face ecosystem, like Transformers or Diffusers, support Big Model Inference in their [`~transformers.PreTrainedModel.from_pretrained`] constructors.

You just need to add `device_map="auto"` in [`~transformers.PreTrainedModel.from_pretrained`] to enable Big Model Inference. For example, load BigScience's T0pp, an 11 billion parameter model, with Big Model Inference.

```py
from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto")
```

After loading the model, the empty init and smart dispatch steps from before are executed and the model is fully ready to make use of all the resources in your machine. Through these constructors, you can also save more memory by specifying the `torch_dtype` parameter to load a model in a lower precision.

```py
from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto", torch_dtype=torch.float16)
```

## Next steps

For a more detailed explanation of Big Model Inference, make sure to check out the [conceptual guide](../concept_guides/big_model_inference)!
accelerate/docs/source/usage_guides/big_modeling.md/0
{ "file_path": "accelerate/docs/source/usage_guides/big_modeling.md", "repo_id": "accelerate", "token_count": 1618 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Model quantization ## `bitsandbytes` Integration Accelerate brings `bitsandbytes` quantization to your model. You can now load any pytorch model in 8-bit or 4-bit with a few lines of code. If you want to use Transformers models with `bitsandbytes`, you should follow this [documentation](https://huggingface.co/docs/transformers/main_classes/quantization). To learn more about how the `bitsandbytes` quantization works, check out the blog posts on [8-bit quantization](https://huggingface.co/blog/hf-bitsandbytes-integration) and [4-bit quantization](https://huggingface.co/blog/4bit-transformers-bitsandbytes). ### Pre-Requisites You will need to install the following requirements: - Install `bitsandbytes` library ```bash pip install bitsandbytes ``` For non-cuda devices, you can refer to the bitsandbytes installation guide [here](https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backend). - Install latest `accelerate` from source ```bash pip install git+https://github.com/huggingface/accelerate.git ``` - Install `minGPT` and `huggingface_hub` to run examples ```bash git clone https://github.com/karpathy/minGPT.git pip install minGPT/ pip install huggingface_hub ``` ### How it works First, we need to initialize our model. To save memory, we can initialize an empty model using the context manager [`init_empty_weights`]. Let's take the GPT2 model from minGPT library. ```py from accelerate import init_empty_weights from mingpt.model import GPT model_config = GPT.get_default_config() model_config.model_type = 'gpt2-xl' model_config.vocab_size = 50257 model_config.block_size = 1024 with init_empty_weights(): empty_model = GPT(model_config) ``` Then, we need to get the path to the weights of your model. The path can be the state_dict file (e.g. "pytorch_model.bin") or a folder containing the sharded checkpoints. ```py from huggingface_hub import snapshot_download weights_location = snapshot_download(repo_id="marcsun13/gpt2-xl-linear-sharded") ``` Finally, you need to set your quantization configuration with [`~utils.BnbQuantizationConfig`]. Here's an example for 8-bit quantization: ```py from accelerate.utils import BnbQuantizationConfig bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold = 6) ``` Here's an example for 4-bit quantization: ```py from accelerate.utils import BnbQuantizationConfig bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4") ``` To quantize your empty model with the selected configuration, you need to use [`~utils.load_and_quantize_model`]. 
```py
from accelerate.utils import load_and_quantize_model

quantized_model = load_and_quantize_model(empty_model, weights_location=weights_location, bnb_quantization_config=bnb_quantization_config, device_map="auto")
```

### Saving and loading 8-bit model

You can save your 8-bit model with Accelerate using [`~Accelerator.save_model`].

```py
from accelerate import Accelerator

accelerate = Accelerator()
new_weights_location = "path/to/save_directory"
accelerate.save_model(quantized_model, new_weights_location)

quantized_model_from_saved = load_and_quantize_model(empty_model, weights_location=new_weights_location, bnb_quantization_config=bnb_quantization_config, device_map="auto")
```

Note that 4-bit model serialization is currently not supported.

### Offload modules to cpu and disk

You can offload some modules to CPU/disk if you don't have enough GPU space to store the entire model. This uses Big Model Inference under the hood. Check this [documentation](https://huggingface.co/docs/accelerate/usage_guides/big_modeling) for more details.

For 8-bit quantization, the selected modules will be converted to 8-bit precision.

For 4-bit quantization, the selected modules will be kept in the `torch_dtype` that the user passed in `BnbQuantizationConfig`. We will add support to convert these offloaded modules to 4-bit once 4-bit serialization becomes possible.

You just need to pass a custom `device_map` in order to offload modules to CPU/disk. The offloaded modules will be dispatched to the GPU when needed. Here's an example:

```py
device_map = {
    "transformer.wte": 0,
    "transformer.wpe": 0,
    "transformer.drop": 0,
    "transformer.h": "cpu",
    "transformer.ln_f": "disk",
    "lm_head": "disk",
}
```

A short sketch of passing this map to [`~utils.load_and_quantize_model`] is included at the end of this page.

### Fine-tune a quantized model

It is not possible to perform pure 8-bit or 4-bit training on these models. However, you can train them by leveraging parameter-efficient fine-tuning (PEFT) methods, for example by training adapters on top of them. Please have a look at the [peft](https://github.com/huggingface/peft) library for more details.

Currently, you can't add adapters on top of an arbitrary quantized model. However, with the official support of adapters for Transformers models, you can fine-tune quantized models. If you want to fine-tune a Transformers model, follow this [documentation](https://huggingface.co/docs/transformers/main_classes/quantization) instead. Check out this [demo](https://colab.research.google.com/drive/1VoYNfYDKcKRQRor98Zbf2-9VQTtGJ24k?usp=sharing) on how to fine-tune a 4-bit Transformers model.

Note that you don't need to pass `device_map` when loading the model for training. It will automatically load your model on your GPU. Please note that `device_map="auto"` should be used for inference only.

### Example demo - running GPT2 1.5b on a Google Colab

Check out the Google Colab [demo](https://colab.research.google.com/drive/1T1pOgewAWVpR9gKpaEWw4orOrzPFb3yM?usp=sharing) for running quantized models on a GPT2 model. The GPT2-1.5B model checkpoint is in FP32 and uses 6GB of memory. After quantization, it uses 1.6GB with 8-bit modules and 1.2GB with 4-bit modules.
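As referenced in the offloading section above, here is a minimal sketch of how such a custom `device_map` could be passed to [`~utils.load_and_quantize_model`]. It reuses `empty_model`, `weights_location`, and `bnb_quantization_config` from earlier sections; the `"offload"` directory is just a placeholder path on your machine.

```py
from accelerate.utils import load_and_quantize_model

quantized_model = load_and_quantize_model(
    empty_model,
    weights_location=weights_location,
    bnb_quantization_config=bnb_quantization_config,
    device_map=device_map,       # the custom map defined above
    offload_folder="offload",    # placeholder scratch directory for the layers mapped to "disk"
)
```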
accelerate/docs/source/usage_guides/quantization.md/0
{ "file_path": "accelerate/docs/source/usage_guides/quantization.md", "repo_id": "accelerate", "token_count": 1998 }
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a ResNet50 on the Oxford-IIT Pet Dataset # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## # Function to get the label from the filename def extract_label(fname): stem = fname.split(os.path.sep)[-1] return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0] class PetsDataset(Dataset): def __init__(self, file_names, image_transform=None, label_to_id=None): self.file_names = file_names self.image_transform = image_transform self.label_to_id = label_to_id def __len__(self): return len(self.file_names) def __getitem__(self, idx): fname = self.file_names[idx] raw_image = PIL.Image.open(fname) image = raw_image.convert("RGB") if self.image_transform is not None: image = self.image_transform(image) label = extract_label(fname) if self.label_to_id is not None: label = self.label_to_id[label] return {"image": image, "label": label} def training_function(config, args): # Initialize accelerator accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) image_size = config["image_size"] if not isinstance(image_size, (list, tuple)): image_size = (image_size, image_size) # Grab all the image filenames file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")] # Build the label correspondences all_labels = [extract_label(fname) for fname in file_names] id_to_label = list(set(all_labels)) id_to_label.sort() label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)} # Set the seed before splitting the data. 
np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) # Split our filenames between train and validation random_perm = np.random.permutation(len(file_names)) cut = int(0.8 * len(file_names)) train_split = random_perm[:cut] eval_split = random_perm[cut:] # For training we use a simple RandomResizedCrop train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()]) train_dataset = PetsDataset( [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id ) # For evaluation, we use a deterministic Resize eval_tfm = Compose([Resize(image_size), ToTensor()]) eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id) # Instantiate dataloaders. train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4) eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Freezing the base model for param in model.parameters(): param.requires_grad = False for param in model.get_classifier().parameters(): param.requires_grad = True # We normalize the batches of images to be a bit faster. mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device) std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device) # Instantiate optimizer optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25) # Instantiate learning rate scheduler lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader)) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now we train the model for epoch in range(num_epochs): model.train() for step, batch in enumerate(train_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch = {k: v.to(accelerator.device) for k, v in batch.items()} inputs = (batch["image"] - mean) / std outputs = model(inputs) loss = torch.nn.functional.cross_entropy(outputs, batch["label"]) accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() accurate = 0 num_elems = 0 for _, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch = {k: v.to(accelerator.device) for k, v in batch.items()} inputs = (batch["image"] - mean) / std with torch.no_grad(): outputs = model(inputs) predictions = outputs.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["label"])) accurate_preds = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() eval_metric = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}") accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument("--data_dir", required=True, help="The data folder on disk.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") args = parser.parse_args() config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224} training_function(config, args) if __name__ == "__main__": main()
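# Example invocations (a sketch; the dataset path is a placeholder for wherever the
# Oxford-IIIT Pet images live on your machine):
#   python cv_example.py --data_dir path/to/pets/images
#   accelerate launch cv_example.py --data_dir path/to/pets/images --mixed_precision fp16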
accelerate/examples/cv_example.py/0
{ "file_path": "accelerate/examples/cv_example.py", "repo_id": "accelerate", "token_count": 3215 }
#!/usr/bin/env python # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from ...utils import ( ComputeEnvironment, DistributedType, is_deepspeed_available, is_fp8_available, is_mlu_available, is_mps_available, is_msamp_available, is_musa_available, is_npu_available, is_transformer_engine_available, is_transformers_available, is_xpu_available, ) from ...utils.constants import ( DEEPSPEED_MULTINODE_LAUNCHERS, FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, TORCH_DYNAMO_MODES, ) from .config_args import ClusterConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_distributed_mode, _convert_dynamo_backend, _convert_fp8_backend, _convert_mixed_precision, _convert_yes_no_to_bool, ) def get_cluster_input(): distributed_type = _ask_options( "Which type of machine are you using?", [ "No distributed training", "multi-CPU", "multi-XPU", "multi-GPU", "multi-NPU", "multi-MLU", "multi-MUSA", "TPU", ], _convert_distributed_mode, ) machine_rank = 0 num_machines = 1 num_processes = 1 gpu_ids = None main_process_ip = None main_process_port = None rdzv_backend = "static" same_network = True debug = False if distributed_type in [ DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.MULTI_CPU, ]: num_machines = _ask_field( "How many different machines will you use (use more than 1 for multi-node training)? [1]: ", int, default=1, ) if num_machines > 1: machine_rank = _ask_options( "What is the rank of this machine?", list(range(num_machines)), int, ) main_process_ip = _ask_field( "What is the IP address of the machine that will host the main process? ", ) main_process_port = _ask_field( "What is the port you will use to communicate with the main process? ", int, ) same_network = _ask_field( "Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) if not same_network: rdzv_backend = _ask_field( "What rendezvous backend will you use? ('static', 'c10d', ...): ", default="static" ) debug = _ask_field( "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if distributed_type == DistributedType.NO: use_cpu = _ask_field( "Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)? [yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) elif distributed_type == DistributedType.MULTI_CPU: use_cpu = True else: use_cpu = False ipex_config = {} mpirun_config = {} if use_cpu: ipex_config["ipex"] = _ask_field( "Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? 
[yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if distributed_type == DistributedType.MULTI_CPU: use_mpirun = _ask_field( "Do you want accelerate to launch mpirun? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_mpirun: mpirun_hostfile = _ask_field( "Please enter the path to the hostfile to use with mpirun [~/hostfile]: ", str, default="~/hostfile", ) mpirun_config["mpirun_hostfile"] = os.path.expanduser(mpirun_hostfile.strip()) mpirun_config["mpirun_ccl"] = _ask_field("Enter the number of oneCCL worker threads [1]: ", default=1) if ( not use_cpu and is_xpu_available() and distributed_type not in [ DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.XLA, DistributedType.MULTI_MUSA, ] ): ipex_config["use_xpu"] = _ask_field( "Do you want to use XPU plugin to speed up training on XPU? [yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) dynamo_config = {} use_dynamo = _ask_field( "Do you wish to optimize your script with torch dynamo?[yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_dynamo: prefix = "dynamo_" dynamo_config[prefix + "backend"] = _ask_options( "Which dynamo backend would you like to use?", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) use_custom_options = _ask_field( "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_custom_options: dynamo_config[prefix + "mode"] = _ask_options( "Which mode do you want to use?", TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default=0, ) dynamo_config[prefix + "use_fullgraph"] = _ask_field( "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) dynamo_config[prefix + "use_dynamic"] = _ask_field( "Do you want to enable dynamic shape tracing? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) use_mps = not use_cpu and is_mps_available() deepspeed_config = {} if ( distributed_type in [ DistributedType.MULTI_GPU, DistributedType.MULTI_XPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.NO, ] and not use_mps ): use_deepspeed = _ask_field( "Do you want to use DeepSpeed? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_deepspeed: distributed_type = DistributedType.DEEPSPEED assert ( is_deepspeed_available() ), "DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source" if distributed_type == DistributedType.DEEPSPEED: use_deepspeed_config = _ask_field( "Do you want to specify a json file to a DeepSpeed config? 
[yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_deepspeed_config: deepspeed_config["deepspeed_config_file"] = _ask_field( "Please enter the path to the json DeepSpeed config file: ", str, default="none", ) else: deepspeed_config["zero_stage"] = _ask_options( "What should be your DeepSpeed's ZeRO optimization stage?", [0, 1, 2, 3], int, default=2, ) deepspeed_devices = ["none", "cpu", "nvme"] if deepspeed_config["zero_stage"] >= 2: deepspeed_config["offload_optimizer_device"] = _ask_options( "Where to offload optimizer states?", deepspeed_devices, lambda x: deepspeed_devices[int(x)] ) deepspeed_config["offload_param_device"] = _ask_options( "Where to offload parameters?", deepspeed_devices, lambda x: deepspeed_devices[int(x)] ) if deepspeed_config["offload_param_device"] == "nvme": deepspeed_config["offload_param_nvme_path"] = _ask_field( "Nvme Path to offload parameters?", str, default="/nvme", ) if deepspeed_config["offload_optimizer_device"] == "nvme": deepspeed_config["offload_optimizer_nvme_path"] = _ask_field( "Nvme Path to offload optimizer states?", str, default="/nvme", ) deepspeed_config["gradient_accumulation_steps"] = _ask_field( "How many gradient accumulation steps you're passing in your script? [1]: ", int, default=1, ) use_gradient_clipping = _ask_field( "Do you want to use gradient clipping? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_gradient_clipping: deepspeed_config["gradient_clipping"] = _ask_field( "What is the gradient clipping value? [1.0]: ", float, default=1.0, ) if deepspeed_config["zero_stage"] == 3: deepspeed_config["zero3_save_16bit_model"] = _ask_field( "Do you want to save 16-bit model weights when using ZeRO Stage-3? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) deepspeed_config["zero3_init_flag"] = _ask_field( "Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if deepspeed_config["zero3_init_flag"]: if not is_transformers_available(): raise Exception( "When `zero3_init_flag` is set, it requires Transformers to be installed. " "Please run `pip3 install transformers`." ) use_moe = _ask_field( "Do you want to enable Mixture-of-Experts training (MoE)? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_moe: deepspeed_config["deepspeed_moe_layer_cls_names"] = _ask_field( "Specify the comma-separated list of transformers MoE layer class names (case-sensitive), e.g : " " `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ... : ", str, ) if num_machines > 1: launcher_query = "Which Type of launcher do you want to use?" deepspeed_config["deepspeed_multinode_launcher"] = _ask_options( launcher_query, DEEPSPEED_MULTINODE_LAUNCHERS, lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)], ) if deepspeed_config["deepspeed_multinode_launcher"] != DEEPSPEED_MULTINODE_LAUNCHERS[1]: deepspeed_config["deepspeed_hostfile"] = _ask_field( "DeepSpeed configures multi-node compute resources with hostfile. " "Each row is of the format `hostname slots=[num_gpus]`, e.g., `localhost slots=2`; " "for more information please refer official [documentation]" "(https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node). 
" "Please specify the location of hostfile: ", str, ) is_exclusion_filter = _ask_field( "Do you want to specify exclusion filter string? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if is_exclusion_filter: deepspeed_config["deepspeed_exclusion_filter"] = _ask_field( "DeepSpeed exclusion filter string: ", str, ) is_inclusion_filter = _ask_field( "Do you want to specify inclusion filter string? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if is_inclusion_filter: deepspeed_config["deepspeed_inclusion_filter"] = _ask_field( "DeepSpeed inclusion filter string: ", str, ) fsdp_config = {} tp_config = {} if distributed_type in [ DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_XPU, ]: use_fsdp = _ask_field( "Do you want to use FullyShardedDataParallel? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_fsdp: distributed_type = DistributedType.FSDP if distributed_type == DistributedType.FSDP: sharding_strategy_query = "What should be your sharding strategy?" fsdp_config["fsdp_sharding_strategy"] = _ask_options( sharding_strategy_query, FSDP_SHARDING_STRATEGY, lambda x: FSDP_SHARDING_STRATEGY[int(x)], ) fsdp_config["fsdp_offload_params"] = _ask_field( "Do you want to offload parameters and gradients to CPU? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) fsdp_wrap_query = "What should be your auto wrap policy?" fsdp_config["fsdp_auto_wrap_policy"] = _ask_options( fsdp_wrap_query, FSDP_AUTO_WRAP_POLICY, lambda x: FSDP_AUTO_WRAP_POLICY[int(x)], ) if fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[0]: use_no_split_modules = _ask_field( "Do you want to use the model's `_no_split_modules` to wrap. Only applicable for 🤗 Transformers [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if not use_no_split_modules: fsdp_config["fsdp_transformer_layer_cls_to_wrap"] = _ask_field( "Specify the comma-separated list of transformer layer class names (case-sensitive) to wrap ,e.g, :" "`BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput` ...? : ", str, ) elif fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[1]: fsdp_config["fsdp_min_num_params"] = _ask_field( "What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? [1e8]: ", int, default=100000000, ) fsdp_backward_prefetch_query = "What should be your FSDP's backward prefetch policy?" fsdp_config["fsdp_backward_prefetch"] = _ask_options( fsdp_backward_prefetch_query, FSDP_BACKWARD_PREFETCH, lambda x: FSDP_BACKWARD_PREFETCH[int(x)], ) fsdp_state_dict_type_query = "What should be your FSDP's state dict type?" fsdp_config["fsdp_state_dict_type"] = _ask_options( fsdp_state_dict_type_query, FSDP_STATE_DICT_TYPE, lambda x: FSDP_STATE_DICT_TYPE[int(x)], default=2, ) fsdp_config["fsdp_forward_prefetch"] = _ask_field( "Do you want to enable FSDP's forward prefetch policy? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) fsdp_config["fsdp_use_orig_params"] = _ask_field( "Do you want to enable FSDP's `use_orig_params` feature? 
[YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) fsdp_config["fsdp_cpu_ram_efficient_loading"] = _ask_field( "Do you want to enable CPU RAM efficient model loading? Only applicable for 🤗 Transformers models. [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) if fsdp_config["fsdp_cpu_ram_efficient_loading"]: fsdp_config["fsdp_sync_module_states"] = True else: fsdp_config["fsdp_sync_module_states"] = _ask_field( "Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start? [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) fsdp_config["fsdp_activation_checkpointing"] = _ask_field( "Do you want to enable FSDP activation checkpointing? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if not use_fsdp: use_tp = _ask_field( "Do you want to use TensorParallel? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_tp: distributed_type = DistributedType.TP if distributed_type == DistributedType.TP: tp_config["tp_size"] = _ask_field( "What should be your Tensor Parallel degree? [1]: ", int, default=1, ) megatron_lm_config = {} if distributed_type in [DistributedType.MULTI_GPU]: use_megatron_lm = _ask_field( "Do you want to use Megatron-LM ? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_megatron_lm: distributed_type = DistributedType.MEGATRON_LM if distributed_type == DistributedType.MEGATRON_LM: prefix = "megatron_lm_" megatron_lm_config[prefix + "tp_degree"] = _ask_field( "What is the Tensor Parallelism degree/size? [1]:", int, default=1, error_message="Please enter an integer.", ) if megatron_lm_config[prefix + "tp_degree"] > 1: megatron_lm_config[prefix + "sequence_parallelism"] = _ask_field( "Do you want to enable Sequence Parallelism? [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) megatron_lm_config[prefix + "pp_degree"] = _ask_field( "What is the Pipeline Parallelism degree/size? [1]:", int, default=1, error_message="Please enter an integer.", ) if megatron_lm_config[prefix + "pp_degree"] > 1: megatron_lm_config[prefix + "num_micro_batches"] = _ask_field( "What is the number of micro-batches? [1]:", int, default=1, error_message="Please enter an integer.", ) megatron_lm_config[prefix + "recompute_activations"] = _ask_field( "Do you want to enable selective activation recomputation? [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) megatron_lm_config[prefix + "use_distributed_optimizer"] = _ask_field( "Do you want to use distributed optimizer " "which shards optimizer state and gradients across data parallel ranks? [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message="Please enter yes or no.", ) megatron_lm_config[prefix + "gradient_clipping"] = _ask_field( "What is the gradient clipping value based on global L2 Norm (0 to disable)? 
[1.0]: ", float, default=1.0, ) # TPU specific defaults tpu_commands = None tpu_command_file = None tpu_downcast_bf16 = "no" tpu_env = [] tpu_name = None tpu_vm = None tpu_zone = None tpu_use_sudo = False tpu_use_cluster = False if distributed_type in [ DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_NPU, DistributedType.XLA, ]: machine_type = str(distributed_type).split(".")[1].replace("MULTI_", "") if machine_type == "TPU": machine_type += " cores" elif machine_type == "CPU": machine_type = "processes" else: machine_type += "(s)" num_processes = _ask_field( f"How many {machine_type} should be used for distributed training? [1]:", int, default=1, error_message="Please enter an integer.", ) elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]: num_processes = _ask_field( "How many GPU(s) should be used for distributed training? [1]:", int, default=1, error_message="Please enter an integer.", ) else: num_processes = 1 if (distributed_type == DistributedType.MULTI_GPU) and (num_machines == 1) and (num_processes == 1): raise ValueError( f"Specified distributed type {distributed_type} but only using 1 GPU on a single machine. Please select `No distributed training` for the type of machine you are using." ) if ( distributed_type in [ DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.NO, ] and not use_cpu and not use_mps ): if is_npu_available(): machine_type = "NPU(s)" elif is_mlu_available(): machine_type = "MLU(s)" elif is_musa_available(): machine_type = "MUSA(s)" elif is_xpu_available(): machine_type = "XPU(s)" else: machine_type = "GPU(s)" gpu_ids = _ask_field( f"What {machine_type} (by id) should be used for training on this machine as a comma-seperated list? [all]:", default="all", ) # CPU affinity is only supported on NVIDIA hardware for now enable_cpu_affinity = False if distributed_type in (DistributedType.NO, DistributedType.MULTI_GPU) and not use_cpu and not use_mps: enable_cpu_affinity = _ask_field( "Would you like to enable numa efficiency? (Currently only supported on NVIDIA hardware). [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) fp8_config = None if distributed_type == DistributedType.XLA: mixed_precision = "no" main_training_function = _ask_field( "What is the name of the function in your script that should be launched in all parallel scripts? [main]: ", default="main", ) tpu_use_cluster = _ask_field( "Are you using a TPU cluster? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if tpu_use_cluster: tpu_name = _ask_field( "What is the name of your TPU cluster? ", default=None, error_message="Please enter the name of your TPU cluster.", ) tpu_zone = _ask_field( "What is the zone of your TPU cluster? ", default=None, error_message="Please enter the zone of your TPU cluster.", ) tpu_use_sudo = _ask_field( "To run a python script in a TPU pod, should `sudo` be used? [yes/NO]: ", default=False, error_message="Please enter yes or no.", ) run_commands = _ask_field( "Do you have code you wish to run on startup in each pod? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if run_commands: use_command_file = _ask_field( "Is this code located in a bash script? 
[yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_command_file: tpu_command_file = _ask_field( "What is the path to your bash script? ", default=None, error_message="Please enter the path to your bash script.", ) tpu_command_file = os.path.abspath(tpu_command_file) else: print("Please enter each command seperately you wish to run on startup in each pod.") tpu_commands = [] another_command = True while another_command: tpu_commands.append( _ask_field( "Please enter a single command to be ran ", default=None, error_message="Please enter the commands you wish to run on startup in each pod as a single string.", ) ) another_command = _ask_field( "Do you wish to add another command? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) tpu_vm = _ask_field( "If not using an instance group, what are the names of the Compute VM instances to be used, seperated by a comma: ", default="", ).split(",") tpu_env = _ask_field( "What environment variables do you wish to set in each pod, seperated by a comma: ", default="", ).split(",") else: main_training_function = "main" if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config: mixed_precision = None else: mixed_precision = _ask_options( "Do you wish to use mixed precision?", ["no", "fp16", "bf16", "fp8"], _convert_mixed_precision, ) if mixed_precision == "fp8": if not is_fp8_available(): raise ValueError("FP8 (either Transformer Engine or MSAMP) is not installed on this machine.") fp8_config = {} fp8_config["backend"] = _ask_options( "Which FP8 backend do you want to use?", ["te", "msamp"], _convert_fp8_backend, ) if fp8_config["backend"] == "TE": if not is_transformer_engine_available(): raise ValueError("TransformersEngine was selected, but it is not installed on this machine.") fp8_config["use_autocast_during_eval"] = _ask_field( "Do you want to use FP8 autocast during eval mode? Generally better metrics are found when this is disabled [yes/NO]: ", _convert_yes_no_to_bool, default=False, ) fp8_config["margin"] = _ask_field( "What margin should be used for gradient scaling? [0]: ", int, default=0, ) fp8_config["interval"] = _ask_field( "What interval should be used for for how often the scaling factor is recomputed? [1]: ", int, default=1, ) fp8_config["fp8_format"] = _ask_options( "Which weight format should be used?", ["HYBRID", "E4M3"], lambda x: "HYBRID" if x == 0 else "E4M3", default=0, ) fp8_config["amax_history_length"] = _ask_field( "What length of history should be used for the amax scaling factor computation? [1024]: ", int, default=1024, ) fp8_config["amax_compute_algorithm"] = _ask_options( "Which algorithm should be used for the amax scaling factor computation?", ["max", "most_recent"], lambda x: "max" if x == 0 else "most_recent", default=0, ) fp8_config["override_linear_precision"] = _ask_field( "Do you want to to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision? [yes/NO]: ", _convert_yes_no_to_bool, default=False, ) if fp8_config["override_linear_precision"]: fprop = _ask_field( "Should `fprop` be executed in higher precision? [yes/NO]: ", _convert_yes_no_to_bool, default=False, ) dgrad = _ask_field( "Should `dgrad` be executed in higher precision? [yes/NO]: ", _convert_yes_no_to_bool, default=False, ) wgrad = _ask_field( "Should `wgrad` be executed in higher precision? 
[yes/NO]: ", _convert_yes_no_to_bool, default=False, ) fp8_config["override_linear_precision"] = (fprop, dgrad, wgrad) elif fp8_config["backend"] == "MSAMP": if not is_msamp_available(): raise ValueError("MSAMP was selected, but it is not installed on this machine.") fp8_config["optimization_level"] = _ask_options( "Which optimization level should be used?", ["O1", "O2"], lambda x: "O1" if x == 0 else "O2", default=1, ) if use_dynamo and mixed_precision == "no" and not use_cpu: print( "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." ) if distributed_type == DistributedType.XLA and mixed_precision == "bf16": tpu_downcast_bf16 = _ask_field( "Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?", default="no" ) return ClusterConfig( compute_environment=ComputeEnvironment.LOCAL_MACHINE, distributed_type=distributed_type, num_processes=num_processes, gpu_ids=gpu_ids, mixed_precision=mixed_precision, downcast_bf16=tpu_downcast_bf16, machine_rank=machine_rank, num_machines=num_machines, main_process_ip=main_process_ip, main_process_port=main_process_port, main_training_function=main_training_function, fp8_config=fp8_config, deepspeed_config=deepspeed_config, fsdp_config=fsdp_config, tp_config=tp_config, megatron_lm_config=megatron_lm_config, ipex_config=ipex_config, mpirun_config=mpirun_config, use_cpu=use_cpu, rdzv_backend=rdzv_backend, same_network=same_network, commands=tpu_commands, command_file=tpu_command_file, tpu_env=tpu_env, tpu_name=tpu_name, tpu_vm=tpu_vm, tpu_zone=tpu_zone, tpu_use_sudo=tpu_use_sudo, tpu_use_cluster=tpu_use_cluster, dynamo_config=dynamo_config, debug=debug, enable_cpu_affinity=enable_cpu_affinity, )
accelerate/src/accelerate/commands/config/cluster.py/0
{ "file_path": "accelerate/src/accelerate/commands/config/cluster.py", "repo_id": "accelerate", "token_count": 19323 }
#!/usr/bin/env python # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from accelerate.commands.utils import CustomArgumentParser from accelerate.utils import merge_fsdp_weights description = """Utility to merge the weights from multiple FSDP checkpoints into a single combined checkpoint. Should be used if `SHARDED_STATE_DICT` was used for the model. Weights will be saved to `{output_path}`. This is a CPU-bound process and requires enough RAM to load the entire model state dict.""" def merge_command(args): merge_fsdp_weights( args.checkpoint_directory, args.output_path, not args.unsafe_serialization, args.remove_checkpoint_dir ) def merge_command_parser(subparsers=None): if subparsers is not None: parser = subparsers.add_parser("merge-weights", description=description) else: parser = CustomArgumentParser(description=description) parser.add_argument("checkpoint_directory", type=str, help="A directory containing sharded weights saved by FSDP.") parser.add_argument( "output_path", type=str, help="The path to save the merged weights. Defaults to the current directory. ", ) parser.add_argument( "--unsafe_serialization", action="store_false", default=False, help="Whether to save the merged weights as `.bin` rather than `.safetensors` (not recommended).", ) parser.add_argument( "--remove_checkpoint_dir", action="store_true", help="Whether to remove the checkpoint directory after merging.", default=False, ) if subparsers is not None: parser.set_defaults(func=merge_command) return parser def main(): parser = merge_command_parser() args = parser.parse_args() merge_command(args) if __name__ == "__main__": main()
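# Example invocations (a sketch; paths are placeholders for your own FSDP checkpoint layout):
#   accelerate merge-weights path/to/fsdp_checkpoint_dir path/to/merged_output
#   python -m accelerate.commands.merge path/to/fsdp_checkpoint_dir path/to/merged_output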
accelerate/src/accelerate/commands/merge.py/0
{ "file_path": "accelerate/src/accelerate/commands/merge.py", "repo_id": "accelerate", "token_count": 776 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator, DataLoaderConfiguration, GradientAccumulationPlugin from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, set_seed def check_model_parameters(model_a, model_b, did_step, iteration, **kwargs): for param, grad_param in zip(model_a.parameters(), model_b.parameters()): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad, grad_param.grad, **kwargs) is False ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})" else: # Grads should be in sync assert ( torch.allclose(param.grad, grad_param.grad, **kwargs) is True ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})" def step_model(model, input, target, accelerator, do_backward=True): model.train() output = model(input) loss = F.mse_loss(output, target.to(output.device)) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(loss) def get_training_setup(accelerator, sched=False): "Returns everything needed to perform basic training" set_seed(42) model = RegressionModel() ddp_model = deepcopy(model) dset = RegressionDataset(length=80) dataloader = DataLoader(dset, batch_size=16) model.to(accelerator.device) if sched: opt = AdamW(params=model.parameters(), lr=1e-3) ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3) sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65) ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65) # Make a copy of `model` if sched: ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader) else: ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def test_noop_sync(accelerator): # Test when on a single CPU or GPU that the context manager does nothing model, ddp_model, dataloader = get_training_setup(accelerator) # Use a single batch ddp_input, ddp_target = next(iter(dataloader)).values() for iteration in range(3): # Gather the distributed inputs and targs for the base model input, target = accelerator.gather((ddp_input, ddp_target)) input, target = input.to(accelerator.device), target.to(accelerator.device) # Perform our initial ground truth step in non "DDP" step_model(model, input, target, accelerator) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with 
accelerator.no_sync(ddp_model): step_model(ddp_model, ddp_input, ddp_target, accelerator) else: # Sync grads step_model(ddp_model, ddp_input, ddp_target, accelerator) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(model, ddp_model, True, iteration) for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): if not param.requires_grad: continue assert torch.allclose( param.grad, ddp_param.grad ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration) ddp_input = ddp_input[torch.randperm(len(ddp_input))] def test_distributed_sync(accelerator): # Test on distributed setup that context manager behaves properly model, ddp_model, dataloader = get_training_setup(accelerator) # Use a single batch ddp_input, ddp_target = next(iter(dataloader)).values() for iteration in range(3): # Gather the distributed inputs and targs for the base model input, target = accelerator.gather((ddp_input, ddp_target)) input, target = input.to(accelerator.device), target.to(accelerator.device) # Perform our initial ground truth step in non "DDP" step_model(model, input, target, accelerator) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(ddp_model): step_model(ddp_model, ddp_input, ddp_target, accelerator) else: # Sync grads step_model(ddp_model, ddp_input, ddp_target, accelerator) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad, ddp_param.grad) is False ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" else: # Grads should be in sync assert ( torch.allclose(param.grad, ddp_param.grad) is True ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration) ddp_input = ddp_input[torch.randperm(len(ddp_input))] def test_distributed_sync_multiple_fwd(accelerator): # Test on distributed setup that context manager behaves properly when used with multiple forwards followed by multiple backwards model, ddp_model, dataloader = get_training_setup(accelerator) # Do multiple forwards losses = [] num_iterations = 3 for iteration in range(num_iterations): ddp_input, ddp_target = next(iter(dataloader)).values() # Gather the distributed inputs and targs for the base model input, target = accelerator.gather((ddp_input, ddp_target)) input, target = input.to(accelerator.device), target.to(accelerator.device) # Perform our initial ground truth step in non "DDP" step_model(model, input, target, accelerator) # Accumulate grads locally with accelerator.no_sync(ddp_model): ddp_output = ddp_model(ddp_input) loss = F.mse_loss(ddp_output, ddp_target.to(ddp_output.device)) losses.append(loss) # Do multiple backwards and sync only at the last backward for iteration in range(num_iterations): loss = losses[iteration] if iteration < num_iterations - 1: # Accumulate grads locally accelerator.backward(loss) # DDP model and model should only be in sync after last backward for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): if not param.requires_grad: continue # Grads should not be in 
sync assert ( torch.allclose(param.grad, ddp_param.grad) is False ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" else: # Sync grads if last backward with accelerator.trigger_sync_in_backward(ddp_model): accelerator.backward(loss) # DDP model and model should only be in sync after last backward for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): if not param.requires_grad: continue # Grads should be in sync assert ( torch.allclose(param.grad, ddp_param.grad) is True ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" def test_gradient_accumulation(split_batches=False, dispatch_batches=False, sync_each_batch=False): gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2, sync_each_batch=sync_each_batch) dataloader_config = DataLoaderConfiguration(split_batches=split_batches, dispatch_batches=dispatch_batches) accelerator = Accelerator( dataloader_config=dataloader_config, gradient_accumulation_plugin=gradient_accumulation_plugin, ) # Test that context manager behaves properly model, ddp_model, dataloader = get_training_setup(accelerator) for iteration, batch in enumerate(dataloader): ddp_input, ddp_target = batch.values() # Gather the distributed inputs and targs for the base model input, target = accelerator.gather((ddp_input, ddp_target)) input, target = input.to(accelerator.device), target.to(accelerator.device) # Perform our initial ground truth step in non "DDP" step_model(model, input, target, accelerator, False) # Do "gradient accumulation" (noop) with accelerator.accumulate(ddp_model): step_model(ddp_model, ddp_input, ddp_target, accelerator) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1) or sync_each_batch: # Grads should be in sync assert ( torch.allclose(param.grad, ddp_param.grad) is True ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" else: # Grads should not be in sync assert ( torch.allclose(param.grad, ddp_param.grad) is False ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration) ddp_input = ddp_input[torch.randperm(len(ddp_input))] GradientState._reset_state() def test_gradient_accumulation_with_opt_and_scheduler( split_batches=False, dispatch_batches=False, sync_each_batch=False ): gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2, sync_each_batch=sync_each_batch) dataloader_config = DataLoaderConfiguration(split_batches=split_batches, dispatch_batches=dispatch_batches) accelerator = Accelerator( dataloader_config=dataloader_config, gradient_accumulation_plugin=gradient_accumulation_plugin, ) # Test that context manager behaves properly model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True) for iteration, batch in enumerate(dataloader): ddp_input, ddp_target = batch.values() # Gather the distributed inputs and targs for the base model input, target = accelerator.gather((ddp_input, ddp_target)) input, target = input.to(accelerator.device), target.to(accelerator.device) # Perform our initial ground truth step in non "DDP" 
model.train() ddp_model.train() step_model(model, input, target, accelerator, False) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)): if split_batches: sched.step() else: for _ in range(accelerator.num_processes): sched.step() # Perform gradient accumulation under wrapper with accelerator.accumulate(ddp_model): step_model(ddp_model, ddp_input, ddp_target, accelerator) ddp_opt.step() ddp_sched.step() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n' did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader)) if accelerator.num_processes > 1: check_model_parameters( model, ddp_model, did_step or sync_each_batch, # syncs at each grad_accum interval of if sync_each_batch==True iteration, rtol=1e-3, # needs a relative tolerance due to roundoff errors ) if did_step: opt.zero_grad() # flush gradients every accum step ddp_opt.zero_grad() # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration) GradientState._reset_state() def test_dataloader_break(): accelerator = Accelerator() first_dset = RegressionDataset(length=80) first_dataloader = DataLoader(first_dset, batch_size=16) second_dset = RegressionDataset(length=96) second_dataloader = DataLoader(second_dset, batch_size=16) first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(first_dataloader): assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader) if iteration < len(first_dataloader) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(second_dataloader): assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader) if batch_num < len(second_dataloader) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def main(): accelerator = Accelerator() state = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**") if state.distributed_type != DistributedType.XLA: test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**") test_noop_sync(accelerator) if state.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_CPU, ): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**") test_distributed_sync(accelerator) if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager with multiple forwards**") test_distributed_sync_multiple_fwd(accelerator) if state.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, ): for split_batch in [True, False]: for dispatch_batches in [True, False]: for sync_each_batch in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, ", f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}` and 
`sync_each_batch={sync_each_batch}`**", ) test_gradient_accumulation(split_batch, dispatch_batches, sync_each_batch) # Currently will break on torch 2.0 +, need to investigate why if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", "`split_batches=False`, `dispatch_batches=False`, `sync_each_batch=False`**", ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, ): for split_batch in [True, False]: for dispatch_batches in [True, False]: for sync_each_batch in [True, False]: if not split_batch and not dispatch_batches and not sync_each_batch: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}` and `sync_each_batch={sync_each_batch}`**", ) test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches, sync_each_batch) state.destroy_process_group() def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
accelerate/src/accelerate/test_utils/scripts/test_sync.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/test_sync.py", "repo_id": "accelerate", "token_count": 7926 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from collections.abc import Mapping from typing import Dict, List, Optional, Union import numpy as np import torch from safetensors import safe_open def offload_weight(weight, weight_name, offload_folder, index=None): dtype = None # Check the string instead of the dtype to be compatible with versions of PyTorch that don't have bfloat16. if str(weight.dtype) == "torch.bfloat16": # Need to reinterpret the underlined data as int16 since NumPy does not handle bfloat16s. weight = weight.view(torch.int16) dtype = "bfloat16" array = weight.cpu().numpy() tensor_file = os.path.join(offload_folder, f"{weight_name}.dat") if index is not None: if dtype is None: dtype = str(array.dtype) index[weight_name] = {"dtype": dtype, "shape": list(array.shape)} if array.ndim == 0: array = array[None] file_array = np.memmap(tensor_file, dtype=array.dtype, mode="w+", shape=array.shape) file_array[:] = array[:] file_array.flush() return index def load_offloaded_weight(weight_file, weight_info): shape = tuple(weight_info["shape"]) if shape == (): # NumPy memory-mapped arrays can't have 0 dims so it was saved as 1d tensor shape = (1,) dtype = weight_info["dtype"] if dtype == "bfloat16": # NumPy does not support bfloat16 so this was saved as a int16 dtype = "int16" weight = np.memmap(weight_file, dtype=dtype, shape=shape, mode="r") if len(weight_info["shape"]) == 0: weight = weight[0] weight = torch.tensor(weight) if weight_info["dtype"] == "bfloat16": weight = weight.view(torch.bfloat16) return weight def save_offload_index(index, offload_folder): if index is None or len(index) == 0: # Nothing to save return offload_index_file = os.path.join(offload_folder, "index.json") if os.path.isfile(offload_index_file): with open(offload_index_file, encoding="utf-8") as f: current_index = json.load(f) else: current_index = {} current_index.update(index) with open(offload_index_file, "w", encoding="utf-8") as f: json.dump(current_index, f, indent=2) def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: Dict[str, torch.Tensor]): """ Offload a state dict in a given folder. Args: save_dir (`str` or `os.PathLike`): The directory in which to offload the state dict. state_dict (`Dict[str, torch.Tensor]`): The dictionary of tensors to offload. """ os.makedirs(save_dir, exist_ok=True) index = {} for name, parameter in state_dict.items(): index = offload_weight(parameter, name, save_dir, index=index) # Update index save_offload_index(index, save_dir) class PrefixedDataset(Mapping): """ Will access keys in a given dataset by adding a prefix. Args: dataset (`Mapping`): Any map with string keys. prefix (`str`): A prefix to add when trying to access any element in the underlying dataset. 
""" def __init__(self, dataset: Mapping, prefix: str): self.dataset = dataset self.prefix = prefix def __getitem__(self, key): return self.dataset[f"{self.prefix}{key}"] def __iter__(self): return iter([key for key in self.dataset if key.startswith(self.prefix)]) def __len__(self): return len(self.dataset) class OffloadedWeightsLoader(Mapping): """ A collection that loads weights stored in a given state dict or memory-mapped on disk. Args: state_dict (`Dict[str, torch.Tensor]`, *optional*): A dictionary parameter name to tensor. save_folder (`str` or `os.PathLike`, *optional*): The directory in which the weights are stored (by `offload_state_dict` for instance). index (`Dict`, *optional*): A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default to the index saved in `save_folder`. """ def __init__( self, state_dict: Dict[str, torch.Tensor] = None, save_folder: Optional[Union[str, os.PathLike]] = None, index: Mapping = None, device=None, ): if state_dict is None and save_folder is None and index is None: raise ValueError("Need either a `state_dict`, a `save_folder` or an `index` containing offloaded weights.") self.state_dict = {} if state_dict is None else state_dict self.save_folder = save_folder if index is None and save_folder is not None: with open(os.path.join(save_folder, "index.json")) as f: index = json.load(f) self.index = {} if index is None else index self.all_keys = list(self.state_dict.keys()) self.all_keys.extend([key for key in self.index if key not in self.all_keys]) self.device = device def __getitem__(self, key: str): # State dict gets priority if key in self.state_dict: return self.state_dict[key] weight_info = self.index[key] if weight_info.get("safetensors_file") is not None: device = "cpu" if self.device is None else self.device tensor = None try: with safe_open(weight_info["safetensors_file"], framework="pt", device=device) as f: tensor = f.get_tensor(weight_info.get("weight_name", key)) except TypeError: # if failed to get_tensor on the device, such as bf16 on mps, try to load it on CPU first with safe_open(weight_info["safetensors_file"], framework="pt", device="cpu") as f: tensor = f.get_tensor(weight_info.get("weight_name", key)) if "dtype" in weight_info: tensor = tensor.to(getattr(torch, weight_info["dtype"])) if tensor.device != torch.device(device): tensor = tensor.to(device) return tensor weight_file = os.path.join(self.save_folder, f"{key}.dat") return load_offloaded_weight(weight_file, weight_info) def __iter__(self): return iter(self.all_keys) def __len__(self): return len(self.all_keys) def extract_submodules_state_dict(state_dict: Dict[str, torch.Tensor], submodule_names: List[str]): """ Extract the sub state-dict corresponding to a list of given submodules. Args: state_dict (`Dict[str, torch.Tensor]`): The state dict to extract from. submodule_names (`List[str]`): The list of submodule names we want to extract. """ result = {} for module_name in submodule_names: # We want to catch module_name parameter (module_name.xxx) or potentially module_name, but not any of the # submodules that could being like module_name (transformers.h.1 and transformers.h.10 for instance) result.update( { key: param for key, param in state_dict.items() if key == module_name or key.startswith(module_name + ".") } ) return result
accelerate/src/accelerate/utils/offload.py/0
{ "file_path": "accelerate/src/accelerate/utils/offload.py", "repo_id": "accelerate", "token_count": 3177 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import json import os import pickle import tempfile import time from unittest.mock import patch import psutil import torch from parameterized import parameterized from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights, load_checkpoint_and_dispatch from accelerate.accelerator import Accelerator from accelerate.data_loader import DataLoaderDispatcher, DataLoaderShard, skip_first_batches from accelerate.state import GradientState, PartialState from accelerate.test_utils import ( require_bnb, require_cuda_or_xpu, require_huggingface_suite, require_multi_device, require_non_cpu, require_transformer_engine, slow, torch_device, ) from accelerate.test_utils.testing import ( AccelerateTestCase, require_cuda, require_non_torch_xla, require_torchdata_stateful_dataloader, ) from accelerate.utils import FP8RecipeKwargs, is_torchdata_stateful_dataloader_available, patch_environment from accelerate.utils.dataclasses import DataLoaderConfiguration from accelerate.utils.modeling import get_state_dict_from_offload, load_checkpoint_in_model from accelerate.utils.random import set_seed if is_torchdata_stateful_dataloader_available(): from torchdata.stateful_dataloader import StatefulDataLoader class ModelWithTiedWeights(torch.nn.Module): def __init__(self): super().__init__() self.linear1 = torch.nn.Linear(2, 4) self.linear2 = torch.nn.Linear(4, 2) self.linear2.weight = self.linear1.weight self.linear2.bias = self.linear1.bias def forward(self, x): return self.linear2(self.linear1(x)) def create_components(tied_weights=False): model = ModelWithTiedWeights() if tied_weights else torch.nn.Linear(2, 4) optimizer = torch.optim.AdamW(model.parameters(), lr=1.0) scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1) train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3]))) valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6]))) return model, optimizer, scheduler, train_dl, valid_dl class ModelForTest(torch.nn.Module): def __init__(self): super().__init__() self.linear1 = torch.nn.Linear(3, 4) self.batchnorm = torch.nn.BatchNorm1d(4) self.linear2 = torch.nn.Linear(4, 5) def forward(self, x): return self.linear2(self.batchnorm(self.linear1(x))) def create_dataloaders_for_test(batch_size=3, n_train_batches: int = 12, n_valid_batches: int = 2, num_workers=0): "Generates a tuple of dummy DataLoaders to test with" def get_dataset(n_batches): x = torch.randn(batch_size * n_batches, 3) y = torch.randn(batch_size * n_batches, 5) return TensorDataset(x, y) train_dataset = get_dataset(n_train_batches) valid_dataset = get_dataset(n_valid_batches) train_dataloader = DataLoader(train_dataset, batch_size=batch_size, num_workers=num_workers) valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, num_workers=num_workers) return (train_dataloader, valid_dataloader) def 
get_signature(model): return sum(param.abs().sum().item() for param in model.parameters()) def load_random_weights(model): if isinstance(model, torch.nn.Linear): state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict() elif isinstance(model, ModelWithTiedWeights): state = ModelWithTiedWeights().state_dict() model.load_state_dict(state) def parameterized_custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = "use_safetensors" if param.args[0] is True else "use_pytorch" if len(param.args) > 1: param_based_name += "_tied_weights" if param.args[1] is True else "" if len(param.args) > 2: param_based_name += f"_num_workers_{param.args[2]}" if len(param.args) > 3: param_based_name += "_dispatch_batches" if param.args[3] is True else "_no_dispatch_batches" return f"{func.__name__}_{param_based_name}" class AcceleratorTester(AccelerateTestCase): def test_partial_state_after_reset(self): # Verifies that custom getattr errors will be thrown # if the state is reset, but only if trying to # get expected attributes state = PartialState() assert state.num_processes > 0 with self.assertRaises(AttributeError) as cm: state.someotherthing assert "'PartialState' object has no attribute" in str(cm.exception) assert "This happens if `PartialState._reset_state()`" not in str(cm.exception) with self.assertRaises(AttributeError) as cm: state._reset_state() state.num_processes assert "`PartialState` object has no attribute" in str(cm.exception) assert "This happens if `PartialState._reset_state()`" in str(cm.exception) state.someotherthing = "MyValue" assert state.someotherthing == "MyValue" def test_accelerator_state_after_reset(self): # Verifies that custom getattr errors will be thrown # if the state is reset, but only if trying to # get expected attributes accelerator = Accelerator() assert accelerator.num_processes > 0 with self.assertRaises(AttributeError) as cm: accelerator.state.someotherthing assert "'AcceleratorState' object has no attribute" in str(cm.exception) assert "This happens if `AcceleratorState._reset_state()`" not in str(cm.exception) with self.assertRaises(AttributeError) as cm: accelerator.state._reset_state() accelerator.num_processes assert "`AcceleratorState` object has no attribute" in str(cm.exception) assert "This happens if `AcceleratorState._reset_state()`" in str(cm.exception) accelerator.state.someotherthing = "MyValue" assert accelerator.state.someotherthing == "MyValue" @require_non_cpu def test_accelerator_can_be_reinstantiated(self): _ = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type in ["cuda", "mps", "npu", "xpu", "xla"] with self.assertRaises(ValueError): _ = Accelerator(cpu=True) @require_cuda def test_setting_cpu_affinity(self): with patch_environment(accelerate_cpu_affinity=1, accelerate_debug_mode=1): with self.assertLogs("accelerate.utils.environment", level="INFO") as cm: _ = Accelerator() assert any("Assigning" in log for log in cm.output) assert any("cpu cores to process" in log for log in cm.output) def test_mutable_states(self): accelerator = Accelerator() state = GradientState() assert state.num_steps == 1 accelerator.gradient_accumulation_steps = 4 assert state.num_steps == 4 assert state.sync_gradients is True accelerator.sync_gradients = False assert state.sync_gradients is False GradientState._reset_state() def 
test_prepared_objects_are_referenced(self): accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() ( prepared_model, prepared_optimizer, prepared_scheduler, prepared_train_dl, prepared_valid_dl, ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) assert prepared_model in accelerator._models assert prepared_optimizer in accelerator._optimizers assert prepared_scheduler in accelerator._schedulers assert prepared_train_dl in accelerator._dataloaders assert prepared_valid_dl in accelerator._dataloaders def test_free_memory_dereferences_prepared_components(self): accelerator = Accelerator() # Free up refs with empty_cache() and gc.collect() accelerator.free_memory() model, optimizer, scheduler, train_dl, valid_dl = create_components() free_cpu_ram_before = psutil.virtual_memory().available // 1024 // 1024 model, optimizer, scheduler, train_dl, valid_dl = accelerator.prepare( model, optimizer, scheduler, train_dl, valid_dl ) # Short sleep here makes this test more reliable time.sleep(1e-3) model, optimizer, scheduler, train_dl, valid_dl = accelerator.free_memory( model, optimizer, scheduler, train_dl, valid_dl ) free_cpu_ram_after = psutil.virtual_memory().available // 1024 // 1024 assert len(accelerator._models) == 0 assert len(accelerator._optimizers) == 0 assert len(accelerator._schedulers) == 0 assert len(accelerator._dataloaders) == 0 # The less-than comes *specifically* from CUDA CPU things/won't be present on CPU builds assert free_cpu_ram_after <= free_cpu_ram_before @require_non_torch_xla def test_env_var_device(self): """Tests that setting the torch device with ACCELERATE_TORCH_DEVICE overrides default device.""" PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*args, **kwargs): pass with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"): accelerator = Accelerator() assert str(accelerator.state.device) == "cuda:64" @parameterized.expand([(True, True), (True, False), (False, False)], name_func=parameterized_custom_name_func) def test_save_load_model(self, use_safetensors, tied_weights): accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components(tied_weights) accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) model_signature = get_signature(model) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(tmpdirname, safe_serialization=use_safetensors) # make sure random weights don't match load_random_weights(model) assert abs(model_signature - get_signature(model)) > 1e-3 # make sure loaded weights match accelerator.load_state(tmpdirname) assert abs(model_signature - get_signature(model)) < 1e-3 @parameterized.expand([True, False], name_func=parameterized_custom_name_func) def test_save_model(self, use_safetensors): accelerator = Accelerator() model = torch.nn.Linear(10, 10) model_signature = get_signature(model) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_model(model, tmpdirname, safe_serialization=use_safetensors) # make sure loaded weights match load_checkpoint_in_model(model, tmpdirname) assert abs(model_signature - get_signature(model)) < 1e-3 @parameterized.expand([True, False], name_func=parameterized_custom_name_func) def test_save_sharded_model(self, use_safetensors): accelerator = Accelerator() inputs = torch.randn(3, 3) model = ModelForTest() expected = model(inputs) with tempfile.TemporaryDirectory() as tmpdirname: 
# By setting it to 100, we will split the model int 3 shards accelerator.save_model(model, tmpdirname, safe_serialization=use_safetensors, max_shard_size=100) # make sure loaded weights match load_checkpoint_in_model(model, tmpdirname) output = model(inputs) assert torch.allclose(expected, output, atol=1e-5) @parameterized.expand([True, False], name_func=parameterized_custom_name_func) def test_save_model_offload(self, use_safetensors): accelerator = Accelerator() device_map = {"linear1": "cpu", "batchnorm": "disk", "linear2": "cpu"} inputs = torch.randn(3, 3) model = ModelForTest() expected = model(inputs) with tempfile.TemporaryDirectory() as tmp_dir: accelerator.save_model(model, tmp_dir, safe_serialization=use_safetensors) # load and save offloaded model load_checkpoint_and_dispatch(model, tmp_dir, device_map=device_map, offload_folder=tmp_dir) accelerator.save_model(model, tmp_dir, safe_serialization=use_safetensors) # load weights that were saved from the offloaded model load_checkpoint_and_dispatch(model, tmp_dir) output = model(inputs) assert torch.allclose(expected, output, atol=1e-5) @parameterized.expand([True, False], name_func=parameterized_custom_name_func) @require_non_cpu def test_get_state_dict_from_offload(self, use_safetensors): accelerator = Accelerator() device_map = {"linear1": "cpu", "batchnorm": "disk", "linear2": "disk"} model = ModelForTest() offloaded_layer_weight = model.linear2.weight with tempfile.TemporaryDirectory() as tmp_dir: accelerator.save_model(model, tmp_dir, safe_serialization=use_safetensors) # load model with offloaded layers load_checkpoint_and_dispatch(model, tmp_dir, device_map=device_map, offload_folder=tmp_dir) cpu_onloaded_layer = get_state_dict_from_offload( model.linear2, "linear2.weight", {"linear2.weight": ""}, device_to_put_offload="cpu" ) device_onloaded_layer = get_state_dict_from_offload( model.linear2, "linear2.weight", {"linear2.weight": ""}, device_to_put_offload=0 ) cpu_onloaded_layer_weight = cpu_onloaded_layer["linear2.weight"] device_onloaded_layer_weight = device_onloaded_layer["linear2.weight"] assert torch.allclose(offloaded_layer_weight, cpu_onloaded_layer_weight) assert torch.allclose( offloaded_layer_weight, device_onloaded_layer_weight.to("cpu") ) # must be on the same device for torch.allclose() assert cpu_onloaded_layer_weight.device.type == "cpu" assert device_onloaded_layer_weight.device.type == torch_device @parameterized.expand([True, False], name_func=parameterized_custom_name_func) def test_save_load_model_with_hooks(self, use_safetensors): accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) model_signature = get_signature(model) # saving hook def save_config(models, weights, output_dir): config = {"class_name": models[0].__class__.__name__} with open(os.path.join(output_dir, "data.json"), "w") as f: json.dump(config, f) # loading hook def load_config(models, input_dir): with open(os.path.join(input_dir, "data.json")) as f: config = json.load(f) models[0].class_name = config["class_name"] save_hook = accelerator.register_save_state_pre_hook(save_config) load_hook = accelerator.register_load_state_pre_hook(load_config) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(tmpdirname, safe_serialization=use_safetensors) # make sure random weights don't match with hooks load_random_weights(model) assert abs(model_signature - get_signature(model)) > 1e-3 # random class name to verify 
correct one is loaded model.class_name = "random" # make sure loaded weights match with hooks accelerator.load_state(tmpdirname) assert abs(model_signature - get_signature(model)) < 1e-3 # mode.class_name is loaded from config assert model.class_name == model.__class__.__name__ # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(tmpdirname, safe_serialization=use_safetensors) # make sure random weights don't match with hooks removed load_random_weights(model) assert abs(model_signature - get_signature(model)) > 1e-3 # random class name to verify correct one is loaded model.class_name = "random" # make sure loaded weights match with hooks removed accelerator.load_state(tmpdirname) assert abs(model_signature - get_signature(model)) < 1e-3 # mode.class_name is NOT loaded from config assert model.class_name != model.__class__.__name__ def test_accelerator_none(self): """Just test that passing None to accelerator.prepare() works.""" accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() dummy_obj = None # This should work model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare( model, optimizer, scheduler, train_dl, valid_dl, dummy_obj ) assert dummy_obj is None def test_is_accelerator_prepared(self): """Checks that `_is_accelerator_prepared` is set properly""" accelerator = Accelerator() model, optimizer, scheduler, train_dl, valid_dl = create_components() dummy_obj = [1, 2, 3] # This should work model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare( model, optimizer, scheduler, train_dl, valid_dl, dummy_obj ) assert ( getattr(dummy_obj, "_is_accelerate_prepared", False) is False ), "Dummy object should have `_is_accelerate_prepared` set to `True`" assert ( getattr(model, "_is_accelerate_prepared", False) is True ), "Model is missing `_is_accelerator_prepared` or is set to `False`" assert ( getattr(optimizer, "_is_accelerate_prepared", False) is True ), "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" assert ( getattr(scheduler, "_is_accelerate_prepared", False) is True ), "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" assert ( getattr(train_dl, "_is_accelerate_prepared", False) is True ), "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" assert ( getattr(valid_dl, "_is_accelerate_prepared", False) is True ), "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" @require_cuda_or_xpu @slow @require_bnb def test_accelerator_bnb(self): """Tests that the accelerator can be used with the BNB library.""" from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map={"": 0}, ) accelerator = Accelerator() # This should work model = accelerator.prepare(model) @require_cuda_or_xpu @slow @require_bnb def test_accelerator_bnb_cpu_error(self): """Tests that the accelerator can be used with the BNB library. 
This should fail as we are trying to load a model that is loaded between cpu and gpu""" from transformers import AutoModelForCausalLM accelerator = Accelerator() with init_empty_weights(): model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", ) model.tie_weights() device_map = infer_auto_device_map(model) device_map["lm_head"] = "cpu" model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True ) # This should not work and get value error with self.assertRaises(ValueError): model = accelerator.prepare(model) @require_non_torch_xla @slow @require_bnb @require_multi_device def test_accelerator_bnb_multi_device(self): """Tests that the accelerator can be used with the BNB library.""" from transformers import AutoModelForCausalLM if torch_device == "cuda": PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU} elif torch_device == "npu": PartialState._shared_state = {"distributed_type": DistributedType.MULTI_NPU} elif torch_device == "xpu": PartialState._shared_state = {"distributed_type": DistributedType.MULTI_XPU} else: raise ValueError(f"{torch_device} is not supported in test_accelerator_bnb_multi_device.") with init_empty_weights(): model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", ) model.tie_weights() device_map = infer_auto_device_map(model) device_map["lm_head"] = 1 model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, ) accelerator = Accelerator() # This should not work and get value error with self.assertRaises(ValueError): _ = accelerator.prepare(model) PartialState._reset_state() @require_non_torch_xla @slow @require_bnb @require_multi_device def test_accelerator_bnb_multi_device_no_distributed(self): """Tests that the accelerator can be used with the BNB library.""" from transformers import AutoModelForCausalLM with init_empty_weights(): model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", ) device_map = infer_auto_device_map(model) device_map["lm_head"] = 1 model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, ) accelerator = Accelerator() # This should work _ = accelerator.prepare(model) @require_non_cpu def test_accelerator_cpu_flag_prepare(self): model = torch.nn.Linear(10, 10) sgd = torch.optim.SGD(model.parameters(), lr=0.01) accelerator = Accelerator(cpu=True) _ = accelerator.prepare(sgd) @require_transformer_engine def test_can_unwrap_model_te(self): model, optimizer, *_ = create_components() fp8_recipe = FP8RecipeKwargs(backend="TE") accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[fp8_recipe]) inputs = torch.randn(10, 2).to(torch_device) model, optimizer = accelerator.prepare(model, optimizer) model(inputs) # sanity check that this works model = accelerator.unwrap_model(model, keep_fp32_wrapper=False) model(inputs) # check that this still works # check that pickle roundtrip works model_loaded = pickle.loads(pickle.dumps(model)) model_loaded(inputs) @require_non_cpu def test_can_unwrap_model_fp16(self): # test for a regression introduced in #872 # before the fix, after unwrapping with keep_fp32_wrapper=False, there would be the following error: # Linear.forward() missing 1 required positional argument: 'input' model = create_components()[0] accelerator = Accelerator(mixed_precision="fp16") inputs = torch.randn(10, 2).to(torch_device) model = 
accelerator.prepare(model) model(inputs) # sanity check that this works model = accelerator.unwrap_model(model, keep_fp32_wrapper=False) model(inputs) # check that this still works # check that pickle roundtrip works model_loaded = pickle.loads(pickle.dumps(model)) model_loaded(inputs) def test_can_unwrap_model(self): model = create_components()[0] accelerator = Accelerator(mixed_precision="no", cpu=True) inputs = torch.randn(10, 2) model = accelerator.prepare(model) model(inputs) # sanity check that this works model = accelerator.unwrap_model(model, keep_fp32_wrapper=False) model(inputs) # check that this still works # check that pickle roundtrip works model_loaded = pickle.loads(pickle.dumps(model)) model_loaded(inputs) def test_can_unwrap_distributed_compiled_model_keep_torch_compile(self): model = create_components()[0] accelerator = Accelerator() compiled_model = torch.compile(model) distributed_model = torch.nn.DataParallel(model) distributed_compiled_model = torch.compile(distributed_model) unwrapped_model = accelerator.unwrap_model(distributed_compiled_model, keep_torch_compile=True) assert compiled_model._orig_mod == unwrapped_model._orig_mod def test_can_unwrap_distributed_compiled_model_remove_torch_compile(self): model = create_components()[0] accelerator = Accelerator() compiled_model = torch.compile(model) distributed_model = torch.nn.DataParallel(model) distributed_compiled_model = torch.compile(distributed_model) unwrapped_model = accelerator.unwrap_model(distributed_compiled_model, keep_torch_compile=False) assert compiled_model._orig_mod == unwrapped_model @parameterized.expand([True, False]) def test_can_pickle_dataloader(self, dispatch_batches): """ Test that pickling a prepared dataloader works. """ data = torch.arange(10).to(torch_device) ds = torch.utils.data.TensorDataset(data) dl = torch.utils.data.DataLoader(ds) skip_dl = skip_first_batches(dl, 2) # Currently, StatefulDataLoader doesn't seem to support pickling, so we aren't testing that functionality # TODO: Add support for pickling StatefulDataLoader dataloader_config = DataLoaderConfiguration(dispatch_batches=dispatch_batches, use_stateful_dataloader=False) accelerator = Accelerator(dataloader_config=dataloader_config) original_dl, _ = accelerator.prepare(dl, skip_dl) if dispatch_batches: assert isinstance(original_dl, DataLoaderDispatcher) else: assert isinstance(original_dl, DataLoaderShard) prepared_model_dumps = pickle.dumps(accelerator) model_loaded = pickle.loads(prepared_model_dumps) assert len(model_loaded._dataloaders) == 2 # Assert equality of recovered and original dataloader loaded_dl = model_loaded._dataloaders[0] assert isinstance(loaded_dl, DataLoader) if dispatch_batches: assert isinstance(loaded_dl, DataLoaderDispatcher) else: assert isinstance(loaded_dl, DataLoaderShard) assert len(loaded_dl) == len(original_dl) assert [i for i in loaded_dl] == [i for i in original_dl] # Test skip dataloader works as expected as well loaded_skip_dl = model_loaded._dataloaders[1] assert isinstance(loaded_skip_dl, DataLoader) if dispatch_batches: assert isinstance(loaded_dl, DataLoaderDispatcher) else: assert isinstance(loaded_dl, DataLoaderShard) assert len(loaded_skip_dl) == len(original_dl) - 2 assert [i for i in loaded_skip_dl] == [i for i in original_dl][2:] # Ideally would be a parameterized test which works with either stateful or non-stateful dataloaders, but dependencies are a bit awkward. 
@require_torchdata_stateful_dataloader def test_prepared_objects_are_referenced_with_stateful_dataloader(self): """Test that setting `use_stateful_dataloader=True` in `DataLoaderConfiguration` prepares a `StatefulDataLoader` object instead of a `DataLoader` object.""" dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=True) accelerator = Accelerator(dataloader_config=dataloader_config) model, optimizer, scheduler, train_dl, valid_dl = create_components() ( prepared_model, prepared_optimizer, prepared_scheduler, prepared_train_dl, prepared_valid_dl, ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) assert prepared_model in accelerator._models assert prepared_optimizer in accelerator._optimizers assert prepared_scheduler in accelerator._schedulers assert prepared_train_dl in accelerator._dataloaders assert prepared_valid_dl in accelerator._dataloaders assert isinstance(prepared_train_dl, StatefulDataLoader) assert isinstance(prepared_valid_dl, StatefulDataLoader) @parameterized.expand( itertools.product([True, False], [True, False], [0, 2], [True, False]), name_func=parameterized_custom_name_func, ) @require_torchdata_stateful_dataloader def test_save_model_with_stateful_dataloader(self, use_safetensors, tied_weights, num_workers, dispatch_batches): """ Test that saving and loading a model with a stateful dataloader returns the same model, and that the dataloader's iterator is restored properly.""" set_seed(42) n_train_batches = 64 # Use enough batches to ensure we can get partial iterations on large compute dataloader_config = DataLoaderConfiguration(dispatch_batches=dispatch_batches, use_stateful_dataloader=True) accelerator = Accelerator(dataloader_config=dataloader_config) model, optimizer, scheduler, train_dl, valid_dl = create_components(tied_weights) train_dl, valid_dl = create_dataloaders_for_test(n_train_batches=n_train_batches, num_workers=num_workers) model = ModelForTest() ( prepared_model, prepared_optimizer, prepared_scheduler, prepared_train_dl, prepared_valid_dl, ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) assert isinstance(prepared_train_dl, StatefulDataLoader) assert isinstance(prepared_valid_dl, StatefulDataLoader) # Perform 3 training iterations to ensure the dataloader's iterator is advanced num_batches_to_skip = 3 model.train() untrained_batches = [] with tempfile.TemporaryDirectory() as tmpdirname: for step, batch in enumerate(prepared_train_dl): x, y = batch outputs = prepared_model(x) loss = torch.nn.functional.mse_loss(outputs, y) accelerator.backward(loss) prepared_optimizer.step() prepared_scheduler.step() prepared_optimizer.zero_grad() if step == num_batches_to_skip - 1: # Save the state once we've gone through a few batches accelerator.save_state(f"{tmpdirname}/state", safe_serialization=use_safetensors) if step >= num_batches_to_skip: untrained_batches.append(batch) not_skipped_batches = accelerator.gather(untrained_batches) # We then unwrap the trained model unwrapped_model = accelerator.unwrap_model(prepared_model) original_linear1 = unwrapped_model.linear1.weight.clone() original_batchnorm = unwrapped_model.batchnorm.weight.clone() original_linear2 = unwrapped_model.linear2.weight.clone() # Resume the state accelerator.load_state(f"{tmpdirname}/state") # Train this to the end of the DataLoader batches_seen_with_loaded_dl = 0 for batch in prepared_train_dl: x, y = batch outputs = prepared_model(x) loss = torch.nn.functional.mse_loss(outputs, y) accelerator.backward(loss) 
prepared_optimizer.step() prepared_scheduler.step() prepared_optimizer.zero_grad() batches_seen_with_loaded_dl += 1 unwrapped_model_2 = accelerator.unwrap_model(prepared_model) new_linear1 = unwrapped_model_2.linear1.weight new_batchnorm = unwrapped_model_2.batchnorm.weight new_linear2 = unwrapped_model_2.linear2.weight # Assert equalities assert batches_seen_with_loaded_dl == len(not_skipped_batches) assert torch.allclose(original_linear1, new_linear1) assert torch.allclose(original_batchnorm, new_batchnorm) assert torch.allclose(original_linear2, new_linear2) @require_non_cpu @require_huggingface_suite def test_nested_hook(self): from transformers.modeling_utils import PretrainedConfig, PreTrainedModel class MyLinear(torch.nn.Module): def __init__(self, device=None, dtype=None): factory_kwargs = {"device": device, "dtype": dtype} super().__init__() self.centroid = torch.nn.Embedding(1, 2) self.indices = torch.nn.Parameter(torch.empty((1, 2, 2), **factory_kwargs)) def forward(self, x): orig_shape = x.shape x = torch.abs(x + self.indices).long() x = x % 2 x = x.sum(-1) x = (self.centroid.weight + x).reshape(orig_shape) return x class MySubModel(torch.nn.Module): def __init__(self): super().__init__() self.layer = MyLinear() def forward(self, x): return self.layer(x) class MyModel(PreTrainedModel): def __init__(self, config): super().__init__(config) self.layer = torch.nn.ModuleList([MySubModel() for i in range(4)]) def forward(self, x): for layer in self.layer: x = layer(x) return x with tempfile.TemporaryDirectory() as tmpdirname: check_point = tmpdirname offload_folder = check_point + "/offload" os.makedirs(offload_folder, exist_ok=True) config = PretrainedConfig() m = MyModel(config) m.save_pretrained(check_point) with init_empty_weights(): my_model = MyModel(config) my_model = load_checkpoint_and_dispatch( my_model, checkpoint=check_point, max_memory={"cpu": 60, 0: 60}, device_map="auto", no_split_module_classes=["MySubModel"], offload_folder=offload_folder, preload_module_classes=["MyLinear"], ) # before fix, this would raise an error # weight is on the meta device, we need a `value` to put in on 0 x = torch.randn(1, 2) my_model(x)
accelerate/tests/test_accelerator.py/0
{ "file_path": "accelerate/tests/test_accelerator.py", "repo_id": "accelerate", "token_count": 15183 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import subprocess import sys from accelerate.test_utils import require_transformer_engine from accelerate.test_utils.testing import TempDirTestCase, require_import_timer from accelerate.utils import is_import_timer_available if is_import_timer_available(): from import_timer import calculate_total_time, read_import_profile from import_timer.core import get_paths_above_threshold, sort_nodes_by_total_time def convert_list_to_string(data): end_result = "" arrow_right = "->" for path in data: end_result += f"{arrow_right.join(path[0])} {path[1]:.3f}s\n" return end_result def run_import_time(command: str): output = subprocess.run([sys.executable, "-X", "importtime", "-c", command], capture_output=True, text=True) return output.stderr @require_import_timer class ImportSpeedTester(TempDirTestCase): """ Test suite which checks if imports have seen slowdowns based on a particular baseline. If the error messages are not clear enough to get a full view of what is slowing things down (or to figure out how deep the initial depth should be), please view the profile with the `tuna` framework: `tuna import.log`. """ clear_on_setup = False @classmethod def setUpClass(cls): super().setUpClass() output = run_import_time("import torch") data = read_import_profile(output) total_time = calculate_total_time(data) cls.pytorch_time = total_time def test_base_import(self): output = run_import_time("import accelerate") data = read_import_profile(output) total_time = calculate_total_time(data) pct_more = (total_time - self.pytorch_time) / self.pytorch_time * 100 # Base import should never be more than 20% slower than raw torch import err_msg = f"Base import is more than 20% slower than raw torch import ({pct_more:.2f}%), please check the attached `tuna` profile:\n" sorted_data = sort_nodes_by_total_time(data) paths_above_threshold = get_paths_above_threshold(sorted_data, 0.05, max_depth=7) err_msg += f"\n{convert_list_to_string(paths_above_threshold)}" self.assertLess(pct_more, 20, err_msg) def test_cli_import(self): output = run_import_time("from accelerate.commands.launch import launch_command_parser") data = read_import_profile(output) total_time = calculate_total_time(data) pct_more = (total_time - self.pytorch_time) / self.pytorch_time * 100 # Base import should never be more than 20% slower than raw torch import err_msg = f"Base import is more than 20% slower than raw torch import ({pct_more:.2f}%), please check the attached `tuna` profile:\n" sorted_data = sort_nodes_by_total_time(data) paths_above_threshold = get_paths_above_threshold(sorted_data, 0.05, max_depth=7) err_msg += f"\n{convert_list_to_string(paths_above_threshold)}" self.assertLess(pct_more, 20, err_msg) @require_transformer_engine class LazyImportTester(TempDirTestCase): """ Test suite which checks if specific packages are lazy-loaded. Eager-import will trigger circular import in some case, e.g. in huggingface/accelerate#3056. 
""" def test_te_import(self): output = run_import_time("import accelerate, accelerate.utils.transformer_engine") self.assertFalse(" transformer_engine" in output, "`transformer_engine` should not be imported on import")
accelerate/tests/test_imports.py/0
{ "file_path": "accelerate/tests/test_imports.py", "repo_id": "accelerate", "token_count": 1442 }
[workspace] members = [ "candle-core", "candle-datasets", "candle-examples", "candle-book", "candle-nn", "candle-pyo3", "candle-transformers", "candle-wasm-examples/*", "candle-wasm-tests", "tensor-tools", ] exclude = [ "candle-flash-attn", "candle-kernels", "candle-metal-kernels", "candle-onnx", ] resolver = "2" [workspace.package] version = "0.8.2" edition = "2021" description = "Minimalist ML framework." repository = "https://github.com/huggingface/candle" keywords = ["blas", "tensor", "machine-learning"] categories = ["science"] license = "MIT OR Apache-2.0" [workspace.dependencies] ab_glyph = "0.2.23" accelerate-src = { version = "0.3.2" } anyhow = { version = "1", features = ["backtrace"] } byteorder = "1.4.3" candle = { path = "./candle-core", package = "candle-core", version = "0.8.2" } candle-datasets = { path = "./candle-datasets", version = "0.8.2" } candle-flash-attn = { path = "./candle-flash-attn", version = "0.8.2" } candle-kernels = { path = "./candle-kernels", version = "0.8.2" } candle-metal-kernels = { path = "./candle-metal-kernels", version = "0.8.2" } candle-nn = { path = "./candle-nn", version = "0.8.2" } candle-onnx = { path = "./candle-onnx", version = "0.8.2" } candle-transformers = { path = "./candle-transformers", version = "0.8.2" } clap = { version = "4.2.4", features = ["derive"] } criterion = { version = "0.5.1", default-features=false } cudarc = { version = "0.13.0", features = ["std", "cublas", "cublaslt", "curand", "driver", "nvrtc", "f16", "cuda-version-from-build-system", "dynamic-linking"], default-features=false } fancy-regex = "0.13.0" gemm = { version = "0.17.0", features = ["wasm-simd128-enable"] } hf-hub = "0.4.1" half = { version = "2.3.1", features = ["num-traits", "use-intrinsics", "rand_distr"] } hound = "3.5.1" image = { version = "0.25.2", default-features = false, features = ["jpeg", "png"] } imageproc = { version = "0.24.0", default-features = false } intel-mkl-src = { version = "0.8.1", features = ["mkl-static-lp64-iomp"] } libc = { version = "0.2.147" } log = "0.4" memmap2 = { version = "0.9.3", features = ["stable_deref_trait"] } num_cpus = "1.15.0" num-traits = "0.2.15" parquet = { version = "51.0.0" } rand = "0.8.5" rand_distr = "0.4.3" rayon = "1.7.0" safetensors = "0.4.1" serde = { version = "1.0.171", features = ["derive"] } serde_plain = "1.0.2" serde_json = "1.0.99" thiserror = "1" tokenizers = { version = "0.19.1", default-features = false } tracing = "0.1.37" tracing-chrome = "0.7.1" tracing-subscriber = "0.3.7" ug = "0.1.0" ug-cuda = "0.1.0" ug-metal = "0.1.0" yoke = { version = "0.7.2", features = ["derive"] } zip = { version = "1.1.1", default-features = false } metal = { version = "0.27.0", features = ["mps"]} [profile.release-with-debug] inherits = "release" debug = true
candle/Cargo.toml/0
{ "file_path": "candle/Cargo.toml", "repo_id": "candle", "token_count": 1246 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Result; use candle_core::{Device, Tensor}; fn main() -> Result<()> { let a = Tensor::new(&[[0.0f32, 1.0, 2.0], [3.0, 4.0, 5.0]], &Device::Cpu)?; let b = Tensor::new(&[[88.0f32, 99.0]], &Device::Cpu)?; let new_a = a.slice_scatter(&b, 1, 2)?; assert_eq!(a.to_vec2::<f32>()?, [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]); assert_eq!(new_a.to_vec2::<f32>()?, [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]); Ok(()) }
candle/candle-core/examples/basics.rs/0
{ "file_path": "candle/candle-core/examples/basics.rs", "repo_id": "candle", "token_count": 287 }
/// Helper functions to write CPU kernels. use crate::backend::BackendStorage; use crate::{Error, Layout, Result, WithDType}; type C = super::CpuStorage; pub trait Map1 { fn f<T: WithDType>(&self, vs: &[T], layout: &Layout) -> Result<Vec<T>>; fn map(&self, vs: &C, layout: &Layout) -> Result<C> { match vs { C::U8(vs) => Ok(C::U8(self.f(vs, layout)?)), C::U32(vs) => Ok(C::U32(self.f(vs, layout)?)), C::I64(vs) => Ok(C::I64(self.f(vs, layout)?)), C::BF16(vs) => Ok(C::BF16(self.f(vs, layout)?)), C::F16(vs) => Ok(C::F16(self.f(vs, layout)?)), C::F32(vs) => Ok(C::F32(self.f(vs, layout)?)), C::F64(vs) => Ok(C::F64(self.f(vs, layout)?)), } } } pub trait Map1Any { fn f<T: WithDType, W: Fn(Vec<T>) -> C>(&self, vs: &[T], layout: &Layout, wrap: W) -> Result<C>; fn map(&self, vs: &C, layout: &Layout) -> Result<C> { match vs { C::U8(vs) => Ok(self.f(vs, layout, C::U8)?), C::U32(vs) => Ok(self.f(vs, layout, C::U32)?), C::I64(vs) => Ok(self.f(vs, layout, C::I64)?), C::BF16(vs) => Ok(self.f(vs, layout, C::BF16)?), C::F16(vs) => Ok(self.f(vs, layout, C::F16)?), C::F32(vs) => Ok(self.f(vs, layout, C::F32)?), C::F64(vs) => Ok(self.f(vs, layout, C::F64)?), } } } pub trait Map2 { const OP: &'static str; fn f<T: WithDType>(&self, v1: &[T], l1: &Layout, v2: &[T], l2: &Layout) -> Result<Vec<T>>; fn map(&self, v1: &C, l1: &Layout, v2: &C, l2: &Layout) -> Result<C> { match (v1, v2) { (C::U8(v1), C::U8(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::U32(v1), C::U32(v2)) => Ok(C::U32(self.f(v1, l1, v2, l2)?)), (C::I64(v1), C::I64(v2)) => Ok(C::I64(self.f(v1, l1, v2, l2)?)), (C::BF16(v1), C::BF16(v2)) => Ok(C::BF16(self.f(v1, l1, v2, l2)?)), (C::F16(v1), C::F16(v2)) => Ok(C::F16(self.f(v1, l1, v2, l2)?)), (C::F32(v1), C::F32(v2)) => Ok(C::F32(self.f(v1, l1, v2, l2)?)), (C::F64(v1), C::F64(v2)) => Ok(C::F64(self.f(v1, l1, v2, l2)?)), _ => Err(Error::DTypeMismatchBinaryOp { lhs: v1.dtype(), rhs: v2.dtype(), op: Self::OP, } .bt()), } } } pub trait Map2U8 { const OP: &'static str; fn f<T: WithDType>(&self, v1: &[T], l1: &Layout, v2: &[T], l2: &Layout) -> Result<Vec<u8>>; fn map(&self, v1: &C, l1: &Layout, v2: &C, l2: &Layout) -> Result<C> { match (v1, v2) { (C::U8(v1), C::U8(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::U32(v1), C::U32(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::I64(v1), C::I64(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::BF16(v1), C::BF16(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::F16(v1), C::F16(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::F32(v1), C::F32(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::F64(v1), C::F64(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), _ => Err(Error::DTypeMismatchBinaryOp { lhs: v1.dtype(), rhs: v2.dtype(), op: Self::OP, } .bt()), } } } pub fn binary_map<T: Copy, U: Copy, F: FnMut(T, T) -> U>( lhs_l: &Layout, rhs_l: &Layout, lhs: &[T], rhs: &[T], mut f: F, ) -> Vec<U> { match (lhs_l.contiguous_offsets(), rhs_l.contiguous_offsets()) { (Some((o_l1, o_l2)), Some((o_r1, o_r2))) => lhs[o_l1..o_l2] .iter() .zip(rhs[o_r1..o_r2].iter()) .map(|(&l, &r)| f(l, r)) .collect(), (Some((o_l1, o_l2)), None) => { // TODO: Maybe we want to avoid going through the layout twice. 
match rhs_l.offsets_b() { Some(ob) => { let mut i_in_block = 0; let mut i_right_broadcast = 0; lhs[o_l1..o_l2] .iter() .map(|&l| { let r = unsafe { rhs.get_unchecked(i_in_block + ob.start) }; i_right_broadcast += 1; if i_right_broadcast >= ob.right_broadcast { i_in_block += 1; i_right_broadcast = 0; } if i_in_block >= ob.len { i_in_block = 0 } f(l, *r) }) .collect() } None => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), } } (None, Some((o_r1, o_r2))) => { // TODO: Maybe we want to avoid going through the layout twice. match lhs_l.offsets_b() { Some(ob) => { let mut i_in_block = 0; let mut i_right_broadcast = 0; rhs[o_r1..o_r2] .iter() .map(|&r| { let l = unsafe { lhs.get_unchecked(i_in_block + ob.start) }; i_right_broadcast += 1; if i_right_broadcast >= ob.right_broadcast { i_in_block += 1; i_right_broadcast = 0; } if i_in_block >= ob.len { i_in_block = 0 } f(*l, r) }) .collect() } None => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), } } _ => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), } } // Similar to binary_map but with vectorized variants. pub fn binary_map_vec<T: Copy, F: FnMut(T, T) -> T, FV: FnMut(&[T], &[T], &mut [T])>( lhs_l: &Layout, rhs_l: &Layout, lhs: &[T], rhs: &[T], mut f: F, mut f_vec: FV, ) -> Vec<T> { let el_count = lhs_l.shape().elem_count(); match (lhs_l.contiguous_offsets(), rhs_l.contiguous_offsets()) { (Some((o_l1, o_l2)), Some((o_r1, o_r2))) => { let mut ys: Vec<T> = Vec::with_capacity(el_count); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<&mut [std::mem::MaybeUninit<T>], &mut [T]>(ys_to_set) }; f_vec(&lhs[o_l1..o_l2], &rhs[o_r1..o_r2], ys_to_set); // SAFETY: values are all set by f_vec. unsafe { ys.set_len(el_count) }; ys } (Some((o_l1, o_l2)), None) => match rhs_l.offsets_b() { Some(ob) if ob.right_broadcast == 1 => { let rhs = &rhs[ob.start..ob.start + ob.len]; let mut ys: Vec<T> = Vec::with_capacity(el_count); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<&mut [std::mem::MaybeUninit<T>], &mut [T]>(ys_to_set) }; let mut dst_i = 0; for src_i in (o_l1..o_l2).step_by(ob.len) { f_vec( &lhs[src_i..src_i + ob.len], rhs, &mut ys_to_set[dst_i..dst_i + ob.len], ); dst_i += ob.len; } // SAFETY: values are all set by f_vec. unsafe { ys.set_len(el_count) }; ys } Some(ob) => { let rhs = &rhs[ob.start..ob.start + ob.len]; let mut ys = lhs[o_l1..o_l2].to_vec(); for idx_l in 0..ob.left_broadcast { let start = idx_l * ob.len * ob.right_broadcast; for (i, &r) in rhs.iter().enumerate() { let start = start + i * ob.right_broadcast; for v in ys[start..start + ob.right_broadcast].iter_mut() { *v = f(*v, r) } } } ys } None => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), }, (None, Some((o_r1, o_r2))) => match lhs_l.offsets_b() { Some(ob) if ob.right_broadcast == 1 => { let lhs = &lhs[ob.start..ob.start + ob.len]; let mut ys: Vec<T> = Vec::with_capacity(el_count); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<&mut [std::mem::MaybeUninit<T>], &mut [T]>(ys_to_set) }; let mut dst_i = 0; for src_i in (o_r1..o_r2).step_by(ob.len) { f_vec( lhs, &rhs[src_i..src_i + ob.len], &mut ys_to_set[dst_i..dst_i + ob.len], ); dst_i += ob.len; } // SAFETY: values are all set by f_vec. 
unsafe { ys.set_len(el_count) }; ys } Some(ob) => { let lhs = &lhs[ob.start..ob.start + ob.len]; let mut ys = rhs[o_r1..o_r2].to_vec(); for idx_l in 0..ob.left_broadcast { let start = idx_l * ob.len * ob.right_broadcast; for (i, &l) in lhs.iter().enumerate() { let start = start + i * ob.right_broadcast; for v in ys[start..start + ob.right_broadcast].iter_mut() { *v = f(l, *v) } } } ys } None => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), }, _ => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), } } pub fn unary_map<T: Copy, U: Copy, F: FnMut(T) -> U>( vs: &[T], layout: &Layout, mut f: F, ) -> Vec<U> { match layout.strided_blocks() { crate::StridedBlocks::SingleBlock { start_offset, len } => vs [start_offset..start_offset + len] .iter() .map(|&v| f(v)) .collect(), crate::StridedBlocks::MultipleBlocks { block_start_index, block_len, } => { let mut result = Vec::with_capacity(layout.shape().elem_count()); // Specialize the case where block_len is one to avoid the second loop. if block_len == 1 { for index in block_start_index { let v = unsafe { vs.get_unchecked(index) }; result.push(f(*v)) } } else { for index in block_start_index { for offset in 0..block_len { let v = unsafe { vs.get_unchecked(index + offset) }; result.push(f(*v)) } } } result } } } pub fn unary_map_vec<T: Copy, U: Copy, F: FnMut(T) -> U, FV: FnMut(&[T], &mut [U])>( vs: &[T], layout: &Layout, mut f: F, mut f_vec: FV, ) -> Vec<U> { match layout.strided_blocks() { crate::StridedBlocks::SingleBlock { start_offset, len } => { let mut ys: Vec<U> = Vec::with_capacity(len); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<&mut [std::mem::MaybeUninit<U>], &mut [U]>(ys_to_set) }; f_vec(&vs[start_offset..start_offset + len], ys_to_set); // SAFETY: values are all set by f_vec. unsafe { ys.set_len(len) }; ys } crate::StridedBlocks::MultipleBlocks { block_start_index, block_len, } => { let el_count = layout.shape().elem_count(); // Specialize the case where block_len is one to avoid the second loop. if block_len == 1 { let mut result = Vec::with_capacity(el_count); for index in block_start_index { let v = unsafe { vs.get_unchecked(index) }; result.push(f(*v)) } result } else { let mut ys: Vec<U> = Vec::with_capacity(el_count); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<&mut [std::mem::MaybeUninit<U>], &mut [U]>(ys_to_set) }; let mut dst_index = 0; for src_index in block_start_index { let vs = &vs[src_index..src_index + block_len]; let ys = &mut ys_to_set[dst_index..dst_index + block_len]; f_vec(vs, ys); dst_index += block_len; } // SAFETY: values are all set by f_vec. unsafe { ys.set_len(el_count) }; ys } } } }
candle/candle-core/src/cpu_backend/utils.rs/0
{ "file_path": "candle/candle-core/src/cpu_backend/utils.rs", "repo_id": "candle", "token_count": 9033 }
use crate::{DType, Result}; use candle_metal_kernels::Kernels; use metal::{Buffer, CommandBuffer, CommandQueue, MTLResourceOptions, NSUInteger}; use std::collections::HashMap; use std::path::Path; use std::sync::{Arc, Mutex, RwLock}; use super::MetalError; /// Unique identifier for cuda devices. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct DeviceId(usize); impl DeviceId { pub(crate) fn new() -> Self { // https://users.rust-lang.org/t/idiomatic-rust-way-to-generate-unique-id/33805 use std::sync::atomic; static COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::new(1); Self(COUNTER.fetch_add(1, atomic::Ordering::Relaxed)) } } type BufferMap = HashMap<(NSUInteger, MTLResourceOptions), Vec<Arc<Buffer>>>; pub(crate) struct Commands { /// Single command queue for the entire device. command_queue: CommandQueue, /// One command buffer at a time. /// The scheduler works by allowing multiple /// [ComputeCommandEncoder](https://developer.apple.com/documentation/metal/mtlcomputecommandencoder?language=objc) /// on a single command buffer. Using a single command buffer would be fastest on the GPU but /// prevents overlapping of CPU and GPU commands (because command buffer needs to be committed /// to start to work). /// Despite what the documentation says, command buffers are NOT ordered. They are ordered /// for their START time, but there's no guarantee that command buffer1 will finish before /// command buffer2 starts (or there are metal bugs there) command_buffer: CommandBuffer, /// Keeps track of the current amount of compute command encoders on the current /// command buffer /// Arc, RwLock because of the interior mutability. command_buffer_index: usize, /// The maximum amount of [compute command encoder](https://developer.apple.com/documentation/metal/mtlcomputecommandencoder?language=objc) per [command buffer](https://developer.apple.com/documentation/metal/mtlcommandbuffer?language=objc) compute_per_buffer: usize, } impl Commands { pub(crate) fn new(command_queue: CommandQueue) -> Result<Self> { let command_buffer = command_queue.new_command_buffer().to_owned(); command_buffer.enqueue(); let compute_per_buffer = match std::env::var("CANDLE_METAL_COMPUTE_PER_BUFFER") { Ok(val) => val.parse()?, _ => 50, }; Ok(Self { command_queue, command_buffer, command_buffer_index: 0, compute_per_buffer, }) } pub fn command_buffer(&mut self) -> Result<(bool, CommandBuffer)> { let mut command_buffer = self.command_buffer.to_owned(); let mut flushed = false; if self.command_buffer_index > self.compute_per_buffer { self.command_buffer.commit(); command_buffer = self.command_queue.new_command_buffer().to_owned(); self.command_buffer = command_buffer.clone(); self.command_buffer_index = 0; flushed = true; } self.command_buffer_index += 1; Ok((flushed, command_buffer)) } pub fn wait_until_completed(&mut self) -> Result<()> { match self.command_buffer.status() { metal::MTLCommandBufferStatus::Committed | metal::MTLCommandBufferStatus::Scheduled | metal::MTLCommandBufferStatus::Completed => { panic!("Already committed"); } _ => {} } self.command_buffer.commit(); self.command_buffer.wait_until_completed(); self.command_buffer = self.command_queue.new_command_buffer().to_owned(); Ok(()) } } #[derive(Clone)] pub struct MetalDevice { /// Unique identifier, the registryID is not sufficient as it identifies the GPU rather than /// the device itself. 
pub(crate) id: DeviceId, /// Raw metal device: <https://developer.apple.com/documentation/metal/mtldevice?language=objc> pub(crate) device: metal::Device, pub(crate) commands: Arc<RwLock<Commands>>, /// Simple allocator struct. /// The buffers are stored in size buckets since ML tends to use similar shapes over and over. /// We store the buffers in [`Arc`] because it's much faster than Obj-c internal ref counting /// (could be linked to FFI communication overhead). /// /// Whenever a buffer has a strong_count==1, we can reuse it, it means it was dropped in the /// graph calculation, and only we the allocator kept a reference to it, therefore it's free /// to be reused. However, in order for this to work, we need to guarantee the order of /// operation, so that this buffer is not being used by another kernel at the same time. /// Arc is the CPU reference count, it doesn't mean anything on the GPU side of things. /// /// Whenever we actually allocate a new buffer, we make a full sweep to clean up unused buffers /// (strong_count = 1). pub(crate) buffers: Arc<RwLock<BufferMap>>, /// Simple keeper struct to keep track of the already compiled kernels so we can reuse them. /// Heavily used by [`candle_metal_kernels`] pub(crate) kernels: Arc<Kernels>, /// Seed for random number generation. pub(crate) seed: Arc<Mutex<Buffer>>, } impl std::fmt::Debug for MetalDevice { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "MetalDevice({:?})", self.id) } } impl std::ops::Deref for MetalDevice { type Target = metal::DeviceRef; fn deref(&self) -> &Self::Target { &self.device } } impl MetalDevice { #[cfg(not(target_arch = "wasm32"))] pub fn compile( &self, func_name: &'static str, kernel: ug::lang::ssa::Kernel, ) -> Result<metal::ComputePipelineState> { let mut buf = vec![]; ug_metal::code_gen::gen(&mut buf, func_name, &kernel)?; let metal_code = String::from_utf8(buf)?; let lib = self .device .new_library_with_source(&metal_code, &metal::CompileOptions::new()) .map_err(MetalError::from)?; let func = lib .get_function(func_name, None) .map_err(MetalError::from)?; let pl = self .device .new_compute_pipeline_state_with_function(&func) .map_err(MetalError::from)?; Ok(pl) } pub fn id(&self) -> DeviceId { self.id } pub fn metal_device(&self) -> &metal::Device { &self.device } fn drop_unused_buffers(&self) -> Result<()> { let mut buffers = self.buffers.write().map_err(MetalError::from)?; for subbuffers in buffers.values_mut() { let newbuffers = subbuffers .iter() .filter(|s| Arc::strong_count(*s) > 1) .map(Arc::clone) .collect(); *subbuffers = newbuffers; } Ok(()) } pub fn command_buffer(&self) -> Result<CommandBuffer> { let mut commands = self.commands.write().map_err(MetalError::from)?; let (flushed, command_buffer) = commands.command_buffer()?; if flushed { self.drop_unused_buffers()? } Ok(command_buffer) } pub fn wait_until_completed(&self) -> Result<()> { let mut commands = self.commands.write().map_err(MetalError::from)?; commands.wait_until_completed() } pub fn kernels(&self) -> &Kernels { &self.kernels } pub fn device(&self) -> &metal::Device { &self.device } /// Creates a new buffer (not necessarily zeroed). /// The buffer is [MTLPrivate](https://developer.apple.com/documentation/metal/mtlstoragemode) /// This means the buffer data cannot be read on the CPU directly. 
/// /// [`name`] is only used to keep track of the resource origin in case of bugs pub fn new_buffer( &self, element_count: usize, dtype: DType, name: &str, ) -> Result<Arc<Buffer>> { let size = (element_count * dtype.size_in_bytes()) as NSUInteger; self.allocate_buffer(size, MTLResourceOptions::StorageModePrivate, name) } /// Creates a new buffer (not necessarily zeroed). /// The buffer is [MTLManaged](https://developer.apple.com/documentation/metal/mtlstoragemode) /// This means the buffer can be read on the CPU but will require manual /// synchronization when the CPU memory is modified /// Used as a bridge to gather data back from the GPU pub fn new_buffer_managed(&self, size: NSUInteger) -> Result<Arc<Buffer>> { self.allocate_buffer(size, MTLResourceOptions::StorageModeManaged, "managed") } /// Creates a new buffer from data. /// The buffer is [MTLManaged](https://developer.apple.com/documentation/metal/mtlstoragemode) /// /// Does not require synchronization, as [newBufferWithBytes](https://developer.apple.com/documentation/metal/mtldevice/1433429-newbufferwithbytes) /// allocates the buffer and copies over the existing data before returning the MTLBuffer. pub fn new_buffer_with_data<T>(&self, data: &[T]) -> Result<Arc<Buffer>> { let size = core::mem::size_of_val(data) as NSUInteger; let new_buffer = self.device.new_buffer_with_data( data.as_ptr().cast(), size, MTLResourceOptions::StorageModeManaged, ); let mut buffers = self.buffers.write().map_err(MetalError::from)?; let subbuffers = buffers .entry((size, MTLResourceOptions::StorageModeManaged)) .or_insert(vec![]); let new_buffer = Arc::new(new_buffer); subbuffers.push(new_buffer.clone()); Ok(new_buffer) } pub fn allocate_zeros(&self, size_in_bytes: usize) -> Result<Arc<Buffer>> { let buffer = self.allocate_buffer( size_in_bytes as NSUInteger, MTLResourceOptions::StorageModePrivate, "allocate_zeros", )?; let command_buffer = self.command_buffer()?; command_buffer.set_label("zeros"); let blit = command_buffer.new_blit_command_encoder(); blit.fill_buffer( &buffer, metal::NSRange { location: 0, length: buffer.length(), }, 0, ); blit.end_encoding(); Ok(buffer) } /// The critical allocator algorithm fn allocate_buffer( &self, size: NSUInteger, option: MTLResourceOptions, _name: &str, ) -> Result<Arc<Buffer>> { let mut buffers = self.buffers.write().map_err(MetalError::from)?; if let Some(b) = find_available_buffer(size, option, &buffers) { // Cloning also ensures we increment the strong count return Ok(b.clone()); } let size = buf_size(size); let subbuffers = buffers.entry((size, option)).or_insert(vec![]); let new_buffer = self.device.new_buffer(size as NSUInteger, option); let new_buffer = Arc::new(new_buffer); subbuffers.push(new_buffer.clone()); Ok(new_buffer) } /// Create a metal GPU capture trace on [`path`]. pub fn capture<P: AsRef<Path>>(&self, path: P) -> Result<()> { let capture = metal::CaptureManager::shared(); let descriptor = metal::CaptureDescriptor::new(); descriptor.set_destination(metal::MTLCaptureDestination::GpuTraceDocument); descriptor.set_capture_device(self); // The [set_output_url] call requires an absolute path so we convert it if needed. 
if path.as_ref().is_absolute() { descriptor.set_output_url(path); } else { let path = std::env::current_dir()?.join(path); descriptor.set_output_url(path); } capture .start_capture(&descriptor) .map_err(MetalError::from)?; Ok(()) } } fn buf_size(size: NSUInteger) -> NSUInteger { size.saturating_sub(1).next_power_of_two() as NSUInteger } fn find_available_buffer( size: NSUInteger, option: MTLResourceOptions, buffers: &BufferMap, ) -> Option<Arc<Buffer>> { let mut best_buffer: Option<&Arc<Buffer>> = None; let mut best_buffer_size: NSUInteger = NSUInteger::MAX; for ((buffer_size, buffer_option), subbuffers) in buffers.iter() { if buffer_size >= &size && buffer_size < &best_buffer_size && buffer_option == &option { for sub in subbuffers { if Arc::strong_count(sub) == 1 { best_buffer = Some(sub); best_buffer_size = *buffer_size; } } } } best_buffer.cloned() }
candle/candle-core/src/metal_backend/device.rs/0
{ "file_path": "candle/candle-core/src/metal_backend/device.rs", "repo_id": "candle", "token_count": 5226 }
use super::k_quants::{BlockQ2K, BlockQ4K, BlockQ4_0, BlockQ6K, BlockQ8K, BlockQ8_0, QK8_0, QK_K}; use crate::Result; use byteorder::{ByteOrder, LittleEndian}; use half::f16; use core::arch::wasm32::*; #[inline(always)] pub(crate) fn vec_dot_q4_0_q8_0(n: usize, xs: &[BlockQ4_0], ys: &[BlockQ8_0]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q4_0_q8_0: {n} is not divisible by {qk}") } unsafe { let mut acc = f32x4_splat(0.0f32); for (x, y) in xs.iter().zip(ys.iter()) { let x1234 = v128_load(x.qs.as_ptr() as *const v128); let x12 = v128_and(x1234, u8x16_splat(0x0F)); let x12 = i8x16_sub(x12, i8x16_splat(8)); let x34 = u8x16_shr(x1234, 4); let x34 = i8x16_sub(x34, i8x16_splat(8)); let x1 = i16x8_extend_low_i8x16(x12); let y1 = i16x8_load_extend_i8x8(y.qs.as_ptr()); let sum_xy = i32x4_dot_i16x8(x1, y1); let x2 = i16x8_extend_high_i8x16(x12); let y2 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(8)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x2, y2)); let x3 = i16x8_extend_low_i8x16(x34); let y3 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(16)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x3, y3)); let x4 = i16x8_extend_high_i8x16(x34); let y4 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(24)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x4, y4)); let sum_xy = f32x4_convert_i32x4(sum_xy); // f32x4_relaxed_madd is nightly only. let d = f32x4_splat(f16::to_f32(x.d) * f16::to_f32(y.d)); let scaled = f32x4_mul(sum_xy, d); acc = f32x4_add(acc, scaled) } let res = f32x4_extract_lane::<0>(acc) + f32x4_extract_lane::<1>(acc) + f32x4_extract_lane::<2>(acc) + f32x4_extract_lane::<3>(acc); Ok(res) } } #[inline(always)] pub(crate) fn vec_dot_q8_0_q8_0(n: usize, xs: &[BlockQ8_0], ys: &[BlockQ8_0]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q8_0_q8_0: {n} is not divisible by {qk}") } unsafe { let mut acc = f32x4_splat(0.0f32); for (x, y) in xs.iter().zip(ys.iter()) { let x1 = i16x8_load_extend_i8x8(x.qs.as_ptr()); let y1 = i16x8_load_extend_i8x8(y.qs.as_ptr()); let sum_xy = i32x4_dot_i16x8(x1, y1); let x2 = i16x8_load_extend_i8x8(x.qs.as_ptr().add(8)); let y2 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(8)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x2, y2)); let x3 = i16x8_load_extend_i8x8(x.qs.as_ptr().add(16)); let y3 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(16)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x3, y3)); let x4 = i16x8_load_extend_i8x8(x.qs.as_ptr().add(24)); let y4 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(24)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x4, y4)); let sum_xy = f32x4_convert_i32x4(sum_xy); // f32x4_relaxed_madd is nightly only. 
let d = f32x4_splat(f16::to_f32(x.d) * f16::to_f32(y.d)); let scaled = f32x4_mul(sum_xy, d); acc = f32x4_add(acc, scaled) } let res = f32x4_extract_lane::<0>(acc) + f32x4_extract_lane::<1>(acc) + f32x4_extract_lane::<2>(acc) + f32x4_extract_lane::<3>(acc); Ok(res) } } #[inline(always)] pub(crate) fn vec_dot_q2k_q8k(n: usize, xs: &[BlockQ2K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q2k_q8k: {n} is not divisible by {QK_K}") } unsafe { let mut sumf = f32x4_splat(0f32); for (x, y) in xs.iter().zip(ys.iter()) { let mut q2: &[_] = &x.qs; let mut q8: &[_] = &y.qs; let sc = &x.scales; let mut summs = i32x4_splat(0); for i in (0..(QK_K / 16)).step_by(4) { let bsums = i32x4_load_extend_i16x4(y.bsums.as_ptr().add(i)); let scales = i32x4_shr( i32x4( sc[i] as i32, sc[i + 1] as i32, sc[i + 2] as i32, sc[i + 3] as i32, ), 4, ); summs = i32x4_add(summs, i32x4_mul(bsums, scales)) } let summs = f32x4_convert_i32x4(summs); let dall = y.d * x.d.to_f32(); let dmin = y.d * x.dmin.to_f32(); let mut isum = i32x4_splat(0); let mut is = 0; for _ in 0..(QK_K / 128) { let mut shift = 0; for _ in 0..4 { let d = (sc[is] & 0xF) as i32; is += 1; let mut isuml = i16x8_splat(0); for l in (0..16).step_by(8) { let q8 = i16x8_load_extend_i8x8(q8.as_ptr().add(l)); let q2 = i16x8_load_extend_u8x8(q2.as_ptr().add(l)); let q2 = v128_and(i16x8_shr(q2, shift), i16x8_splat(3)); isuml = i16x8_add(isuml, i16x8_mul(q2, q8)) } let dd = i32x4_splat(d); isum = i32x4_add(isum, i32x4_mul(i32x4_extend_low_i16x8(isuml), dd)); isum = i32x4_add(isum, i32x4_mul(i32x4_extend_high_i16x8(isuml), dd)); let d = (sc[is] & 0xF) as i32; is += 1; let mut isuml = i16x8_splat(0); for l in (16..32).step_by(8) { let q8 = i16x8_load_extend_i8x8(q8.as_ptr().add(l)); let q2 = i16x8_load_extend_u8x8(q2.as_ptr().add(l)); let q2 = v128_and(i16x8_shr(q2, shift), i16x8_splat(3)); isuml = i16x8_add(isuml, i16x8_mul(q2, q8)) } let dd = i32x4_splat(d); isum = i32x4_add(isum, i32x4_mul(i32x4_extend_low_i16x8(isuml), dd)); isum = i32x4_add(isum, i32x4_mul(i32x4_extend_high_i16x8(isuml), dd)); shift += 2; // adjust the indexing q8 = &q8[32..]; } // adjust the indexing q2 = &q2[32..]; } let isum = f32x4_convert_i32x4(isum); sumf = f32x4_add( sumf, f32x4_sub( f32x4_mul(isum, f32x4_splat(dall)), f32x4_mul(summs, f32x4_splat(dmin)), ), ); } let sumf = f32x4_extract_lane::<0>(sumf) + f32x4_extract_lane::<1>(sumf) + f32x4_extract_lane::<2>(sumf) + f32x4_extract_lane::<3>(sumf); Ok(sumf) } } #[inline(always)] pub(crate) fn vec_dot_q4k_q8k(n: usize, xs: &[BlockQ4K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q4k_q8k: {n} is not divisible by {QK_K}") } const KMASK1: u32 = 0x3f3f3f3f; const KMASK2: u32 = 0x0f0f0f0f; const KMASK3: u32 = 0x03030303; let mut utmp: [u32; 4] = [0; 4]; let mut scales: [u8; 8] = [0; 8]; let mut mins: [u8; 8] = [0; 8]; let mut aux8: [u8; QK_K] = [0; QK_K]; let mut sums = f32x4_splat(0f32); unsafe { for (y, x) in ys.iter().zip(xs.iter()) { let q4 = &x.qs; let q8 = &y.qs; for j in 0..QK_K / 64 { let q4_1 = v128_load(q4.as_ptr().add(32 * j) as *const v128); let q4_2 = v128_load(q4.as_ptr().add(32 * j + 16) as *const v128); v128_store( aux8.as_mut_ptr().add(64 * j) as *mut v128, v128_and(q4_1, u8x16_splat(0x0F)), ); v128_store( aux8.as_mut_ptr().add(64 * j + 16) as *mut v128, v128_and(q4_2, u8x16_splat(0x0F)), ); v128_store( aux8.as_mut_ptr().add(64 * j + 32) as *mut v128, u8x16_shr(q4_1, 4), ); v128_store( aux8.as_mut_ptr().add(64 * j + 48) as *mut v128, u8x16_shr(q4_2, 4), ); } 
LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]); utmp[3] = ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4); let uaux = utmp[1] & KMASK1; utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4); utmp[2] = uaux; utmp[0] &= KMASK1; //extract scales and mins LittleEndian::write_u32_into(&utmp[0..2], &mut scales); LittleEndian::write_u32_into(&utmp[2..4], &mut mins); let mut sumi = i32x4_splat(0); for j in (0..QK_K / 16).step_by(4) { let bsums = i32x4_load_extend_i16x4(y.bsums.as_ptr().add(j)); let (m1, m2) = (mins[j / 2] as i32, mins[j / 2 + 1] as i32); let mins = i32x4(m1, m1, m2, m2); sumi = i32x4_add(sumi, i32x4_mul(bsums, mins)); } let mut aux32 = i32x4_splat(0i32); for (scale_i, scale) in scales.iter().enumerate() { let scale = i32x4_splat(*scale as i32); for j in 0..4 { let i = 32 * scale_i + 8 * j; let q8 = i16x8_load_extend_i8x8(q8.as_ptr().add(i)); let aux8 = i16x8_load_extend_u8x8(aux8.as_ptr().add(i)); let aux16 = i16x8_mul(q8, aux8); aux32 = i32x4_add(aux32, i32x4_mul(scale, i32x4_extend_low_i16x8(aux16))); aux32 = i32x4_add(aux32, i32x4_mul(scale, i32x4_extend_high_i16x8(aux16))); } } let aux32 = f32x4_convert_i32x4(aux32); let d = f32x4_splat(x.d.to_f32() * y.d); sums = f32x4_add(sums, f32x4_mul(aux32, d)); let dmin = x.dmin.to_f32() * y.d; let dmin = f32x4_splat(dmin); let sumi = f32x4_convert_i32x4(sumi); sums = f32x4_sub(sums, f32x4_mul(sumi, dmin)); } let sums = f32x4_extract_lane::<0>(sums) + f32x4_extract_lane::<1>(sums) + f32x4_extract_lane::<2>(sums) + f32x4_extract_lane::<3>(sums); Ok(sums) } } #[inline(always)] pub(crate) fn vec_dot_q6k_q8k(n: usize, xs: &[BlockQ6K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q6k_q8k: {n} is not divisible by {QK_K}") } let mut aux8 = [0i8; QK_K]; unsafe { let mut sums = f32x4_splat(0f32); for (x, y) in xs.iter().zip(ys.iter()) { let q4 = &x.ql; let qh = &x.qh; let q8 = &y.qs; let mut aux32 = f32x4_splat(0f32); for j in (0..QK_K).step_by(128) { let aux8 = aux8.as_mut_ptr().add(j); let q4 = &q4.as_ptr().add(j / 2); let qh = &qh.as_ptr().add(j / 4); for l in (0..32).step_by(16) { // aux8[l] = (((q4[l] & 0xF) | ((qh[l] & 3) << 4)) as i32 - 32) as i8; let a8 = v128_or( v128_and(v128_load(q4.add(l) as *const v128), u8x16_splat(0xF)), u8x16_shl( v128_and(v128_load(qh.add(l) as *const v128), u8x16_splat(3)), 4, ), ); let a8_low = i16x8_sub(i16x8_extend_low_u8x16(a8), i16x8_splat(32)); let a8_high = i16x8_sub(i16x8_extend_high_u8x16(a8), i16x8_splat(32)); v128_store( aux8.add(l) as *mut v128, i8x16_narrow_i16x8(a8_low, a8_high), ); // aux8[l + 32] = // (((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) as i32 - 32) as i8; let a8 = v128_or( v128_and(v128_load(q4.add(l + 32) as *const v128), u8x16_splat(0xF)), u8x16_shl( v128_and( u8x16_shr(v128_load(qh.add(l) as *const v128), 2), u8x16_splat(3), ), 4, ), ); let a8_low = i16x8_sub(i16x8_extend_low_u8x16(a8), i16x8_splat(32)); let a8_high = i16x8_sub(i16x8_extend_high_u8x16(a8), i16x8_splat(32)); v128_store( aux8.add(l + 32) as *mut v128, i8x16_narrow_i16x8(a8_low, a8_high), ); // aux8[l + 64] = (((q4[l] >> 4) | (((qh[l] >> 4) & 3) << 4)) as i32 - 32) as i8; let a8 = v128_or( u8x16_shr(v128_load(q4.add(l) as *const v128), 4), u8x16_shl( v128_and( u8x16_shr(v128_load(qh.add(l) as *const v128), 4), u8x16_splat(3), ), 4, ), ); let a8_low = i16x8_sub(i16x8_extend_low_u8x16(a8), i16x8_splat(32)); let a8_high = i16x8_sub(i16x8_extend_high_u8x16(a8), i16x8_splat(32)); v128_store( aux8.add(l + 64) as *mut v128, i8x16_narrow_i16x8(a8_low, a8_high), 
); // aux8[l + 96] = // (((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) as i32 - 32) as i8; let a8 = v128_or( u8x16_shr(v128_load(q4.add(l + 32) as *const v128), 4), u8x16_shl( v128_and( u8x16_shr(v128_load(qh.add(l) as *const v128), 6), u8x16_splat(3), ), 4, ), ); let a8_low = i16x8_sub(i16x8_extend_low_u8x16(a8), i16x8_splat(32)); let a8_high = i16x8_sub(i16x8_extend_high_u8x16(a8), i16x8_splat(32)); v128_store( aux8.add(l + 96) as *mut v128, i8x16_narrow_i16x8(a8_low, a8_high), ); } } for (j, &scale) in x.scales.iter().enumerate() { let scale = f32x4_splat(scale as f32); for offset in [0, 8] { let aux16 = i16x8_mul( i16x8_load_extend_i8x8(q8.as_ptr().add(16 * j + offset)), i16x8_load_extend_i8x8(aux8.as_ptr().add(16 * j + offset)), ); aux32 = f32x4_add( aux32, f32x4_mul(f32x4_convert_i32x4(i32x4_extend_low_i16x8(aux16)), scale), ); aux32 = f32x4_add( aux32, f32x4_mul(f32x4_convert_i32x4(i32x4_extend_high_i16x8(aux16)), scale), ); } } let d = f32x4_splat(x.d.to_f32() * y.d); sums = f32x4_add(sums, f32x4_mul(aux32, d)); } let sums = f32x4_extract_lane::<0>(sums) + f32x4_extract_lane::<1>(sums) + f32x4_extract_lane::<2>(sums) + f32x4_extract_lane::<3>(sums); Ok(sums) } } #[inline(always)] pub(crate) fn vec_dot_q8k_q8k(n: usize, xs: &[BlockQ8K], ys: &[BlockQ8K]) -> Result<f32> { let qk = QK_K; if n % QK_K != 0 { crate::bail!("vec_dot_q8k_q8k: {n} is not divisible by {qk}") } unsafe { let mut acc = f32x4_splat(0.0f32); for (xs, ys) in xs.iter().zip(ys.iter()) { let x_qs = xs.qs.as_ptr(); let y_qs = ys.qs.as_ptr(); let mut sumi = i32x4_splat(0); for j in (0..QK_K).step_by(8) { let xs = i16x8_load_extend_i8x8(x_qs.add(j)); let ys = i16x8_load_extend_i8x8(y_qs.add(j)); let sum_xy = i32x4_dot_i16x8(xs, ys); sumi = i32x4_add(sumi, sum_xy) } let d = f32x4_splat(xs.d * ys.d); acc = f32x4_add(acc, f32x4_mul(f32x4_convert_i32x4(sumi), d)) } let res = f32x4_extract_lane::<0>(acc) + f32x4_extract_lane::<1>(acc) + f32x4_extract_lane::<2>(acc) + f32x4_extract_lane::<3>(acc); Ok(res) } }
candle/candle-core/src/quantized/simd128.rs/0
{ "file_path": "candle/candle-core/src/quantized/simd128.rs", "repo_id": "candle", "token_count": 11617 }
use anyhow::Result; use candle_core::{DType, Device::Cpu, Tensor}; #[test] fn display_scalar() -> Result<()> { let t = Tensor::new(1234u32, &Cpu)?; let s = format!("{t}"); assert_eq!(&s, "[1234]\nTensor[[], u32]"); let t = t.to_dtype(DType::F32)?.neg()?; let s = format!("{}", (&t / 10.0)?); assert_eq!(&s, "[-123.4000]\nTensor[[], f32]"); let s = format!("{}", (&t / 1e8)?); assert_eq!(&s, "[-1.2340e-5]\nTensor[[], f32]"); let s = format!("{}", (&t * 1e8)?); assert_eq!(&s, "[-1.2340e11]\nTensor[[], f32]"); let s = format!("{}", (&t * 0.)?); assert_eq!(&s, "[0.]\nTensor[[], f32]"); Ok(()) } #[test] fn display_vector() -> Result<()> { let t = Tensor::new::<&[u32; 0]>(&[], &Cpu)?; let s = format!("{t}"); assert_eq!(&s, "[]\nTensor[[0], u32]"); let t = Tensor::new(&[0.1234567, 1.0, -1.2, 4.1, f64::NAN], &Cpu)?; let s = format!("{t}"); assert_eq!( &s, "[ 0.1235, 1.0000, -1.2000, 4.1000, NaN]\nTensor[[5], f64]" ); let t = (Tensor::ones(50, DType::F32, &Cpu)? * 42.)?; let s = format!("\n{t}"); let expected = r#" [42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42.] Tensor[[50], f32]"#; assert_eq!(&s, expected); let t = (Tensor::ones(11000, DType::F32, &Cpu)? * 42.)?; let s = format!("{t}"); assert_eq!( &s, "[42., 42., 42., ..., 42., 42., 42.]\nTensor[[11000], f32]" ); Ok(()) } #[test] fn display_multi_dim() -> Result<()> { let t = (Tensor::ones((200, 100), DType::F32, &Cpu)? * 42.)?; let s = format!("\n{t}"); let expected = r#" [[42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], ... [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.]] Tensor[[200, 100], f32]"#; assert_eq!(&s, expected); let t = t.reshape(&[2, 1, 1, 100, 100])?; let t = format!("\n{t}"); let expected = r#" [[[[[42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], ... [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.]]]], [[[[42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], ... [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.]]]]] Tensor[[2, 1, 1, 100, 100], f32]"#; assert_eq!(&t, expected); Ok(()) }
candle/candle-core/tests/display_tests.rs/0
{ "file_path": "candle/candle-core/tests/display_tests.rs", "repo_id": "candle", "token_count": 1395 }
# candle-distilbert

DistilBert is a distilled version of the Bert model.

## Sentence embeddings

DistilBert is used to compute the sentence embeddings for a prompt. The model weights are
downloaded from the hub on the first run.

```bash
cargo run --example distilbert --release -- --prompt "Here is a test sentence"

> [[[ 0.5109, 0.1280, -0.2635, ..., 0.3462, -1.0434, 0.1441],
>   [ 0.1735, 0.0818, -0.5549, ..., 0.3472, -0.8264, -0.0244],
>   [ 0.0702, -0.1311, -0.4914, ..., 0.3483, -0.6194, 0.1829],
>   ...
>   [ 0.2993, -0.0106, -0.4640, ..., 0.2844, -0.6732, 0.0042],
>   [ 0.1066, -0.0081, -0.4299, ..., 0.3435, -0.7729, 0.0190],
>   [ 0.8903, 0.2055, -0.2541, ..., 0.3208, -0.6585, 0.0586]]]
> Tensor[[1, 7, 768], f32]
```
candle/candle-examples/examples/distilbert/README.md/0
{ "file_path": "candle/candle-examples/examples/distilbert/README.md", "repo_id": "candle", "token_count": 367 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle_transformers::models::jina_bert::{BertModel, Config, PositionEmbeddingType}; use anyhow::Error as E; use candle::{DType, Module, Tensor}; use candle_nn::VarBuilder; use clap::Parser; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// When set, compute embeddings for this prompt. #[arg(long)] prompt: Option<String>, /// The number of times to run the prompt. #[arg(long, default_value = "1")] n: usize, /// L2 normalization for embeddings. #[arg(long, default_value = "true")] normalize_embeddings: bool, #[arg(long)] tokenizer: Option<String>, #[arg(long)] model: Option<String>, #[arg(long)] model_file: Option<String>, } impl Args { fn build_model_and_tokenizer(&self) -> anyhow::Result<(BertModel, tokenizers::Tokenizer)> { use hf_hub::{api::sync::Api, Repo, RepoType}; let model_name = match self.model.as_ref() { Some(model) => model.to_string(), None => "jinaai/jina-embeddings-v2-base-en".to_string(), }; let model = match &self.model_file { Some(model_file) => std::path::PathBuf::from(model_file), None => Api::new()? .repo(Repo::new(model_name.to_string(), RepoType::Model)) .get("model.safetensors")?, }; let tokenizer = match &self.tokenizer { Some(file) => std::path::PathBuf::from(file), None => Api::new()? .repo(Repo::new(model_name.to_string(), RepoType::Model)) .get("tokenizer.json")?, }; let device = candle_examples::device(self.cpu)?; let tokenizer = tokenizers::Tokenizer::from_file(tokenizer).map_err(E::msg)?; let config = Config::new( tokenizer.get_vocab_size(true), 768, 12, 12, 3072, candle_nn::Activation::Gelu, 8192, 2, 0.02, 1e-12, 0, PositionEmbeddingType::Alibi, ); let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, &device)? }; let model = BertModel::new(vb, &config)?; Ok((model, tokenizer)) } } fn main() -> anyhow::Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { println!("tracing..."); let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let start = std::time::Instant::now(); let (model, mut tokenizer) = args.build_model_and_tokenizer()?; let device = &model.device; if let Some(prompt) = args.prompt { let tokenizer = tokenizer .with_padding(None) .with_truncation(None) .map_err(E::msg)?; let tokens = tokenizer .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; println!("Loaded and encoded {:?}", start.elapsed()); let start = std::time::Instant::now(); let embeddings = model.forward(&token_ids)?; let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?; let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?; println!("pooled_embeddigns: {embeddings}"); let embeddings = if args.normalize_embeddings { normalize_l2(&embeddings)? 
} else { embeddings }; if args.normalize_embeddings { println!("normalized_embeddings: {embeddings}"); } println!("Took {:?}", start.elapsed()); } else { let sentences = [ "The cat sits outside", "A man is playing guitar", "I love pasta", "The new movie is awesome", "The cat plays in the garden", "A woman watches TV", "The new movie is so great", "Do you like pizza?", ]; let n_sentences = sentences.len(); if let Some(pp) = tokenizer.get_padding_mut() { pp.strategy = tokenizers::PaddingStrategy::BatchLongest } else { let pp = tokenizers::PaddingParams { strategy: tokenizers::PaddingStrategy::BatchLongest, ..Default::default() }; tokenizer.with_padding(Some(pp)); } let tokens = tokenizer .encode_batch(sentences.to_vec(), true) .map_err(E::msg)?; let token_ids = tokens .iter() .map(|tokens| { let tokens = tokens.get_ids().to_vec(); Tensor::new(tokens.as_slice(), device) }) .collect::<candle::Result<Vec<_>>>()?; let token_ids = Tensor::stack(&token_ids, 0)?; println!("running inference on batch {:?}", token_ids.shape()); let embeddings = model.forward(&token_ids)?; println!("generated embeddings {:?}", embeddings.shape()); // Apply some avg-pooling by taking the mean embedding value for all tokens (including padding) let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?; let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?; let embeddings = if args.normalize_embeddings { normalize_l2(&embeddings)? } else { embeddings }; println!("pooled embeddings {:?}", embeddings.shape()); let mut similarities = vec![]; for i in 0..n_sentences { let e_i = embeddings.get(i)?; for j in (i + 1)..n_sentences { let e_j = embeddings.get(j)?; let sum_ij = (&e_i * &e_j)?.sum_all()?.to_scalar::<f32>()?; let sum_i2 = (&e_i * &e_i)?.sum_all()?.to_scalar::<f32>()?; let sum_j2 = (&e_j * &e_j)?.sum_all()?.to_scalar::<f32>()?; let cosine_similarity = sum_ij / (sum_i2 * sum_j2).sqrt(); similarities.push((cosine_similarity, i, j)) } } similarities.sort_by(|u, v| v.0.total_cmp(&u.0)); for &(score, i, j) in similarities[..5].iter() { println!("score: {score:.2} '{}' '{}'", sentences[i], sentences[j]) } } Ok(()) } pub fn normalize_l2(v: &Tensor) -> candle::Result<Tensor> { v.broadcast_div(&v.sqr()?.sum_keepdim(1)?.sqrt()?) }
candle/candle-examples/examples/jina-bert/main.rs/0
{ "file_path": "candle/candle-examples/examples/jina-bert/main.rs", "repo_id": "candle", "token_count": 3414 }
# candle-marian-mt

`marian-mt` is a neural machine translation model. In this example it is used to
translate text from French to English. See the associated [model
card](https://huggingface.co/Helsinki-NLP/opus-mt-tc-big-fr-en) for details on
the model itself.

## Running an example

```bash
cargo run --example marian-mt --release -- \
  --text "Demain, dès l'aube, à l'heure où blanchit la campagne, Je partirai. Vois-tu, je sais que tu m'attends. J'irai par la forêt, j'irai par la montagne. Je ne puis demeurer loin de toi plus longtemps."
```

```
<NIL> Tomorrow, at dawn, at the time when the country is whitening, I will go. See,
I know you are waiting for me. I will go through the forest, I will go through the
mountain. I cannot stay far from you any longer.</s>
```

## Generating the tokenizer.json files

You can use the following script to generate the `tokenizer.json` config files from
the hf-hub repos. This requires the `tokenizers` and `sentencepiece` packages to be
installed and uses the `convert_slow_tokenizer.py` script from this directory.

```python
from convert_slow_tokenizer import MarianConverter
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-fr-en", use_fast=False)
fast_tokenizer = MarianConverter(tokenizer, index=0).converted()
fast_tokenizer.save(f"tokenizer-marian-base-fr.json")
fast_tokenizer = MarianConverter(tokenizer, index=1).converted()
fast_tokenizer.save(f"tokenizer-marian-base-en.json")
```
candle/candle-examples/examples/marian-mt/README.md/0
{ "file_path": "candle/candle-examples/examples/marian-mt/README.md", "repo_id": "candle", "token_count": 497 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use clap::{Parser, ValueEnum}; use candle::{DType, IndexOp, D}; use candle_nn::{Module, VarBuilder}; use candle_transformers::models::mobilenetv4; #[derive(Clone, Copy, Debug, ValueEnum)] enum Which { Small, Medium, Large, HybridMedium, HybridLarge, } impl Which { fn model_filename(&self) -> String { let name = match self { Self::Small => "conv_small.e2400_r224", Self::Medium => "conv_medium.e500_r256", Self::HybridMedium => "hybrid_medium.ix_e550_r256", Self::Large => "conv_large.e600_r384", Self::HybridLarge => "hybrid_large.ix_e600_r384", }; format!("timm/mobilenetv4_{}_in1k", name) } fn resolution(&self) -> u32 { match self { Self::Small => 224, Self::Medium => 256, Self::HybridMedium => 256, Self::Large => 384, Self::HybridLarge => 384, } } fn config(&self) -> mobilenetv4::Config { match self { Self::Small => mobilenetv4::Config::small(), Self::Medium => mobilenetv4::Config::medium(), Self::HybridMedium => mobilenetv4::Config::hybrid_medium(), Self::Large => mobilenetv4::Config::large(), Self::HybridLarge => mobilenetv4::Config::hybrid_large(), } } } #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(value_enum, long, default_value_t=Which::Small)] which: Which, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let image = candle_examples::imagenet::load_image(args.image, args.which.resolution() as usize)? .to_device(&device)?; println!("loaded image {image:?}"); let model_file = match args.model { None => { let model_name = args.which.model_filename(); let api = hf_hub::api::sync::Api::new()?; let api = api.model(model_name); api.get("model.safetensors")? } Some(model) => model.into(), }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? }; let model = mobilenetv4::mobilenetv4(&args.which.config(), 1000, vb)?; println!("model built"); let logits = model.forward(&image.unsqueeze(0)?)?; let prs = candle_nn::ops::softmax(&logits, D::Minus1)? .i(0)? .to_vec1::<f32>()?; let mut prs = prs.iter().enumerate().collect::<Vec<_>>(); prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1)); for &(category_idx, pr) in prs.iter().take(5) { println!( "{:24}: {:.2}%", candle_examples::imagenet::CLASSES[category_idx], 100. * pr ); } Ok(()) }
candle/candle-examples/examples/mobilenetv4/main.rs/0
{ "file_path": "candle/candle-examples/examples/mobilenetv4/main.rs", "repo_id": "candle", "token_count": 1443 }
# PaliGemma [HuggingFace Model Card](https://huggingface.co/google/paligemma-3b-pt-224) - [Model Page](https://ai.google.dev/gemma/docs/paligemma) ```bash cargo run --features cuda --release --example paligemma -- \ --prompt "caption fr" --image candle-examples/examples/yolo-v8/assets/bike.jpg ``` ``` loaded image with shape Tensor[dims 1, 3, 224, 224; bf16, cuda:0] loaded the model in 1.267744448s caption fr. Un groupe de cyclistes qui sont dans la rue. 13 tokens generated (56.52 token/s) ``` ```bash cargo run --features cuda --release --example paligemma -- \ --prompt "caption fr" --image candle-examples/examples/flux/assets/flux-robot.jpg ``` ``` loaded image with shape Tensor[dims 1, 3, 224, 224; bf16, cuda:0] loaded the model in 1.271492621s caption fr une image d' un robot sur la plage avec le mot rouillé 15 tokens generated (62.78 token/s) ```
candle/candle-examples/examples/paligemma/README.md/0
{ "file_path": "candle/candle-examples/examples/paligemma/README.md", "repo_id": "candle", "token_count": 339 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use clap::{Parser, ValueEnum}; use std::io::Write; use tokenizers::Tokenizer; use candle::quantized::{ggml_file, gguf_file}; use candle::Tensor; use candle_transformers::generation::{LogitsProcessor, Sampling}; use candle_examples::token_output_stream::TokenOutputStream; use candle_transformers::models::quantized_llama as model; use model::ModelWeights; const DEFAULT_PROMPT: &str = "My favorite theorem is "; #[derive(Debug)] enum Prompt { Interactive, Chat, One(String), } #[derive(Clone, Debug, Copy, PartialEq, Eq, ValueEnum)] enum Which { #[value(name = "7b")] L7b, #[value(name = "13b")] L13b, #[value(name = "70b")] L70b, #[value(name = "7b-chat")] L7bChat, #[value(name = "13b-chat")] L13bChat, #[value(name = "70b-chat")] L70bChat, #[value(name = "7b-code")] L7bCode, #[value(name = "13b-code")] L13bCode, #[value(name = "32b-code")] L34bCode, #[value(name = "7b-leo")] Leo7b, #[value(name = "13b-leo")] Leo13b, #[value(name = "7b-mistral")] Mistral7b, #[value(name = "7b-mistral-instruct")] Mistral7bInstruct, #[value(name = "7b-mistral-instruct-v0.2")] Mistral7bInstructV02, #[value(name = "7b-zephyr-a")] Zephyr7bAlpha, #[value(name = "7b-zephyr-b")] Zephyr7bBeta, #[value(name = "7b-open-chat-3.5")] OpenChat35, #[value(name = "7b-starling-a")] Starling7bAlpha, #[value(name = "mixtral")] Mixtral, #[value(name = "mixtral-instruct")] MixtralInstruct, #[value(name = "llama3-8b")] L8b, #[value(name = "phi3")] Phi3, #[value(name = "SmoLM2-360M-Instruct")] SmolLM2_360MInstruct, #[value(name = "SmoLM2-1.7B-Instruct")] SmolLM2_1BInstruct, } impl Which { fn is_mistral(&self) -> bool { match self { Self::L7b | Self::L13b | Self::L70b | Self::L7bChat | Self::L13bChat | Self::L70bChat | Self::L7bCode | Self::L13bCode | Self::L34bCode | Self::Leo7b | Self::Leo13b | Self::L8b | Self::Phi3 | Self::SmolLM2_1BInstruct | Self::SmolLM2_360MInstruct => false, // Zephyr and OpenChat are fine tuned versions of mistral and should be treated in the // same way. Starling is a fine tuned version of OpenChat. 
Self::OpenChat35 | Self::Starling7bAlpha | Self::Zephyr7bAlpha | Self::Zephyr7bBeta | Self::Mixtral | Self::MixtralInstruct | Self::Mistral7b | Self::Mistral7bInstruct | Self::Mistral7bInstructV02 => true, } } fn is_zephyr(&self) -> bool { match self { Self::L7b | Self::L13b | Self::L70b | Self::L7bChat | Self::L13bChat | Self::L70bChat | Self::L7bCode | Self::L13bCode | Self::L34bCode | Self::Leo7b | Self::Leo13b | Self::Mixtral | Self::MixtralInstruct | Self::Mistral7b | Self::Mistral7bInstruct | Self::Mistral7bInstructV02 | Self::OpenChat35 | Self::Starling7bAlpha | Self::L8b | Self::SmolLM2_1BInstruct | Self::SmolLM2_360MInstruct | Self::Phi3 => false, Self::Zephyr7bAlpha | Self::Zephyr7bBeta => true, } } fn is_open_chat(&self) -> bool { match self { Self::L7b | Self::L13b | Self::L70b | Self::L7bChat | Self::L13bChat | Self::L70bChat | Self::L7bCode | Self::L13bCode | Self::L34bCode | Self::Leo7b | Self::Leo13b | Self::Mixtral | Self::MixtralInstruct | Self::Mistral7b | Self::Mistral7bInstruct | Self::Mistral7bInstructV02 | Self::Zephyr7bAlpha | Self::Zephyr7bBeta | Self::L8b | Self::SmolLM2_1BInstruct | Self::SmolLM2_360MInstruct | Self::Phi3 => false, Self::OpenChat35 | Self::Starling7bAlpha => true, } } fn tokenizer_repo(&self) -> &'static str { match self { Self::L7b | Self::L13b | Self::L70b | Self::L7bChat | Self::L13bChat | Self::L70bChat | Self::L7bCode | Self::L13bCode | Self::L34bCode => "hf-internal-testing/llama-tokenizer", Self::Leo7b => "LeoLM/leo-hessianai-7b", Self::Leo13b => "LeoLM/leo-hessianai-13b", Self::Mixtral => "mistralai/Mixtral-8x7B-v0.1", Self::MixtralInstruct => "mistralai/Mixtral-8x7B-Instruct-v0.1", Self::Mistral7b | Self::Mistral7bInstruct | Self::Mistral7bInstructV02 | Self::Zephyr7bAlpha | Self::Zephyr7bBeta => "mistralai/Mistral-7B-v0.1", Self::OpenChat35 => "openchat/openchat_3.5", Self::Starling7bAlpha => "berkeley-nest/Starling-LM-7B-alpha", Self::L8b => "meta-llama/Meta-Llama-3-8B", Self::Phi3 => "microsoft/Phi-3-mini-4k-instruct", Self::SmolLM2_360MInstruct => "HuggingFaceTB/SmolLM2-360M-Instruct", Self::SmolLM2_1BInstruct => "HuggingFaceTB/SmolLM2-1.7B-Instruct", } } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// GGML/GGUF file to load, typically a .bin/.gguf file generated by the quantize command from llama.cpp #[arg(long)] model: Option<String>, /// The initial prompt, use 'interactive' for entering multiple prompts in an interactive way /// and 'chat' for an interactive model where history of previous prompts and generated tokens /// is preserved. #[arg(long)] prompt: Option<String>, /// The length of the sample to generate (in tokens). #[arg(short = 'n', long, default_value_t = 1000)] sample_len: usize, /// The tokenizer config in json format. #[arg(long)] tokenizer: Option<String>, /// The temperature used to generate samples, use 0 for greedy sampling. #[arg(long, default_value_t = 0.8)] temperature: f64, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// Only sample among the top K samples. #[arg(long)] top_k: Option<usize>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// Display the token for the specified prompt. #[arg(long)] verbose_prompt: bool, /// Process prompt elements separately. #[arg(long)] split_prompt: bool, /// Run on CPU rather than GPU even if a GPU is available. 
#[arg(long)] cpu: bool, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, /// The model size to use. #[arg(long, default_value = "7b")] which: Which, /// Group-Query Attention, use 8 for the 70B version of LLaMAv2. #[arg(long)] gqa: Option<usize>, /// Use the slower dmmv cuda kernel. #[arg(long)] force_dmmv: bool, } impl Args { fn tokenizer(&self) -> anyhow::Result<Tokenizer> { let tokenizer_path = match &self.tokenizer { Some(config) => std::path::PathBuf::from(config), None => { let api = hf_hub::api::sync::Api::new()?; let repo = self.which.tokenizer_repo(); let api = api.model(repo.to_string()); api.get("tokenizer.json")? } }; Tokenizer::from_file(tokenizer_path).map_err(anyhow::Error::msg) } fn model(&self) -> anyhow::Result<std::path::PathBuf> { let model_path = match &self.model { Some(config) => std::path::PathBuf::from(config), None => { let (repo, filename) = match self.which { Which::L7b => ("TheBloke/Llama-2-7B-GGML", "llama-2-7b.ggmlv3.q4_0.bin"), Which::L13b => ("TheBloke/Llama-2-13B-GGML", "llama-2-13b.ggmlv3.q4_0.bin"), Which::L70b => ("TheBloke/Llama-2-70B-GGML", "llama-2-70b.ggmlv3.q4_0.bin"), Which::L7bChat => ( "TheBloke/Llama-2-7B-Chat-GGML", "llama-2-7b-chat.ggmlv3.q4_0.bin", ), Which::L13bChat => ( "TheBloke/Llama-2-13B-Chat-GGML", "llama-2-13b-chat.ggmlv3.q4_0.bin", ), Which::L70bChat => ( "TheBloke/Llama-2-70B-Chat-GGML", "llama-2-70b-chat.ggmlv3.q4_0.bin", ), Which::L7bCode => ("TheBloke/CodeLlama-7B-GGUF", "codellama-7b.Q8_0.gguf"), Which::L13bCode => ("TheBloke/CodeLlama-13B-GGUF", "codellama-13b.Q8_0.gguf"), Which::L34bCode => ("TheBloke/CodeLlama-34B-GGUF", "codellama-34b.Q8_0.gguf"), Which::Leo7b => ( "TheBloke/leo-hessianai-7B-GGUF", "leo-hessianai-7b.Q4_K_M.gguf", ), Which::Leo13b => ( "TheBloke/leo-hessianai-13B-GGUF", "leo-hessianai-13b.Q4_K_M.gguf", ), Which::Mixtral => ( "TheBloke/Mixtral-8x7B-v0.1-GGUF", "mixtral-8x7b-v0.1.Q4_K_M.gguf", ), Which::MixtralInstruct => ( "TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF", "mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf", ), Which::Mistral7b => ( "TheBloke/Mistral-7B-v0.1-GGUF", "mistral-7b-v0.1.Q4_K_S.gguf", ), Which::Mistral7bInstruct => ( "TheBloke/Mistral-7B-Instruct-v0.1-GGUF", "mistral-7b-instruct-v0.1.Q4_K_S.gguf", ), Which::Mistral7bInstructV02 => ( "TheBloke/Mistral-7B-Instruct-v0.2-GGUF", "mistral-7b-instruct-v0.2.Q4_K_S.gguf", ), Which::Zephyr7bAlpha => ( "TheBloke/zephyr-7B-alpha-GGUF", "zephyr-7b-alpha.Q4_K_M.gguf", ), Which::Zephyr7bBeta => { ("TheBloke/zephyr-7B-beta-GGUF", "zephyr-7b-beta.Q4_K_M.gguf") } Which::OpenChat35 => ("TheBloke/openchat_3.5-GGUF", "openchat_3.5.Q4_K_M.gguf"), Which::Starling7bAlpha => ( "TheBloke/Starling-LM-7B-alpha-GGUF", "starling-lm-7b-alpha.Q4_K_M.gguf", ), // TODO: swap to TheBloke model when available Which::L8b => ( "QuantFactory/Meta-Llama-3-8B-GGUF", "Meta-Llama-3-8B.Q4_K_S.gguf", ), Which::Phi3 => ( "microsoft/Phi-3-mini-4k-instruct-gguf", "Phi-3-mini-4k-instruct-q4.gguf", ), Which::SmolLM2_360MInstruct => ( "HuggingFaceTB/SmolLM2-360M-Instruct-GGUF", "smollm2-360m-instruct-q8_0.gguf", ), Which::SmolLM2_1BInstruct => ( "HuggingFaceTB/SmolLM2-1.7B-Instruct-GGUF", "smollm2-1.7b-instruct-q4_k_m.gguf", ), }; let revision = if self.which == Which::Phi3 { "5eef2ce24766d31909c0b269fe90c817a8f263fb" } else { "main" }; let api = hf_hub::api::sync::Api::new()?; 
api.repo(hf_hub::Repo::with_revision( repo.to_string(), hf_hub::RepoType::Model, revision.to_string(), )) .get(filename)? } }; Ok(model_path) } } fn format_size(size_in_bytes: usize) -> String { if size_in_bytes < 1_000 { format!("{}B", size_in_bytes) } else if size_in_bytes < 1_000_000 { format!("{:.2}KB", size_in_bytes as f64 / 1e3) } else if size_in_bytes < 1_000_000_000 { format!("{:.2}MB", size_in_bytes as f64 / 1e6) } else { format!("{:.2}GB", size_in_bytes as f64 / 1e9) } } fn main() -> anyhow::Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); #[cfg(feature = "cuda")] candle::quantized::cuda::set_force_dmmv(args.force_dmmv); candle::cuda::set_gemm_reduced_precision_f16(true); candle::cuda::set_gemm_reduced_precision_bf16(true); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature, args.repeat_penalty, args.repeat_last_n ); let model_path = args.model()?; let mut file = std::fs::File::open(&model_path)?; let start = std::time::Instant::now(); let device = candle_examples::device(args.cpu)?; let mut model = match model_path.extension().and_then(|v| v.to_str()) { Some("gguf") => { let model = gguf_file::Content::read(&mut file).map_err(|e| e.with_path(model_path))?; let mut total_size_in_bytes = 0; for (_, tensor) in model.tensor_infos.iter() { let elem_count = tensor.shape.elem_count(); total_size_in_bytes += elem_count * tensor.ggml_dtype.type_size() / tensor.ggml_dtype.block_size(); } println!( "loaded {:?} tensors ({}) in {:.2}s", model.tensor_infos.len(), &format_size(total_size_in_bytes), start.elapsed().as_secs_f32(), ); ModelWeights::from_gguf(model, &mut file, &device)? } Some("ggml" | "bin") | Some(_) | None => { let model = ggml_file::Content::read(&mut file, &device) .map_err(|e| e.with_path(model_path))?; let mut total_size_in_bytes = 0; for (_, tensor) in model.tensors.iter() { let elem_count = tensor.shape().elem_count(); total_size_in_bytes += elem_count * tensor.dtype().type_size() / tensor.dtype().block_size(); } println!( "loaded {:?} tensors ({}) in {:.2}s", model.tensors.len(), &format_size(total_size_in_bytes), start.elapsed().as_secs_f32(), ); println!("params: {:?}", model.hparams); let default_gqa = match args.which { Which::L7b | Which::L13b | Which::L7bChat | Which::L13bChat | Which::L7bCode | Which::L13bCode | Which::L34bCode | Which::Leo7b | Which::Leo13b | Which::L8b | Which::SmolLM2_1BInstruct | Which::SmolLM2_360MInstruct | Which::Phi3 => 1, Which::Mixtral | Which::MixtralInstruct | Which::Mistral7b | Which::Mistral7bInstruct | Which::Mistral7bInstructV02 | Which::Zephyr7bAlpha | Which::Zephyr7bBeta | Which::L70b | Which::L70bChat | Which::OpenChat35 | Which::Starling7bAlpha => 8, }; ModelWeights::from_ggml(model, args.gqa.unwrap_or(default_gqa))? 
} }; println!("model built"); let tokenizer = args.tokenizer()?; let mut tos = TokenOutputStream::new(tokenizer); let prompt = match args.prompt.as_deref() { Some("chat") => Prompt::Chat, Some("interactive") => Prompt::Interactive, Some(s) => Prompt::One(s.to_string()), None => Prompt::One(DEFAULT_PROMPT.to_string()), }; let mut pre_prompt_tokens = vec![]; for prompt_index in 0.. { let prompt_str = match &prompt { Prompt::One(prompt) => prompt.clone(), Prompt::Interactive | Prompt::Chat => { let is_interactive = matches!(prompt, Prompt::Interactive); print!("> "); std::io::stdout().flush()?; let mut prompt = String::new(); std::io::stdin().read_line(&mut prompt)?; if prompt.ends_with('\n') { prompt.pop(); if prompt.ends_with('\r') { prompt.pop(); } } if args.which.is_open_chat() { format!("GPT4 Correct User: {prompt}<|end_of_turn|>GPT4 Correct Assistant:") } else if args.which.is_zephyr() { if prompt_index == 0 || is_interactive { format!("<|system|>\n</s>\n<|user|>\n{prompt}</s>\n<|assistant|>",) } else { format!("<|user|>\n{prompt}</s>\n<|assistant|>") } } else if args.which.is_mistral() { format!("[INST] {prompt} [/INST]") } else { prompt } } }; print!("{}", &prompt_str); let tokens = tos .tokenizer() .encode(prompt_str, true) .map_err(anyhow::Error::msg)?; if args.verbose_prompt { for (token, id) in tokens.get_tokens().iter().zip(tokens.get_ids().iter()) { let token = token.replace('▁', " ").replace("<0x0A>", "\n"); println!("{id:7} -> '{token}'"); } } let prompt_tokens = [&pre_prompt_tokens, tokens.get_ids()].concat(); let to_sample = args.sample_len.saturating_sub(1); let prompt_tokens = if prompt_tokens.len() + to_sample > model::MAX_SEQ_LEN - 10 { let to_remove = prompt_tokens.len() + to_sample + 10 - model::MAX_SEQ_LEN; prompt_tokens[prompt_tokens.len().saturating_sub(to_remove)..].to_vec() } else { prompt_tokens }; let mut all_tokens = vec![]; let mut logits_processor = { let temperature = args.temperature; let sampling = if temperature <= 0. { Sampling::ArgMax } else { match (args.top_k, args.top_p) { (None, None) => Sampling::All { temperature }, (Some(k), None) => Sampling::TopK { k, temperature }, (None, Some(p)) => Sampling::TopP { p, temperature }, (Some(k), Some(p)) => Sampling::TopKThenTopP { k, p, temperature }, } }; LogitsProcessor::from_sampling(args.seed, sampling) }; let start_prompt_processing = std::time::Instant::now(); let mut next_token = if !args.split_prompt { let input = Tensor::new(prompt_tokens.as_slice(), &device)?.unsqueeze(0)?; let logits = model.forward(&input, 0)?; let logits = logits.squeeze(0)?; logits_processor.sample(&logits)? } else { let mut next_token = 0; for (pos, token) in prompt_tokens.iter().enumerate() { let input = Tensor::new(&[*token], &device)?.unsqueeze(0)?; let logits = model.forward(&input, pos)?; let logits = logits.squeeze(0)?; next_token = logits_processor.sample(&logits)? } next_token }; let prompt_dt = start_prompt_processing.elapsed(); all_tokens.push(next_token); if let Some(t) = tos.next_token(next_token)? 
{ print!("{t}"); std::io::stdout().flush()?; } let eos_token = match args.which { Which::SmolLM2_360MInstruct | Which::SmolLM2_1BInstruct => "<|endoftext|>", Which::L8b => "<|end_of_text|>", _ => match args.which.is_open_chat() { true => "<|end_of_turn|>", false => "</s>", }, }; let eos_token = *tos.tokenizer().get_vocab(true).get(eos_token).unwrap(); let start_post_prompt = std::time::Instant::now(); let mut sampled = 0; for index in 0..to_sample { let input = Tensor::new(&[next_token], &device)?.unsqueeze(0)?; let logits = model.forward(&input, prompt_tokens.len() + index)?; let logits = logits.squeeze(0)?; let logits = if args.repeat_penalty == 1. { logits } else { let start_at = all_tokens.len().saturating_sub(args.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, args.repeat_penalty, &all_tokens[start_at..], )? }; next_token = logits_processor.sample(&logits)?; all_tokens.push(next_token); if let Some(t) = tos.next_token(next_token)? { print!("{t}"); std::io::stdout().flush()?; } sampled += 1; if next_token == eos_token { break; }; } if let Some(rest) = tos.decode_rest().map_err(candle::Error::msg)? { print!("{rest}"); } std::io::stdout().flush()?; let dt = start_post_prompt.elapsed(); println!( "\n\n{:4} prompt tokens processed: {:.2} token/s", prompt_tokens.len(), prompt_tokens.len() as f64 / prompt_dt.as_secs_f64(), ); println!( "{sampled:4} tokens generated: {:.2} token/s", sampled as f64 / dt.as_secs_f64(), ); match prompt { Prompt::One(_) => break, Prompt::Interactive => {} Prompt::Chat => { pre_prompt_tokens = [prompt_tokens.as_slice(), all_tokens.as_slice()].concat() } } } Ok(()) }
candle/candle-examples/examples/quantized/main.rs/0
{ "file_path": "candle/candle-examples/examples/quantized/main.rs", "repo_id": "candle", "token_count": 13386 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use clap::{Parser, ValueEnum}; use candle::{DType, IndexOp, D}; use candle_nn::{Module, VarBuilder}; use candle_transformers::models::repvgg; #[derive(Clone, Copy, Debug, ValueEnum)] enum Which { A0, A1, A2, B0, B1, B2, B3, B1G4, B2G4, B3G4, } impl Which { fn model_filename(&self) -> String { let name = match self { Self::A0 => "a0", Self::A1 => "a1", Self::A2 => "a2", Self::B0 => "b0", Self::B1 => "b1", Self::B2 => "b2", Self::B3 => "b3", Self::B1G4 => "b1g4", Self::B2G4 => "b2g4", Self::B3G4 => "b3g4", }; format!("timm/repvgg_{}.rvgg_in1k", name) } fn config(&self) -> repvgg::Config { match self { Self::A0 => repvgg::Config::a0(), Self::A1 => repvgg::Config::a1(), Self::A2 => repvgg::Config::a2(), Self::B0 => repvgg::Config::b0(), Self::B1 => repvgg::Config::b1(), Self::B2 => repvgg::Config::b2(), Self::B3 => repvgg::Config::b3(), Self::B1G4 => repvgg::Config::b1g4(), Self::B2G4 => repvgg::Config::b2g4(), Self::B3G4 => repvgg::Config::b3g4(), } } } #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(value_enum, long, default_value_t=Which::A0)] which: Which, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let image = candle_examples::imagenet::load_image224(args.image)?.to_device(&device)?; println!("loaded image {image:?}"); let model_file = match args.model { None => { let model_name = args.which.model_filename(); let api = hf_hub::api::sync::Api::new()?; let api = api.model(model_name); api.get("model.safetensors")? } Some(model) => model.into(), }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? }; let model = repvgg::repvgg(&args.which.config(), 1000, vb)?; println!("model built"); let logits = model.forward(&image.unsqueeze(0)?)?; let prs = candle_nn::ops::softmax(&logits, D::Minus1)? .i(0)? .to_vec1::<f32>()?; let mut prs = prs.iter().enumerate().collect::<Vec<_>>(); prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1)); for &(category_idx, pr) in prs.iter().take(5) { println!( "{:24}: {:.2}%", candle_examples::imagenet::CLASSES[category_idx], 100. * pr ); } Ok(()) }
candle/candle-examples/examples/repvgg/main.rs/0
{ "file_path": "candle/candle-examples/examples/repvgg/main.rs", "repo_id": "candle", "token_count": 1525 }
# silero-vad: Voice Activity Detection

[Silero VAD (v5)](https://github.com/snakers4/silero-vad) detects voice activity in streaming audio.

This example uses the model weights available in the Hugging Face repository
[onnx-community/silero-vad](https://huggingface.co/onnx-community/silero-vad).

## Running the example

```bash
$ arecord -t raw -f S16_LE -r 16000 -c 1 -d 5 - | cargo run --example silero-vad --release --features onnx -- --sample-rate 16000
```
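If the audio comes from a file rather than a live capture, a decoder such as ffmpeg can produce the same raw 16 kHz mono S16_LE stream (an illustrative pipeline; it assumes ffmpeg is installed and `input.wav` is a placeholder):

```bash
$ ffmpeg -i input.wav -f s16le -ar 16000 -ac 1 - | cargo run --example silero-vad --release --features onnx -- --sample-rate 16000
```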
candle/candle-examples/examples/silero-vad/README.md/0
{ "file_path": "candle/candle-examples/examples/silero-vad/README.md", "repo_id": "candle", "token_count": 155 }
# candle-stella-en-v5: Implementation of [stella_en_1.5B_v5](https://huggingface.co/dunzhang/stella_en_1.5B_v5) embedding model

As of 7th Oct 2024, *Stella_en_1.5B_v5* is one of the top-ranking models on the `retrieval` and `reranking` tasks of the [MTEB](https://huggingface.co/spaces/mteb/leaderboard) leaderboard.

[Model card](https://huggingface.co/dunzhang/stella_en_1.5B_v5) on the HuggingFace Hub.

## Running the example

Stella_en_1.5B_v5 is used to generate text embeddings for a prompt. The model weights are downloaded from the hub on the first run.

```bash
$ cargo run --example stella-en-v5 --release -- --query "What are safetensors?"

> [[ 0.3905, -0.0130, 0.2072, ..., -0.1100, -0.0086, 0.6002]]
> Tensor[[1, 1024], f32]
```

Stella_en_1.5B_v5 is trained with [MRL](https://arxiv.org/abs/2205.13147), which enables multiple embedding dimensions.

The following reproduces the example in the [model card](https://huggingface.co/dunzhang/stella_en_1.5B_v5) for a retrieval task (s2p). The sample queries and docs are hardcoded in the example.

```bash
$ cargo run --example stella-en-v5 --release --features <metal | cuda> -- --which 1.5b

> Score: 0.8178786
> Query: What are some ways to reduce stress?
> Answer: There are many effective ways to reduce stress. Some common techniques include deep breathing, meditation, and physical activity. Engaging in hobbies, spending
> time in nature, and connecting with loved ones can also help alleviate stress. Additionally, setting boundaries, practicing self-care, and learning to say no can prevent
> stress from building up.
>
> Score: 0.7853528
> Query: What are the benefits of drinking green tea?
> Answer: Green tea has been consumed for centuries and is known for its potential health benefits. It contains antioxidants that may help protect the body against damage
> caused by free radicals. Regular consumption of green tea has been associated with improved heart health, enhanced cognitive function, and a reduced risk of certain types
> of cancer. The polyphenols in green tea may also have anti-inflammatory and weight loss properties.

$ cargo run --example stella-en-v5 --release --features <metal | cuda> -- --which 400m

> Score: 0.8397539
> Query: What are some ways to reduce stress?
> Answer: There are many effective ways to reduce stress. Some common techniques include deep breathing, meditation, and physical activity. Engaging in hobbies, spending
> time in nature, and connecting with loved ones can also help alleviate stress. Additionally, setting boundaries, practicing self-care, and learning to say no can prevent
> stress from building up.
>
> Score: 0.809545
> Query: What are the benefits of drinking green tea?
> Answer: Green tea has been consumed for centuries and is known for its potential health benefits. It contains antioxidants that may help protect the body against damage
> caused by free radicals. Regular consumption of green tea has been associated with improved heart health, enhanced cognitive function, and a reduced risk of certain types
> of cancer. The polyphenols in green tea may also have anti-inflammatory and weight loss properties.
```

## Supported options:
- `Stella_en_v5` has 2 model variants published - a 1.5B variant and a 400M variant. This is selected through the `--which` flag, e.g. `--which 400m` or `--which 1.5b`.
- `Stella_en_v5` supports 256, 768, 1024, 2048, 4096, 6144 and 8192 embedding dimensions (though the model card mentions 512, I couldn't find weights for the same).
In the example run this is supported with the `--embed-dim` option, e.g. `... --embed-dim 4096`. Defaults to `1024`.
- As per the [model card](https://huggingface.co/dunzhang/stella_en_1.5B_v5), the model has been primarily trained on `s2s` (similarity) and `s2p` (retrieval) tasks. These require slightly different `query` preprocessing (a different prompt template for each). In this example this is enabled through the `--task` option.
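For instance, the documented flags can be combined (an illustrative invocation; use the `cuda` feature instead of `metal` on NVIDIA GPUs):

```bash
$ cargo run --example stella-en-v5 --release --features metal -- --which 400m --embed-dim 2048
```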
candle/candle-examples/examples/stella-en-v5/README.md/0
{ "file_path": "candle/candle-examples/examples/stella-en-v5/README.md", "repo_id": "candle", "token_count": 1143 }
# Get the checkpoint from # https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt import torch from safetensors.torch import save_file data = torch.load("tiny.en.pt") weights = {} for k, v in data["model_state_dict"].items(): weights[k] = v.contiguous() print(k, v.shape, v.dtype) save_file(weights, "tiny.en.safetensors") print(data["dims"])
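# Sanity check (illustrative addition, not part of the original script): the converted
# file can be loaded back with safetensors to verify that all tensors were written.
from safetensors.torch import load_file

reloaded = load_file("tiny.en.safetensors")
print(len(reloaded), "tensors saved")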
candle/candle-examples/examples/whisper/extract_weights.py/0
{ "file_path": "candle/candle-examples/examples/whisper/extract_weights.py", "repo_id": "candle", "token_count": 183 }
# candle-yolo-v8: Object Detection and Pose Estimation

This is a port of [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics). The implementation is based on the [tinygrad version](https://github.com/tinygrad/tinygrad/blob/master/examples/yolov8.py) and on the model architecture described in this [issue](https://github.com/ultralytics/ultralytics/issues/189). The supported tasks are object detection and pose estimation.

You can try this model online on the [Candle YOLOv8 Space](https://huggingface.co/spaces/lmz/candle-yolo). The model runs entirely in your browser using WebAssembly - if you use a custom image it never leaves your phone/computer!

## Running the examples

### Object Detection
```bash
cargo run --example yolo-v8 --release -- candle-examples/examples/yolo-v8/assets/bike.jpg
```

This prints details about the detected objects and generates a `bike.pp.jpg` file.

![Leading group, Giro d'Italia 2021](./assets/bike.jpg)

Image source: [wikimedia](https://commons.wikimedia.org/wiki/File:Leading_group,_Giro_d%27Italia_2021,_Stage_15.jpg).

![Leading group, Giro d'Italia 2021](./assets/bike.od.jpg)

### Pose Estimation
```bash
cargo run --example yolo-v8 --release -- \
  candle-examples/examples/yolo-v8/assets/bike.jpg --task pose
```

![Leading group, Giro d'Italia 2021](./assets/bike.pose.jpg)

### Command-line flags
- `--which`: select the model variant to be used, `n`, `s`, `m`, `l`, or `x` by increasing size and quality.
- `--task`: `detect` for object detection and `pose` for pose estimation.
- `--legend-size`: the size of the characters to print.
- `--model`: use a local model file rather than downloading it from the hub.
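The flags can be combined, e.g. to run pose estimation with the largest variant (an illustrative combination of the flags listed above):

```bash
cargo run --example yolo-v8 --release -- \
  candle-examples/examples/yolo-v8/assets/bike.jpg --task pose --which x
```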
candle/candle-examples/examples/yolo-v8/README.md/0
{ "file_path": "candle/candle-examples/examples/yolo-v8/README.md", "repo_id": "candle", "token_count": 562 }
// Pytorch also has an implementation of Philox RNG: https://github.com/pytorch/pytorch/blob/8ca3c881db3e3510fcb7725389f6a0633c9b992c/torch/csrc/jit/tensorexpr/cuda_random.h #pragma once // Philox CUDA. namespace flash { struct ull2 { unsigned long long x; unsigned long long y; }; __forceinline__ __device__ uint2 mulhilo32(const unsigned int a, const unsigned int b) { uint2 *res; unsigned long long tmp; asm ("mul.wide.u32 %0, %1, %2;\n\t" : "=l"(tmp) : "r"(a), "r"(b)); res = (uint2*)(&tmp); return *res; } __forceinline__ __device__ uint4 philox_single_round(const uint4 ctr, const uint2 key) { constexpr unsigned long kPhiloxSA = 0xD2511F53; constexpr unsigned long kPhiloxSB = 0xCD9E8D57; uint2 res0 = mulhilo32(kPhiloxSA, ctr.x); uint2 res1 = mulhilo32(kPhiloxSB, ctr.z); uint4 ret = {res1.y ^ ctr.y ^ key.x, res1.x, res0.y ^ ctr.w ^ key.y, res0.x}; return ret; } __forceinline__ __device__ uint4 philox(unsigned long long seed, unsigned long long subsequence, unsigned long long offset) { constexpr unsigned long kPhilox10A = 0x9E3779B9; constexpr unsigned long kPhilox10B = 0xBB67AE85; uint2 key = reinterpret_cast<uint2&>(seed); uint4 counter; ull2 *tmp = reinterpret_cast<ull2*>(&counter); tmp->x = offset; tmp->y = subsequence; #pragma unroll for (int i = 0; i < 6; i++) { counter = philox_single_round(counter, key); key.x += (kPhilox10A); key.y += (kPhilox10B); } uint4 output = philox_single_round(counter, key); return output; } } // namespace flash
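// --- Usage sketch (illustrative, not part of the original header) -------------------
// A standalone test kernel in a .cu file could draw one Philox 4-tuple per thread,
// using the global thread index as the subsequence so that threads get independent
// counter streams:
//
//   #include "philox.cuh"
//
//   __global__ void philox_fill(uint4 *out, unsigned long long seed, size_t n) {
//       const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
//       if (i >= n) return;
//       out[i] = flash::philox(seed, /*subsequence=*/i, /*offset=*/0);
//   }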
candle/candle-flash-attn/kernels/philox.cuh/0
{ "file_path": "candle/candle-flash-attn/kernels/philox.cuh", "repo_id": "candle", "token_count": 770 }
#include "cuda_utils.cuh" #include<stdint.h> // Naive implementation of conv1d. template <typename T, typename A> __device__ void conv1d( const size_t src_numel, const size_t l_out, const size_t stride, const size_t padding, const size_t dilation, const size_t *info, const T *src, const T *kernel, T *dst ) { // src: (b_size, c_in, l_in) // k: (c_out, c_in, k_size) const size_t *src_dims = info; const size_t *src_s = info + 3; const size_t *k_dims = info + 6; const size_t *k_s = info + 9; const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; const size_t k_size = k_dims[2]; const size_t c_out = k_dims[0]; const size_t c_in = src_dims[1]; const size_t l_in = src_dims[2]; if (dst_i >= src_dims[0] * c_out * l_out) { return; } // TODO const size_t b_idx = dst_i / (l_out * c_out); const size_t dst_c_idx = (dst_i / l_out) % c_out; const size_t dst_l = dst_i % l_out; const size_t src_idx0 = b_idx * src_s[0]; A d = 0; for (size_t offset = 0; offset < k_size; ++offset) { size_t src_l = (stride * dst_l + offset) * dilation; if (src_l < padding || src_l >= padding + l_in) { continue; } src_l -= padding; for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) { const size_t src_idx = src_idx0 + src_c_idx * src_s[1] + src_l * src_s[2]; const size_t k_idx = dst_c_idx * k_s[0] + src_c_idx * k_s[1] + offset * k_s[2]; d += static_cast<A>(src[src_idx]) * static_cast<A>(kernel[k_idx]); } } dst[dst_i] = static_cast<T>(d); } template <typename T> __device__ void im2col1d( const size_t dst_numel, const size_t l_out, const size_t l_k, const size_t stride, const size_t padding, const size_t dilation, const size_t *info, const T *src, T *dst ) { const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; // dst: (b_size, l_out, c_in, l_k) // src: (b_size, c_in, l_in) if (dst_i >= dst_numel) { return; } const size_t *src_dims = info; const size_t *src_s = info + 3; const size_t c_in = src_dims[1]; const size_t l_in = src_dims[2]; const size_t dst_s2 = l_k; const size_t dst_s1 = c_in * dst_s2; const size_t dst_s0 = l_out * dst_s1; size_t tmp_dst_i = dst_i; const size_t b_idx = tmp_dst_i / dst_s0; tmp_dst_i -= b_idx * dst_s0; const size_t l_idx = tmp_dst_i / dst_s1; tmp_dst_i -= l_idx * dst_s1; const size_t c_idx = tmp_dst_i / dst_s2; tmp_dst_i -= c_idx * dst_s2; const size_t l_k_idx = tmp_dst_i; size_t src_l_idx = l_idx * stride + l_k_idx * dilation; if (src_l_idx < padding || src_l_idx >= l_in + padding) { dst[dst_i] = static_cast<T>(0); } else { src_l_idx -= padding; const size_t src_i = b_idx * src_s[0] + c_idx * src_s[1] + src_l_idx * src_s[2]; dst[dst_i] = src[src_i]; } } template <typename T> __device__ void col2im1d( const size_t dst_el, const size_t l_out, const size_t l_in, const size_t c_out, const size_t k_size, const size_t stride, const T *src, T *dst ) { const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; // src: (b_size, l_in, c_out, l_k) // dst: (b_size, c_out, l_out) if (dst_i >= dst_el) { return; } const size_t dst_s0 = c_out * l_out; const size_t dst_s1 = l_out; const size_t src_s0 = c_out * k_size * l_in; const size_t src_s1 = c_out * k_size; const size_t src_s2 = k_size; size_t tmp_dst_i = dst_i; const size_t b_idx = tmp_dst_i / dst_s0; tmp_dst_i -= b_idx * dst_s0; const size_t c_idx = tmp_dst_i / dst_s1; tmp_dst_i -= c_idx * dst_s1; const int l_out_idx = tmp_dst_i; dst[dst_i] = static_cast<T>(0); int l_in_idx = l_out_idx / stride; int k0 = l_out_idx - l_in_idx * stride; // l_out_idx = l_in_idx * stride + k0 for (; k0 < k_size && l_in_idx >= 0; k0 += stride, --l_in_idx) { if 
(l_in_idx < l_in) { const size_t src_i = b_idx * src_s0 + l_in_idx * src_s1 + c_idx * src_s2 + k0; dst[dst_i] += src[src_i]; } } } template <typename T> __device__ void im2col( const size_t dst_numel, const size_t h_out, const size_t w_out, const size_t h_k, const size_t w_k, const size_t stride, const size_t padding, const size_t dilation, const size_t *info, const T *src, T *dst ) { const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; // dst: (b_size, h_out, w_out, c_in, h_k, w_k) // src: (b_size, c_in, h_in, w_in) if (dst_i >= dst_numel) { return; } const size_t *src_dims = info; const size_t *src_s = info + 4; const size_t c_in = src_dims[1]; const size_t h_in = src_dims[2]; const size_t w_in = src_dims[3]; const size_t dst_s4 = w_k; const size_t dst_s3 = h_k * dst_s4; const size_t dst_s2 = c_in * dst_s3; const size_t dst_s1 = w_out * dst_s2; const size_t dst_s0 = h_out * dst_s1; size_t tmp_dst_i = dst_i; const size_t b_idx = tmp_dst_i / dst_s0; tmp_dst_i -= b_idx * dst_s0; const size_t h_idx = tmp_dst_i / dst_s1; tmp_dst_i -= h_idx * dst_s1; const size_t w_idx = tmp_dst_i / dst_s2; tmp_dst_i -= w_idx * dst_s2; const size_t c_idx = tmp_dst_i / dst_s3; tmp_dst_i -= c_idx * dst_s3; const size_t h_k_idx = tmp_dst_i / dst_s4; tmp_dst_i -= h_k_idx * dst_s4; const size_t w_k_idx = tmp_dst_i; size_t src_h_idx = h_idx * stride + h_k_idx * dilation; size_t src_w_idx = w_idx * stride + w_k_idx * dilation; if (src_h_idx < padding || src_h_idx >= h_in + padding) { dst[dst_i] = static_cast<T>(0); } else if (src_w_idx < padding || src_w_idx >= w_in + padding) { dst[dst_i] = static_cast<T>(0); } else { src_h_idx -= padding; src_w_idx -= padding; const size_t src_i = b_idx * src_s[0] + c_idx * src_s[1] + src_h_idx * src_s[2] + src_w_idx * src_s[3]; dst[dst_i] = src[src_i]; } } // Naive implementation of conv2d. template <typename T, typename A> __device__ void conv2d( const size_t src_numel, const size_t w_out, const size_t h_out, const size_t stride, const size_t padding, const size_t dilation, const size_t *info, const T *src, const T *kernel, T *dst ) { const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; // src: (b_size, c_in, h_in, w_in) // k: (c_out, c_in, h_k, w_k) const size_t *src_dims = info; const size_t *src_s = info + 4; const size_t *k_dims = info + 8; const size_t *k_s = info + 12; const size_t h_k = k_dims[2]; const size_t w_k = k_dims[3]; const size_t c_out = k_dims[0]; const size_t c_in = src_dims[1]; const size_t h_in = src_dims[2]; const size_t w_in = src_dims[3]; if (dst_i >= src_dims[0] * c_out * w_out * h_out) { return; } // TODO const size_t b_idx = dst_i / (w_out * h_out * c_out); const size_t dst_c_idx = (dst_i / (w_out * h_out)) % c_out; // NCHW layout. 
const size_t dst_h = (dst_i / w_out) % h_out; const size_t dst_w = dst_i % w_out; const size_t src_idx0 = b_idx * src_s[0]; A d = 0; for (size_t w_offset = 0; w_offset < w_k; ++w_offset) { size_t src_w = stride * dst_w + w_offset * dilation; if (src_w < padding || src_w >= w_in + padding) { continue; } src_w -= padding; for (size_t h_offset = 0; h_offset < h_k; ++h_offset) { size_t src_h = stride * dst_h + h_offset * dilation; if (src_h < padding || src_h >= h_in + padding) { continue; } src_h -= padding; for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) { const size_t src_idx = src_idx0 + src_c_idx * src_s[1] + src_h * src_s[2] + src_w * src_s[3]; const size_t k_idx = dst_c_idx * k_s[0] + src_c_idx * k_s[1] + h_offset * k_s[2] + w_offset * k_s[3]; d += static_cast<A>(src[src_idx]) * static_cast<A>(kernel[k_idx]); } } } dst[dst_i] = static_cast<T>(d); } // Naive implementation of conv_transpose1d. template <typename T, typename A> __device__ void conv_transpose1d( const size_t src_numel, const size_t l_out, const size_t stride, const size_t padding, const size_t out_padding, const size_t dilation, const size_t *info, const T *src, const T *kernel, T *dst ) { const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; // src: (b_size, c_in, l_in) // k: (c_in, c_out, l_k) const size_t *src_dims = info; const size_t *src_s = info + 3; const size_t *k_dims = info + 6; const size_t *k_s = info + 9; const size_t l_k = k_dims[2]; const size_t c_out = k_dims[1]; const size_t c_in = src_dims[1]; const size_t l_in = src_dims[2]; if (dst_i >= src_dims[0] * c_out * l_out) { return; } // TODO const size_t b_idx = dst_i / (l_out * c_out); const size_t dst_c_idx = (dst_i / l_out) % c_out; // NCL layout. const size_t out_x = dst_i % l_out; const size_t src_idx0 = b_idx * src_s[0]; A d = 0; for (int k_x = 0; k_x < (int)l_k; ++k_x) { // let out_x = inp_x * p.stride + k_x * p.dilation - p.padding; int inp_x_stride = (int)(out_x + padding) - k_x * dilation; if (inp_x_stride < 0 || inp_x_stride % stride) { continue; } int inp_x = inp_x_stride / stride; if (inp_x >= l_in) continue; for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) { const size_t src_idx = src_idx0 + src_c_idx * src_s[1] + inp_x * src_s[2]; const size_t k_idx = src_c_idx * k_s[0] + dst_c_idx * k_s[1] + k_x * k_s[2]; d += static_cast<A>(src[src_idx]) * static_cast<A>(kernel[k_idx]); } } dst[dst_i] = static_cast<T>(d); } // Naive implementation of conv_transpose2d. template <typename T, typename A> __device__ void conv_transpose2d( const size_t src_numel, const size_t w_out, const size_t h_out, const size_t stride, const size_t padding, const size_t out_padding, const size_t dilation, const size_t *info, const T *src, const T *kernel, T *dst ) { const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; // src: (b_size, c_in, h_in, w_in) // k: (c_in, c_out, h_k, w_k) const size_t *src_dims = info; const size_t *src_s = info + 4; const size_t *k_dims = info + 8; const size_t *k_s = info + 12; const size_t h_k = k_dims[2]; const size_t w_k = k_dims[3]; const size_t c_out = k_dims[1]; const size_t c_in = src_dims[1]; const size_t h_in = src_dims[2]; const size_t w_in = src_dims[3]; if (dst_i >= src_dims[0] * c_out * w_out * h_out) { return; } // TODO const size_t b_idx = dst_i / (w_out * h_out * c_out); const size_t dst_c_idx = (dst_i / (w_out * h_out)) % c_out; // NCHW layout. 
const size_t out_y = (dst_i / w_out) % h_out; const size_t out_x = dst_i % w_out; const size_t src_idx0 = b_idx * src_s[0]; A d = 0; for (int k_x = 0; k_x < (int)w_k; ++k_x) { // let out_x = inp_x * p.stride + k_x * p.dilation - p.padding; int inp_x_stride = (int)(out_x + padding) - k_x * dilation; if (inp_x_stride < 0 || inp_x_stride % stride) { continue; } int inp_x = inp_x_stride / stride; if (inp_x >= w_in) continue; for (int k_y = 0; k_y < (int)h_k; ++k_y) { int inp_y_stride = (int)(out_y + padding) - k_y * dilation; if (inp_y_stride < 0 || inp_y_stride % stride) { continue; } int inp_y = inp_y_stride / stride; if (inp_y >= h_in) continue; for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) { const size_t src_idx = src_idx0 + src_c_idx * src_s[1] + inp_y * src_s[2] + inp_x * src_s[3]; const size_t k_idx = src_c_idx * k_s[0] + dst_c_idx * k_s[1] + k_y * k_s[2] + k_x * k_s[3]; d += static_cast<A>(src[src_idx]) * static_cast<A>(kernel[k_idx]); } } } dst[dst_i] = static_cast<T>(d); } template <typename T, typename A> __device__ void avg_pool2d( const size_t src_numel, const size_t w_k, const size_t h_k, const size_t w_stride, const size_t h_stride, const size_t *info, const T *src, T *dst ) { const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; // src: (b_size, c_in, w_in, h_in) const size_t *src_dims = info; const size_t *src_s = info + 4; const size_t c = src_dims[1]; const size_t w_in = src_dims[2]; const size_t h_in = src_dims[3]; const size_t w_out = (w_in - w_k) / w_stride + 1; const size_t h_out = (h_in - h_k) / h_stride + 1; if (dst_i >= src_dims[0] * c * w_out * h_out) { return; } // TODO: Improve this. const size_t b_idx = dst_i / (w_out * h_out * c); const size_t c_idx = (dst_i / (w_out * h_out)) % c; const size_t dst_w = (dst_i / h_out) % w_out; const size_t dst_h = dst_i % h_out; const size_t src_idx0 = b_idx * src_s[0]; const float scale = 1.0 / (w_k * h_k); A d = 0; for (size_t w_offset = 0; w_offset < w_k; ++w_offset) { size_t src_w = w_stride * dst_w + w_offset; if (src_w >= w_in) { continue; } for (size_t h_offset = 0; h_offset < h_k; ++h_offset) { size_t src_h = h_stride * dst_h + h_offset; if (src_h >= h_in) { continue; } const size_t src_idx = src_idx0 + c_idx * src_s[1] + src_w * src_s[2] + src_h * src_s[3]; d += static_cast<A>(src[src_idx]); } } dst[dst_i] = static_cast<T>(d * scale); } template <typename T> __device__ void max_pool2d( const size_t src_numel, const size_t w_k, const size_t h_k, const size_t w_stride, const size_t h_stride, const size_t *info, const T *src, T *dst ) { const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; // src: (b_size, c_in, w_in, h_in) const size_t *src_dims = info; const size_t *src_s = info + 4; const size_t c = src_dims[1]; const size_t w_in = src_dims[2]; const size_t h_in = src_dims[3]; const size_t w_out = (w_in - w_k) / w_stride + 1; const size_t h_out = (h_in - h_k) / h_stride + 1; if (dst_i >= src_dims[0] * c * w_out * h_out) { return; } // TODO: Improve this. 
const size_t b_idx = dst_i / (w_out * h_out * c); const size_t c_idx = (dst_i / (w_out * h_out)) % c; const size_t dst_w = (dst_i / h_out) % w_out; const size_t dst_h = dst_i % h_out; const size_t src_idx0 = b_idx * src_s[0]; T d = 0; bool set = false; for (size_t w_offset = 0; w_offset < w_k; ++w_offset) { size_t src_w = w_stride * dst_w + w_offset; if (src_w >= w_in) { continue; } for (size_t h_offset = 0; h_offset < h_k; ++h_offset) { size_t src_h = h_stride * dst_h + h_offset; if (src_h >= h_in) { continue; } const size_t src_idx = src_idx0 + c_idx * src_s[1] + src_w * src_s[2] + src_h * src_s[3]; if (set) { d = maxg(d, src[src_idx]); } else { d = src[src_idx]; set = true; } } } dst[dst_i] = d; } template <typename T> __device__ void upsample_nearest2d( const size_t w_out, const size_t h_out, const double w_scale, const double h_scale, const size_t *info, const T *src, T *dst ) { const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x; // src: (b_size, c_in, w_in, h_in) const size_t *src_dims = info; const size_t *src_s = info + 4; const size_t c = src_dims[1]; const size_t w_in = src_dims[2]; const size_t h_in = src_dims[3]; if (dst_i >= src_dims[0] * c * w_out * h_out) { return; } // TODO: Improve this. const size_t b_idx = dst_i / (w_out * h_out * c); const size_t c_idx = (dst_i / (w_out * h_out)) % c; const size_t dst_w = (dst_i / h_out) % w_out; const size_t dst_h = dst_i % h_out; size_t src_w = static_cast<size_t>(dst_w * w_scale); size_t src_h = static_cast<size_t>(dst_h * h_scale); if (src_w >= w_in) { src_w = w_in - 1; } if (src_h >= h_in) { src_h = h_in - 1; } const size_t src_i = b_idx * src_s[0] + c_idx * src_s[1] + src_w * src_s[2] + src_h * src_s[3]; dst[dst_i] = src[src_i]; } #define CONV1D_OP(TYPENAME, TYPEACC, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t src_numel, \ const size_t num_dims, \ const size_t stride, \ const size_t padding, \ const size_t dilation, \ const size_t *info, \ const TYPENAME *src, \ const TYPENAME *kernel, \ TYPENAME *dst \ ) { \ conv1d<TYPENAME, TYPEACC>(src_numel, num_dims, stride, padding, dilation, info, src, kernel, dst); \ } \ #define CONV2D_OP(TYPENAME, TYPEACC, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t src_numel, \ const size_t w_out, \ const size_t h_out, \ const size_t stride, \ const size_t padding, \ const size_t dilation, \ const size_t *info, \ const TYPENAME *src, \ const TYPENAME *kernel, \ TYPENAME *dst \ ) { \ conv2d<TYPENAME, TYPEACC>(src_numel, w_out, h_out, stride, padding, dilation, info, src, kernel, dst); \ } \ #define IM2COL1D_OP(TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t dst_numel, \ const size_t l_out, \ const size_t l_k, \ const size_t stride, \ const size_t padding, \ const size_t dilation, \ const size_t *info, \ const TYPENAME *src, \ TYPENAME *dst \ ) { \ im2col1d<TYPENAME>(dst_numel, l_out, l_k, stride, padding, dilation, info, src, dst); \ } \ #define COL2IM1D_OP(TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t dst_el, \ const size_t l_out, \ const size_t l_in, \ const size_t c_out, \ const size_t k_size, \ const size_t stride, \ const TYPENAME *src, \ TYPENAME *dst \ ) { \ col2im1d<TYPENAME>(dst_el, l_out, l_in, c_out, k_size, stride, src, dst); \ } \ #define IM2COL_OP(TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t dst_numel, \ const size_t h_out, \ const size_t w_out, \ const size_t h_k, \ const size_t w_k, \ const size_t stride, \ const size_t padding, \ const size_t dilation, \ const size_t 
*info, \ const TYPENAME *src, \ TYPENAME *dst \ ) { \ im2col<TYPENAME>(dst_numel, h_out, w_out, h_k, w_k, stride, padding, dilation, info, src, dst); \ } \ #define CONVT1D_OP(TYPENAME, TYPEACC, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t src_numel, \ const size_t l_out, \ const size_t stride, \ const size_t padding, \ const size_t out_padding, \ const size_t dilation, \ const size_t *info, \ const TYPENAME *src, \ const TYPENAME *kernel, \ TYPENAME *dst \ ) { \ conv_transpose1d<TYPENAME, TYPEACC>(src_numel, l_out, stride, padding, out_padding, dilation, info, src, kernel, dst); \ } \ #define CONVT2D_OP(TYPENAME, TYPEACC, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t src_numel, \ const size_t w_out, \ const size_t h_out, \ const size_t stride, \ const size_t padding, \ const size_t out_padding, \ const size_t dilation, \ const size_t *info, \ const TYPENAME *src, \ const TYPENAME *kernel, \ TYPENAME *dst \ ) { \ conv_transpose2d<TYPENAME, TYPEACC>(src_numel, w_out, h_out, stride, padding, out_padding, dilation, info, src, kernel, dst); \ } \ #define AVG_POOL2D_OP(TYPENAME, TYPEACC, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t src_numel, \ const size_t w_k, \ const size_t h_k, \ const size_t w_stride, \ const size_t h_stride, \ const size_t *info, \ const TYPENAME *src, \ TYPENAME *dst \ ) { \ avg_pool2d<TYPENAME, TYPEACC>(src_numel, w_k, h_k, w_stride, h_stride, info, src, dst); \ } \ #define MAX_POOL2D_OP(TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t src_numel, \ const size_t w_k, \ const size_t h_k, \ const size_t w_stride, \ const size_t h_stride, \ const size_t *info, \ const TYPENAME *src, \ TYPENAME *dst \ ) { \ max_pool2d<TYPENAME>(src_numel, w_k, h_k, w_stride, h_stride, info, src, dst); \ } \ #define UPSAMPLE_NEAREST2D_OP(TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t w_out, \ const size_t h_out, \ const double w_scale, \ const double h_scale, \ const size_t *info, \ const TYPENAME *src, \ TYPENAME *dst \ ) { \ upsample_nearest2d<TYPENAME>(w_out, h_out, w_scale, h_scale, info, src, dst); \ } \ #if __CUDA_ARCH__ >= 800 CONV1D_OP(__nv_bfloat16, float, conv1d_bf16) CONV2D_OP(__nv_bfloat16, float, conv2d_bf16) CONVT1D_OP(__nv_bfloat16, float, conv_transpose1d_bf16) CONVT2D_OP(__nv_bfloat16, float, conv_transpose2d_bf16) AVG_POOL2D_OP(__nv_bfloat16, float, avg_pool2d_bf16) MAX_POOL2D_OP(__nv_bfloat16, max_pool2d_bf16) UPSAMPLE_NEAREST2D_OP(__nv_bfloat16, upsample_nearest2d_bf16) IM2COL_OP(__nv_bfloat16, im2col_bf16) IM2COL1D_OP(__nv_bfloat16, im2col1d_bf16) COL2IM1D_OP(__nv_bfloat16, col2im1d_bf16) #endif #if __CUDA_ARCH__ >= 530 CONV1D_OP(__half, float, conv1d_f16) CONV2D_OP(__half, float, conv2d_f16) CONVT1D_OP(__half, float, conv_transpose1d_f16) CONVT2D_OP(__half, float, conv_transpose2d_f16) AVG_POOL2D_OP(__half, float, avg_pool2d_f16) MAX_POOL2D_OP(__half, max_pool2d_f16) UPSAMPLE_NEAREST2D_OP(__half, upsample_nearest2d_f16) IM2COL_OP(__half, im2col_f16) IM2COL1D_OP(__half, im2col1d_f16) COL2IM1D_OP(__half, col2im1d_f16) #endif CONV1D_OP(float, float, conv1d_f32) CONV1D_OP(double, double, conv1d_f64) CONV1D_OP(uint8_t, uint8_t, conv1d_u8) CONV1D_OP(uint32_t, uint32_t, conv1d_u32) CONV2D_OP(float, float, conv2d_f32) CONV2D_OP(double, double, conv2d_f64) CONV2D_OP(uint8_t, uint8_t, conv2d_u8) CONV2D_OP(uint32_t, uint32_t, conv2d_u32) CONVT1D_OP(float, float, conv_transpose1d_f32) CONVT1D_OP(double, double, conv_transpose1d_f64) CONVT1D_OP(uint8_t, uint8_t, conv_transpose1d_u8) 
CONVT1D_OP(uint32_t, uint32_t, conv_transpose1d_u32) CONVT2D_OP(float, float, conv_transpose2d_f32) CONVT2D_OP(double, double, conv_transpose2d_f64) CONVT2D_OP(uint8_t, uint8_t, conv_transpose2d_u8) CONVT2D_OP(uint32_t, uint32_t, conv_transpose2d_u32) AVG_POOL2D_OP(float, float, avg_pool2d_f32) AVG_POOL2D_OP(double, double, avg_pool2d_f64) AVG_POOL2D_OP(uint8_t, uint8_t, avg_pool2d_u8) AVG_POOL2D_OP(uint32_t, uint32_t, avg_pool2d_u32) MAX_POOL2D_OP(float, max_pool2d_f32) MAX_POOL2D_OP(double, max_pool2d_f64) MAX_POOL2D_OP(uint8_t, max_pool2d_u8) MAX_POOL2D_OP(uint32_t, max_pool2d_u32) UPSAMPLE_NEAREST2D_OP(float, upsample_nearest2d_f32) UPSAMPLE_NEAREST2D_OP(double, upsample_nearest2d_f64) UPSAMPLE_NEAREST2D_OP(uint8_t, upsample_nearest2d_u8) UPSAMPLE_NEAREST2D_OP(uint32_t, upsample_nearest2d_u32) IM2COL_OP(float, im2col_f32) IM2COL_OP(double, im2col_f64) IM2COL_OP(uint8_t, im2col_u8) IM2COL_OP(uint32_t, im2col_u32) IM2COL1D_OP(float, im2col1d_f32) IM2COL1D_OP(double, im2col1d_f64) IM2COL1D_OP(uint8_t, im2col1d_u8) IM2COL1D_OP(uint32_t, im2col1d_u32) COL2IM1D_OP(float, col2im1d_f32) COL2IM1D_OP(double, col2im1d_f64) COL2IM1D_OP(uint8_t, col2im1d_u8) COL2IM1D_OP(uint32_t, col2im1d_u32)
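// --- Note (illustrative, not part of the original file) -----------------------------
// The kernels above take the output sizes (l_out / h_out / w_out) as parameters, so the
// caller is expected to compute them on the host. With the usual convolution arithmetic
// (the standard formulas used by most frameworks, stated here only as a reading aid for
// the index computations above):
//
//   l_out = (l_in + 2 * padding - dilation * (k - 1) - 1) / stride + 1                    // conv / im2col
//   l_out = (l_in - 1) * stride - 2 * padding + dilation * (k - 1) + out_padding + 1      // conv_transpose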
candle/candle-kernels/src/conv.cu/0
{ "file_path": "candle/candle-kernels/src/conv.cu", "repo_id": "candle", "token_count": 11728 }
#include <metal_stdlib> using namespace metal; #define MAX(x, y) ((x) > (y) ? (x) : (y)) template <typename T> METAL_FUNC void im2col( constant size_t &dst_numel, constant size_t &h_out, constant size_t &w_out, constant size_t &h_k, constant size_t &w_k, constant size_t &stride, constant size_t &padding, constant size_t &dilation, constant size_t *src_dims, constant size_t *src_strides, device const T *src, device T *dst, uint tid [[ thread_position_in_grid ]] ) { // dst: (b_size, h_out, w_out, c_in, h_k, w_k) // src: (b_size, c_in, h_in, w_in) if (tid >= dst_numel) { return; } const size_t b_in = src_dims[0]; const size_t c_in = src_dims[1]; const size_t h_in = src_dims[2]; const size_t w_in = src_dims[3]; const size_t dst_s4 = w_k; const size_t dst_s3 = h_k * dst_s4; const size_t dst_s2 = c_in * dst_s3; const size_t dst_s1 = w_out * dst_s2; const size_t dst_s0 = h_out * dst_s1; size_t tmp_tid = tid; const size_t b_idx = tmp_tid / dst_s0; tmp_tid -= b_idx * dst_s0; const size_t h_idx = tmp_tid / dst_s1; tmp_tid -= h_idx * dst_s1; const size_t w_idx = tmp_tid / dst_s2; tmp_tid -= w_idx * dst_s2; const size_t c_idx = tmp_tid / dst_s3; tmp_tid -= c_idx * dst_s3; const size_t h_k_idx = tmp_tid / dst_s4; tmp_tid -= h_k_idx * dst_s4; const size_t w_k_idx = tmp_tid; size_t src_h_idx = h_idx * stride + h_k_idx * dilation; size_t src_w_idx = w_idx * stride + w_k_idx * dilation; if (src_h_idx < padding || src_h_idx >= h_in + padding) { dst[tid] = static_cast<T>(0); } else if (src_w_idx < padding || src_w_idx >= w_in + padding) { dst[tid] = static_cast<T>(0); } else { src_h_idx -= padding; src_w_idx -= padding; const size_t src_i = b_idx * src_strides[0] + c_idx * src_strides[1] + src_h_idx * src_strides[2] + src_w_idx * src_strides[3]; dst[tid] = src[src_i]; } } template <typename T> METAL_FUNC void col2im1d( constant size_t &dst_el, constant size_t &l_out, constant size_t &l_in, constant size_t &c_out, constant size_t &k_size, constant size_t &stride, device const T *src, device T *dst, uint dst_i [[ thread_position_in_grid ]] ) { // src: (b_size, l_in, c_out, l_k) // dst: (b_size, c_out, l_out) if (dst_i >= dst_el) { return; } const size_t dst_s0 = c_out * l_out; const size_t dst_s1 = l_out; const size_t src_s0 = c_out * k_size * l_in; const size_t src_s1 = c_out * k_size; const size_t src_s2 = k_size; size_t tmp_dst_i = dst_i; const size_t b_idx = tmp_dst_i / dst_s0; tmp_dst_i -= b_idx * dst_s0; const size_t c_idx = tmp_dst_i / dst_s1; tmp_dst_i -= c_idx * dst_s1; const int l_out_idx = tmp_dst_i; dst[dst_i] = static_cast<T>(0); int l_in_idx = l_out_idx / stride; int k0 = l_out_idx - l_in_idx * stride; // l_out_idx = l_in_idx * stride + k0 for (; k0 < k_size && l_in_idx >= 0; k0 += stride, --l_in_idx) { if (l_in_idx < l_in) { const size_t src_i = b_idx * src_s0 + l_in_idx * src_s1 + c_idx * src_s2 + k0; dst[dst_i] += src[src_i]; } } } template <typename T> METAL_FUNC void im2col1d( constant size_t &dst_numel, constant size_t &l_out, constant size_t &l_k, constant size_t &stride, constant size_t &padding, constant size_t &dilation, constant size_t *src_dims, constant size_t *src_strides, device const T *src, device T *dst, uint tid [[ thread_position_in_grid ]] ) { // dst: (b_size, l_out, c_in, l_k) // src: (b_size, c_in, l_in) if (tid >= dst_numel) { return; } const size_t b_in = src_dims[0]; const size_t c_in = src_dims[1]; const size_t l_in = src_dims[2]; const size_t dst_s2 = l_k; const size_t dst_s1 = c_in * dst_s2; const size_t dst_s0 = l_out * dst_s1; size_t tmp_dst_i = tid; const size_t 
b_idx = tmp_dst_i / dst_s0; tmp_dst_i -= b_idx * dst_s0; const size_t l_idx = tmp_dst_i / dst_s1; tmp_dst_i -= l_idx * dst_s1; const size_t c_idx = tmp_dst_i / dst_s2; tmp_dst_i -= c_idx * dst_s2; const size_t l_k_idx = tmp_dst_i; size_t src_l_idx = l_idx * stride + l_k_idx * dilation; if (src_l_idx < padding || src_l_idx >= l_in + padding) { dst[tid] = static_cast<T>(0); } else { src_l_idx -= padding; const size_t src_i = b_idx * src_strides[0] + c_idx * src_strides[1] + src_l_idx * src_strides[2]; dst[tid] = src[src_i]; } } template <typename T> METAL_FUNC void upsample_nearest2d( constant size_t &w_out, constant size_t &h_out, constant float &w_scale, constant float &h_scale, constant size_t *src_dims, constant size_t *src_s, device const T *src, device T *dst, uint tid [[ thread_position_in_grid ]] ) { // src: (b_size, c_in, w_in, h_in) const size_t c = src_dims[1]; const size_t w_in = src_dims[2]; const size_t h_in = src_dims[3]; if (tid >= src_dims[0] * c * w_out * h_out) { return; } // TODO: Improve this. const size_t b_idx = tid / (w_out * h_out * c); const size_t c_idx = (tid / (w_out * h_out)) % c; const size_t dst_w = (tid / h_out) % w_out; const size_t dst_h = tid % h_out; size_t src_w = static_cast<size_t>(dst_w * w_scale); size_t src_h = static_cast<size_t>(dst_h * h_scale); if (src_w >= w_in) { src_w = w_in - 1; } if (src_h >= h_in) { src_h = h_in - 1; } const size_t src_i = b_idx * src_s[0] + c_idx * src_s[1] + src_w * src_s[2] + src_h * src_s[3]; dst[tid] = src[src_i]; } #define IM2COL_OP(T, FN_NAME) \ kernel void FN_NAME( \ constant size_t &dst_numel, \ constant size_t &h_out, \ constant size_t &w_out, \ constant size_t &h_k, \ constant size_t &w_k, \ constant size_t &stride, \ constant size_t &padding, \ constant size_t &dilation, \ constant size_t *src_dims, \ constant size_t *src_strides, \ device const T *src, \ device T *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ im2col<T>(dst_numel, h_out, w_out, h_k, w_k, stride, padding, dilation, src_dims, src_strides, src, dst, tid); \ } \ #define IM2COL1D_OP(T, FN_NAME) \ kernel void FN_NAME( \ constant size_t &dst_numel, \ constant size_t &l_out, \ constant size_t &l_k, \ constant size_t &stride, \ constant size_t &padding, \ constant size_t &dilation, \ constant size_t *src_dims, \ constant size_t *src_strides, \ device const T *src, \ device T *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ im2col1d<T>(dst_numel, l_out, l_k, stride, padding, dilation, src_dims, src_strides, src, dst, tid); \ } \ #define COL2IM1D_OP(T, FN_NAME) \ kernel void FN_NAME( \ constant size_t &dst_el, \ constant size_t &l_out, \ constant size_t &l_in, \ constant size_t &c_out, \ constant size_t &k_size, \ constant size_t &stride, \ device const T *src, \ device T *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ col2im1d<T>(dst_el, l_out, l_in, c_out, k_size, stride, src, dst, tid); \ } \ #define UPSAMPLE_NEAREST2D_OP(TYPENAME, FN_NAME) \ kernel void FN_NAME( \ constant size_t &w_out, \ constant size_t &h_out, \ constant float &w_scale, \ constant float &h_scale, \ constant size_t *dims, \ constant size_t *strides, \ device const TYPENAME *src, \ device TYPENAME *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ upsample_nearest2d<TYPENAME>(w_out, h_out, w_scale, h_scale, dims, strides, src, dst, tid); \ } \ template <typename T, typename A> METAL_FUNC void avg_pool2d( constant size_t &w_k, constant size_t &h_k, constant size_t &w_stride, constant size_t &h_stride, constant size_t *src_dims, constant size_t *src_strides, 
device const T *src, device T *dst, uint tid [[ thread_position_in_grid ]] ) { const size_t c = src_dims[1]; const size_t w_in = src_dims[2]; const size_t h_in = src_dims[3]; const size_t w_out = (w_in - w_k) / w_stride + 1; const size_t h_out = (h_in - h_k) / h_stride + 1; if (tid >= src_dims[0] * c * w_out * h_out) { return; } const size_t b_idx = tid / (w_out * h_out * c); const size_t c_idx = (tid / (w_out * h_out)) % c; const size_t dst_w = (tid / h_out) % w_out; const size_t dst_h = tid % h_out; const size_t src_idx0 = b_idx * src_strides[0]; A d = 0; for (size_t w_offset = 0; w_offset < w_k; ++w_offset) { size_t src_w = w_stride * dst_w + w_offset; if (src_w >= w_in){ continue; } for (size_t h_offset = 0; h_offset < h_k; ++h_offset) { size_t src_h = h_stride * dst_h + h_offset; if (src_h >= h_in) { continue; } const size_t src_idx = src_idx0 + c_idx * src_strides[1] + src_w * src_strides[2] + src_h * src_strides[3]; d += static_cast<A>(src[src_idx]); } } dst[tid] = static_cast<T>(d / (w_k * h_k)); } #define AVGPOOL2D_OP(TYPENAME, TYPEACC, FN_NAME) \ kernel void FN_NAME( \ constant size_t &w_k, \ constant size_t &h_k, \ constant size_t &w_s, \ constant size_t &h_s, \ constant size_t *src_dims, \ constant size_t *src_s, \ device const TYPENAME *src, \ device TYPENAME *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ avg_pool2d<TYPENAME, TYPEACC>(w_k, h_k, w_s, h_s, src_dims, src_s, src, dst, tid); \ } \ template <typename T> METAL_FUNC void max_pool2d( constant size_t &w_k, constant size_t &h_k, constant size_t &w_stride, constant size_t &h_stride, constant size_t *src_dims, constant size_t *src_strides, device const T *src, device T *dst, uint tid [[ thread_position_in_grid ]] ) { const size_t c = src_dims[1]; const size_t w_in = src_dims[2]; const size_t h_in = src_dims[3]; const size_t w_out = (w_in - w_k) / w_stride + 1; const size_t h_out = (h_in - h_k) / h_stride + 1; if (tid >= src_dims[0] * c * w_out * h_out) { return; } const size_t b_idx = tid / (w_out * h_out * c); const size_t c_idx = (tid / (w_out * h_out)) % c; const size_t dst_w = (tid / h_out) % w_out; const size_t dst_h = tid % h_out; const size_t src_idx0 = b_idx * src_strides[0]; T d = 0; bool set = false; for (size_t w_offset = 0; w_offset < w_k; ++w_offset) { size_t src_w = w_stride * dst_w + w_offset; if (src_w >= w_in){ continue; } for (size_t h_offset = 0; h_offset < h_k; ++h_offset) { size_t src_h = h_stride * dst_h + h_offset; if (src_h >= h_in) { continue; } const size_t src_idx = src_idx0 + c_idx * src_strides[1] + src_w * src_strides[2] + src_h * src_strides[3]; if (set) { d = MAX(d, src[src_idx]); } else { d = src[src_idx]; set = true; } } } dst[tid] = d; } #define MAXPOOL2D_OP(TYPENAME, FN_NAME) \ kernel void FN_NAME( \ constant size_t &w_k, \ constant size_t &h_k, \ constant size_t &w_s, \ constant size_t &h_s, \ constant size_t *src_dims, \ constant size_t *src_s, \ device const TYPENAME *src, \ device TYPENAME *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ max_pool2d<TYPENAME>(w_k, h_k, w_s, h_s, src_dims, src_s, src, dst, tid); \ } \ // Naive implementation of conv_transpose1d. 
template <typename T, typename A> METAL_FUNC void conv_transpose1d( constant size_t &l_out, constant size_t &stride, constant size_t &padding, constant size_t &out_padding, constant size_t &dilation, constant size_t *src_dims, constant size_t *src_strides, constant size_t *k_dims, constant size_t *k_strides, device const T *src, device const T *k, device T *dst, uint tid [[ thread_position_in_grid ]] ) { // src: (b_size, c_in, l_in) // kernel: (c_in, c_out, l_k) const size_t l_k = k_dims[2]; const size_t c_out = k_dims[1]; const size_t c_in = src_dims[1]; const size_t l_in = src_dims[2]; if (tid >= src_dims[0] * c_out * l_out) { return; } const size_t b_idx = tid / (l_out * c_out); const size_t dst_c_idx = (tid / l_out) % c_out; const size_t out_x = tid % l_out; const size_t src_idx0 = b_idx * src_strides[0]; A d = 0; for (int k_x = 0; k_x < (int)l_k; ++k_x) { // let out_x = inp_x * p.stride + k_x * p.dilation - p.padding; int inp_x_stride = (int)(out_x + padding) - k_x * dilation; if (inp_x_stride < 0 || inp_x_stride % stride) { continue; } int inp_x = inp_x_stride / stride; if (inp_x >= l_in) continue; for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) { const size_t src_idx = src_idx0 + src_c_idx * src_strides[1] + inp_x * src_strides[2]; const size_t k_idx = src_c_idx * k_strides[0] + dst_c_idx * k_strides[1] + k_x * k_strides[2]; d += static_cast<A>(src[src_idx]) * static_cast<A>(k[k_idx]); } } dst[tid] = static_cast<T>(d); } #define CONVT1D_OP(TYPENAME, TYPEACC, FN_NAME) \ kernel void FN_NAME( \ constant size_t &l_out, \ constant size_t &stride, \ constant size_t &padding, \ constant size_t &out_padding, \ constant size_t &dilation, \ constant size_t *src_dims, \ constant size_t *src_strides, \ constant size_t *k_dims, \ constant size_t *k_strides, \ device const TYPENAME *src, \ device const TYPENAME *k, \ device TYPENAME *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ conv_transpose1d<TYPENAME, TYPEACC>(l_out, stride, padding, out_padding, dilation, src_dims, src_strides, k_dims, k_strides, src, k, dst, tid); \ } \ template <typename T, typename A> METAL_FUNC void conv_transpose2d( constant size_t &w_out, constant size_t &h_out, constant size_t &stride, constant size_t &padding, constant size_t &out_padding, constant size_t &dilation, constant size_t *input_dims, constant size_t *input_stride, constant size_t *k_dims, constant size_t *k_stride, device const T *src, device const T *k, device T *dst, uint tid [[ thread_position_in_grid ]] ) { const size_t h_k = k_dims[2]; const size_t w_k = k_dims[3]; const size_t c_out = k_dims[1]; const size_t c_in = input_dims[1]; const size_t h_in = input_dims[2]; const size_t w_in = input_dims[3]; if (tid >= input_dims[0] * c_out * w_out * h_out) { return; } const size_t b_idx = tid / (w_out * h_out * c_out); const size_t dst_c_idx = (tid / (w_out * h_out)) % c_out; const size_t out_y = (tid / w_out) % h_out; const size_t out_x = tid % w_out; const size_t src_idx0 = b_idx * input_stride[0]; A d = 0; for (int k_x = 0; k_x < (int)w_k; ++k_x) { const int inp_x_stride = (int)(out_x + padding) - k_x * dilation; if (inp_x_stride < 0 || inp_x_stride % stride) { continue; } const int inp_x = inp_x_stride / stride; if (inp_x >= w_in) continue; for (int k_y = 0; k_y < (int)h_k; ++k_y) { const int inp_y_stride = (int)(out_y + padding) - k_y * dilation; if (inp_y_stride < 0 || inp_y_stride % stride) { continue; } const int inp_y = inp_y_stride / stride; if (inp_y >= h_in) continue; for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) { 
const size_t src_idx = src_idx0 + src_c_idx * input_stride[1] + inp_y * input_stride[2] + inp_x * input_stride[3]; const size_t k_idx = src_c_idx * k_stride[0] + dst_c_idx * k_stride[1] + k_y * k_stride[2] + k_x * k_stride[3]; d += static_cast<A>(src[src_idx]) * static_cast<A>(k[k_idx]); } } } dst[tid] = static_cast<T>(d); } #define CONVT2D_OP(TYPENAME, TYPEACC, FN_NAME) \ kernel void FN_NAME( \ constant size_t &w_out, \ constant size_t &h_out, \ constant size_t &stride, \ constant size_t &padding, \ constant size_t &out_padding, \ constant size_t &dilation, \ constant size_t *input_dims, \ constant size_t *input_stride, \ constant size_t *k_dims, \ constant size_t *k_stride, \ device const TYPENAME *src, \ device const TYPENAME *k, \ device TYPENAME *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ conv_transpose2d<TYPENAME, TYPEACC>(w_out, h_out, stride, padding, out_padding, dilation, input_dims, input_stride, k_dims, k_stride, src, k, dst, tid); \ } \ IM2COL_OP(float, im2col_f32) IM2COL_OP(half, im2col_f16) IM2COL_OP(uint8_t, im2col_u8) IM2COL_OP(uint32_t, im2col_u32) #if defined(__HAVE_BFLOAT__) IM2COL_OP(bfloat, im2col_bf16) #endif COL2IM1D_OP(float, col2im1d_f32) COL2IM1D_OP(uint8_t, col2im1d_u8) COL2IM1D_OP(uint32_t, col2im1d_u32) IM2COL1D_OP(float, im2col1d_f32) IM2COL1D_OP(uint8_t, im2col1d_u8) IM2COL1D_OP(uint32_t, im2col1d_u32) UPSAMPLE_NEAREST2D_OP(float, upsample_nearest2d_f32) UPSAMPLE_NEAREST2D_OP(half, upsample_nearest2d_f16) UPSAMPLE_NEAREST2D_OP(uint8_t, upsample_nearest2d_u8) UPSAMPLE_NEAREST2D_OP(uint32_t, upsample_nearest2d_u32) #if defined(__HAVE_BFLOAT__) UPSAMPLE_NEAREST2D_OP(bfloat, upsample_nearest2d_bf16) #endif MAXPOOL2D_OP(float, max_pool2d_f32) MAXPOOL2D_OP(half, max_pool2d_f16) MAXPOOL2D_OP(uint32_t, max_pool2d_u32) MAXPOOL2D_OP(uint8_t, max_pool2d_u8) #if defined(__HAVE_BFLOAT__) MAXPOOL2D_OP(bfloat, max_pool2d_bf16) #endif AVGPOOL2D_OP(float, float, avg_pool2d_f32) AVGPOOL2D_OP(half, float, avg_pool2d_f16) AVGPOOL2D_OP(uint32_t, uint32_t, avg_pool2d_u32) AVGPOOL2D_OP(uint8_t, uint8_t, avg_pool2d_u8) #if defined(__HAVE_BFLOAT__) AVGPOOL2D_OP(bfloat, float, avg_pool2d_bf16) #endif CONVT1D_OP(float, float, conv_transpose1d_f32) CONVT1D_OP(half, float, conv_transpose1d_f16) CONVT1D_OP(uint8_t, uint8_t, conv_transpose1d_u8) CONVT1D_OP(uint32_t, uint32_t, conv_transpose1d_u32) #if defined(__HAVE_BFLOAT__) CONVT1D_OP(bfloat, float, conv_transpose1d_bf16) #endif CONVT2D_OP(float, float, conv_transpose2d_f32) CONVT2D_OP(half, float, conv_transpose2d_f16) #if defined(__HAVE_BFLOAT__) CONVT1D_OP(bfloat, float, conv_transpose2d_bf16) #endif
candle/candle-metal-kernels/src/conv.metal/0
{ "file_path": "candle/candle-metal-kernels/src/conv.metal", "repo_id": "candle", "token_count": 8944 }
#pragma once #include <metal_stdlib> using namespace metal; METAL_FUNC uint nonzero(uint n) { return n == 0 ? 1 : n; } template<uint N> constexpr uint nonzero() { return N == 0 ? 1 : N; } template<typename T> constexpr ushort granularity() { return nonzero<vec_elements<T>::value>(); } METAL_FUNC uint next_p2(uint x) { return 1 << (32 - clz(x - 1)); } METAL_FUNC uint prev_p2(uint x) { return 1 << (31 - clz(x)); } constant uint MAX_SHARED_MEM = 32767; template<typename T> METAL_FUNC uint max_shared_mem(uint n) { return min(n, prev_p2(MAX_SHARED_MEM / sizeof(T))); } METAL_FUNC uint get_strided_index( uint idx, constant const uint &num_dims, constant const size_t *dims, constant const size_t *strides ) { uint strided_i = 0; for (uint d = 0; d < num_dims; d++) { uint dim_idx = num_dims - 1 - d; strided_i += (idx % dims[dim_idx]) * strides[dim_idx]; idx /= dims[dim_idx]; } return strided_i; }
candle/candle-metal-kernels/src/utils.metal/0
{ "file_path": "candle/candle-metal-kernels/src/utils.metal", "repo_id": "candle", "token_count": 453 }
//! Batch Normalization. //! //! This layer applies Batch Normalization over a mini-batch of inputs as described in [`Batch //! Normalization`]. The input is expected to have at least three dimensions. //! //! Note that this implementation is for inference only, there is no possibility to track the //! running stats. //! //! [`Batch Normalization`]: https://arxiv.org/abs/1502.03167 use candle::{DType, Result, Tensor, Var}; #[derive(Debug, Clone, Copy, PartialEq)] pub struct BatchNormConfig { pub eps: f64, pub remove_mean: bool, /// The meaning of affine here is different from LayerNorm: when false there is no learnable /// parameter at all, 1 used for gamma and 0 for beta. pub affine: bool, /// Controls exponential moving average of running stats. Defaults to 0.1 /// /// `running_stat * (1.0 - momentum) + stat * momentum`. pub momentum: f64, } impl Default for BatchNormConfig { fn default() -> Self { Self { eps: 1e-5, remove_mean: true, affine: true, momentum: 0.1, } } } impl From<f64> for BatchNormConfig { fn from(eps: f64) -> Self { Self { eps, ..Default::default() } } } #[derive(Clone, Debug)] pub struct BatchNorm { running_mean: Var, running_var: Var, weight_and_bias: Option<(Tensor, Tensor)>, remove_mean: bool, eps: f64, momentum: f64, } impl BatchNorm { fn check_validity(&self, num_features: usize) -> Result<()> { if self.eps < 0. { candle::bail!("batch-norm eps cannot be negative {}", self.eps) } if !(0.0..=1.0).contains(&self.momentum) { candle::bail!( "batch-norm momentum must be between 0 and 1, is {}", self.momentum ) } if self.running_mean.dims() != [num_features] { candle::bail!( "batch-norm running mean has unexpected shape {:?} should have shape [{num_features}]", self.running_mean.shape(), ) } if self.running_var.dims() != [num_features] { candle::bail!( "batch-norm running variance has unexpected shape {:?} should have shape [{num_features}]", self.running_var.shape(), ) } if let Some((ref weight, ref bias)) = self.weight_and_bias.as_ref() { if weight.dims() != [num_features] { candle::bail!( "batch-norm weight has unexpected shape {:?} should have shape [{num_features}]", weight.shape(), ) } if bias.dims() != [num_features] { candle::bail!( "batch-norm weight has unexpected shape {:?} should have shape [{num_features}]", bias.shape(), ) } } Ok(()) } pub fn new( num_features: usize, running_mean: Tensor, running_var: Tensor, weight: Tensor, bias: Tensor, eps: f64, ) -> Result<Self> { let out = Self { running_mean: Var::from_tensor(&running_mean)?, running_var: Var::from_tensor(&running_var)?, weight_and_bias: Some((weight, bias)), remove_mean: true, eps, momentum: 0.1, }; out.check_validity(num_features)?; Ok(out) } pub fn new_no_bias( num_features: usize, running_mean: Tensor, running_var: Tensor, eps: f64, ) -> Result<Self> { let out = Self { running_mean: Var::from_tensor(&running_mean)?, running_var: Var::from_tensor(&running_var)?, weight_and_bias: None, remove_mean: true, eps, momentum: 0.1, }; out.check_validity(num_features)?; Ok(out) } pub fn new_with_momentum( num_features: usize, running_mean: Tensor, running_var: Tensor, weight: Tensor, bias: Tensor, eps: f64, momentum: f64, ) -> Result<Self> { let out = Self { running_mean: Var::from_tensor(&running_mean)?, running_var: Var::from_tensor(&running_var)?, weight_and_bias: Some((weight, bias)), remove_mean: true, eps, momentum, }; out.check_validity(num_features)?; Ok(out) } pub fn new_no_bias_with_momentum( num_features: usize, running_mean: Tensor, running_var: Tensor, eps: f64, momentum: f64, ) -> Result<Self> 
{ let out = Self { running_mean: Var::from_tensor(&running_mean)?, running_var: Var::from_tensor(&running_var)?, weight_and_bias: None, remove_mean: true, eps, momentum, }; out.check_validity(num_features)?; Ok(out) } pub fn running_mean(&self) -> &Tensor { self.running_mean.as_tensor() } pub fn running_var(&self) -> &Tensor { self.running_var.as_tensor() } pub fn eps(&self) -> f64 { self.eps } pub fn weight_and_bias(&self) -> Option<(&Tensor, &Tensor)> { self.weight_and_bias.as_ref().map(|v| (&v.0, &v.1)) } pub fn momentum(&self) -> f64 { self.momentum } pub fn forward_train(&self, x: &Tensor) -> Result<Tensor> { let num_features = self.running_mean.as_tensor().dim(0)?; let x_dtype = x.dtype(); let internal_dtype = match x_dtype { DType::F16 | DType::BF16 => DType::F32, d => d, }; if x.rank() < 2 { candle::bail!( "batch-norm input tensor must have at least two dimensions ({:?})", x.shape() ) } if x.dim(1)? != num_features { candle::bail!( "batch-norm input doesn't have the expected number of features ({:?} <> {})", x.shape(), num_features ) } let x = x.to_dtype(internal_dtype)?; let x = x.transpose(0, 1)?; let x_dims_post_transpose = x.dims(); // Flatten all the dimensions exception the channel one as this performs a Spatial Batch // Normalization. let x = x.flatten_from(1)?.contiguous()?; let x = if self.remove_mean { // The mean is taken over dim 1 as this is the batch dim after the transpose(0, 1) above. let mean_x = x.mean_keepdim(1)?; let updated_running_mean = ((self.running_mean.as_tensor() * (1.0 - self.momentum))? + (mean_x.flatten_all()? * self.momentum)?)?; self.running_mean.set(&updated_running_mean)?; x.broadcast_sub(&mean_x)? } else { x }; // The mean is taken over dim 1 as this is the batch dim after the transpose(0, 1) above. let norm_x = x.sqr()?.mean_keepdim(1)?; let updated_running_var = { let batch_size = x.dim(1)? as f64; let running_var_weight = 1.0 - self.momentum; let norm_x_weight = self.momentum * batch_size / (batch_size - 1.0); ((self.running_var.as_tensor() * running_var_weight)? + (&norm_x.flatten_all()? * norm_x_weight)?)? }; self.running_var.set(&updated_running_var)?; let x = x .broadcast_div(&(norm_x + self.eps)?.sqrt()?)? .to_dtype(x_dtype)?; let x = match &self.weight_and_bias { None => x, Some((weight, bias)) => { let weight = weight.reshape(((), 1))?; let bias = bias.reshape(((), 1))?; x.broadcast_mul(&weight)?.broadcast_add(&bias)? } }; x.reshape(x_dims_post_transpose)?.transpose(0, 1) } fn forward_eval(&self, x: &Tensor) -> Result<Tensor> { let target_shape: Vec<usize> = x .dims() .iter() .enumerate() .map(|(idx, v)| if idx == 1 { *v } else { 1 }) .collect(); let target_shape = target_shape.as_slice(); let x = x .broadcast_sub( &self .running_mean .as_detached_tensor() .reshape(target_shape)?, )? .broadcast_div( &(self .running_var .as_detached_tensor() .reshape(target_shape)? + self.eps)? .sqrt()?, )?; match &self.weight_and_bias { None => Ok(x), Some((weight, bias)) => { let weight = weight.reshape(target_shape)?; let bias = bias.reshape(target_shape)?; x.broadcast_mul(&weight)?.broadcast_add(&bias) } } } } impl crate::ModuleT for BatchNorm { fn forward_t(&self, x: &Tensor, train: bool) -> Result<Tensor> { if train { self.forward_train(x) } else { self.forward_eval(x) } } } pub fn batch_norm<C: Into<BatchNormConfig>>( num_features: usize, config: C, vb: crate::VarBuilder, ) -> Result<BatchNorm> { use crate::Init; let config = config.into(); if config.eps < 0. 
{ candle::bail!("batch-norm eps cannot be negative {}", config.eps) } let running_mean = vb.get_with_hints(num_features, "running_mean", Init::Const(0.))?; let running_var = vb.get_with_hints(num_features, "running_var", Init::Const(1.))?; let weight_and_bias = if config.affine { let weight = vb.get_with_hints(num_features, "weight", Init::Const(1.))?; let bias = vb.get_with_hints(num_features, "bias", Init::Const(0.))?; Some((weight, bias)) } else { None }; Ok(BatchNorm { running_mean: Var::from_tensor(&running_mean)?, running_var: Var::from_tensor(&running_var)?, weight_and_bias, remove_mean: config.remove_mean, eps: config.eps, momentum: config.momentum, }) }
candle/candle-nn/src/batch_norm.rs/0
{ "file_path": "candle/candle-nn/src/batch_norm.rs", "repo_id": "candle", "token_count": 5325 }
//! Sequential Layer
//!
//! A sequential layer used to chain multiple layers and closures.
use candle::{Module, Result, Tensor};

/// A sequential layer combining multiple other layers.
pub struct Sequential {
    layers: Vec<Box<dyn Module>>,
}

/// Creates a new empty sequential layer.
pub fn seq() -> Sequential {
    Sequential { layers: vec![] }
}

impl Sequential {
    /// The number of sub-layers embedded in this layer.
    pub fn len(&self) -> i64 {
        self.layers.len() as i64
    }

    /// Returns true if this layer does not have any sub-layer.
    pub fn is_empty(&self) -> bool {
        self.layers.is_empty()
    }
}

impl Module for Sequential {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let mut xs = xs.clone();
        for layer in self.layers.iter() {
            xs = layer.forward(&xs)?
        }
        Ok(xs)
    }
}

impl Sequential {
    /// Appends a layer after all the current layers.
    #[allow(clippy::should_implement_trait)]
    pub fn add<M: Module + 'static>(mut self, layer: M) -> Self {
        self.layers.push(Box::new(layer));
        self
    }

    /// Appends a closure after all the current layers.
    pub fn add_fn<F>(self, f: F) -> Self
    where
        F: 'static + Fn(&Tensor) -> Result<Tensor> + Send + Sync,
    {
        self.add(super::func(f))
    }

    /// Applies the forward pass and returns the output for each layer.
    pub fn forward_all(&self, xs: &Tensor) -> Result<Vec<Tensor>> {
        let mut vec = Vec::with_capacity(self.layers.len());
        let mut xs = xs.clone();
        for layer in self.layers.iter() {
            xs = layer.forward(&xs)?;
            vec.push(xs.clone())
        }
        Ok(vec)
    }
}
candle/candle-nn/src/sequential.rs/0
{ "file_path": "candle/candle-nn/src/sequential.rs", "repo_id": "candle", "token_count": 714 }
use crate::onnx::attribute_proto::AttributeType; use crate::onnx::tensor_proto::DataType; use crate::onnx::{self, GraphProto}; use candle::{bail, DType, Device, Result, Tensor}; use std::collections::{HashMap, HashSet}; pub type Value = Tensor; pub fn dtype(dt: DataType) -> Option<DType> { match dt { DataType::Uint8 => Some(DType::U8), DataType::Uint32 => Some(DType::U32), DataType::Int64 => Some(DType::I64), DataType::Float16 => Some(DType::F16), DataType::Float => Some(DType::F32), DataType::Double => Some(DType::F64), DataType::Bool => Some(DType::U8), _ => None, } } trait Attr { const TYPE: AttributeType; fn get(attr: &onnx::AttributeProto) -> Result<&Self>; } trait AttrOwned: Sized { const TYPE: AttributeType; fn get(attr: &onnx::AttributeProto) -> Result<Self>; } impl Attr for i64 { const TYPE: AttributeType = AttributeType::Int; fn get(attr: &onnx::AttributeProto) -> Result<&Self> { Ok(&attr.i) } } impl Attr for f32 { const TYPE: AttributeType = AttributeType::Float; fn get(attr: &onnx::AttributeProto) -> Result<&Self> { Ok(&attr.f) } } impl Attr for [i64] { const TYPE: AttributeType = AttributeType::Ints; fn get(attr: &onnx::AttributeProto) -> Result<&Self> { Ok(attr.ints.as_slice()) } } impl Attr for str { const TYPE: AttributeType = AttributeType::String; fn get(attr: &onnx::AttributeProto) -> Result<&Self> { std::str::from_utf8(&attr.s).map_err(candle::Error::wrap) } } impl Attr for GraphProto { const TYPE: AttributeType = AttributeType::Graph; fn get(attr: &onnx::AttributeProto) -> Result<&Self> { attr.g .as_ref() .ok_or_else(|| candle::Error::Msg("attribute does not contain graph".to_string())) } } impl AttrOwned for Vec<String> { const TYPE: AttributeType = AttributeType::Strings; fn get(attr: &onnx::AttributeProto) -> Result<Self> { let mut ret = vec![]; for bytes in attr.strings.iter() { let s = String::from_utf8(bytes.clone()).map_err(candle::Error::wrap)?; ret.push(s); } Ok(ret) } } impl AttrOwned for Tensor { const TYPE: AttributeType = AttributeType::Tensor; fn get(attr: &onnx::AttributeProto) -> Result<Self> { let tensor_proto = match &attr.t { Some(value) => value, None => bail!( "attribute {} was of type TENSOR, but no tensor was found", attr.name ), }; let data_type = match DataType::try_from(tensor_proto.data_type) { Ok(value) => value, Err(_) => bail!( "attribute {} of type TENSOR was an invalid data_type number {}", attr.name, tensor_proto.data_type ), }; let dtype = match dtype(data_type) { Some(value) => value, None => bail!( "attribute {} of type TENSOR has an unsupported data_type {}", attr.name, data_type.as_str_name() ), }; let mut dims = Vec::with_capacity(tensor_proto.dims.len()); for dim in &tensor_proto.dims { if dim < &0 { bail!( "attribute {} of type TENSOR has a negative dimension, which is unsupported", attr.name ) } dims.push(*dim as usize) } Tensor::from_raw_buffer(&tensor_proto.raw_data, dtype, &dims, &Device::Cpu) } } fn get_attr_<'a>(node: &'a onnx::NodeProto, name: &str) -> Result<&'a onnx::AttributeProto> { match node.attribute.iter().find(|attr| attr.name == name) { None => { bail!( "cannot find the '{name}' attribute in '{}' for {}", node.op_type, node.name ) } Some(dt) => Ok(dt), } } fn get_attr<'a, T: Attr + ?Sized>(node: &'a onnx::NodeProto, name: &str) -> Result<&'a T> { let attr = get_attr_(node, name)?; if attr.r#type() != T::TYPE { bail!( "unsupported type {:?} for '{name}' attribute in '{}' for {}", attr.r#type, node.op_type, node.name ) } T::get(attr) } fn get_attr_opt<'a, T: Attr + ?Sized>( node: &'a onnx::NodeProto, name: &str, ) 
-> Result<Option<&'a T>> { match node.attribute.iter().find(|attr| attr.name == name) { None => Ok(None), Some(attr) => { if attr.r#type() != T::TYPE { bail!( "unsupported type {:?} for '{name}' attribute in '{}' for {}", attr.r#type, node.op_type, node.name ) } let val = T::get(attr)?; Ok(Some(val)) } } } fn get_attr_opt_owned<T: AttrOwned>(node: &onnx::NodeProto, name: &str) -> Result<Option<T>> { match node.attribute.iter().find(|attr| attr.name == name) { None => Ok(None), Some(attr) => { if attr.r#type() != T::TYPE { bail!( "unsupported type {:?} for '{name}' attribute in '{}' for {}", attr.r#type, node.op_type, node.name ) } let val = T::get(attr)?; Ok(Some(val)) } } } pub fn get_tensor(t: &onnx::TensorProto, name: &str) -> Result<Tensor> { let dims: Vec<usize> = t.dims.iter().map(|&x| x as usize).collect(); match DataType::try_from(t.data_type) { Ok(DataType::Int32) => { if t.int32_data.is_empty() { let len = t.raw_data.len() / 4; let data: &[i32] = unsafe { std::slice::from_raw_parts(t.raw_data.as_ptr() as *const i32, len) }; let data = data.iter().map(|v| *v as i64).collect::<Vec<_>>(); Tensor::from_vec(data, len, &Device::Cpu) } else { let data = t.int32_data.iter().map(|v| *v as i64).collect::<Vec<_>>(); Tensor::from_vec(data, t.int32_data.len(), &Device::Cpu) } } Ok(dt) => match dtype(dt) { Some(dt) => { if dt == DType::F32 && !t.float_data.is_empty() { Tensor::from_slice(&t.float_data, dims.as_slice(), &Device::Cpu) } else if dt == DType::F64 && !t.double_data.is_empty() { Tensor::from_slice(&t.double_data, dims.as_slice(), &Device::Cpu) } else if dt == DType::I64 && !t.int64_data.is_empty() { Tensor::from_slice(&t.int64_data, dims.as_slice(), &Device::Cpu) } else { Tensor::from_raw_buffer( t.raw_data.as_slice(), dt, dims.as_slice(), &Device::Cpu, ) } } None => { bail!("unsupported 'value' data-type {dt:?} for {name}") } }, Err(_) => { bail!("unsupported 'value' data-type {} for {name}", t.data_type,) } } } // This function provides a direct evaluation of the proto. // Longer-term, we should first convert the proto to an intermediate representation of the compute // graph so as to make multiple evaluations more efficient. // An example upside of this would be to remove intermediary values when they are not needed // anymore. 
pub fn simple_eval( model: &onnx::ModelProto, mut inputs: HashMap<String, Value>, ) -> Result<HashMap<String, Value>> { let graph = match &model.graph { None => bail!("no graph defined in proto"), Some(graph) => graph, }; simple_eval_(graph, &mut inputs) } fn simple_eval_( graph: &onnx::GraphProto, values: &mut HashMap<String, Value>, ) -> Result<HashMap<String, Value>> { for t in graph.initializer.iter() { let tensor = get_tensor(t, t.name.as_str())?; values.insert(t.name.to_string(), tensor); } for input in graph.input.iter() { let input_type = match &input.r#type { Some(input_type) => input_type, None => continue, }; let input_type = match &input_type.value { Some(input_type) => input_type, None => continue, }; let tensor_type = match input_type { onnx::type_proto::Value::TensorType(tt) => tt, _ => continue, }; let tensor = match values.get(&input.name) { None => bail!("missing input {}", input.name), Some(tensor) => tensor, }; let dt = match DataType::try_from(tensor_type.elem_type) { Ok(dt) => match dtype(dt) { Some(dt) => dt, None => { bail!("unsupported 'value' data-type {dt:?} for {}", input.name) } }, type_ => bail!("unsupported input type {type_:?}"), }; match &tensor_type.shape { None => continue, Some(shape) => { if shape.dim.len() != tensor.rank() { bail!( "unexpected rank for {}, got {:?}, expected {:?}", input.name, shape.dim, tensor.shape() ) } for (idx, (d, &dim)) in shape.dim.iter().zip(tensor.dims().iter()).enumerate() { match &d.value { Some(onnx::tensor_shape_proto::dimension::Value::DimValue(v)) => { if *v as usize != dim { bail!( "unexpected dim {idx} for {}, got {:?}, expected {:?}", input.name, shape.dim, tensor.shape() ) } } // We do not check equality constraints for the DimParam dimensions for now. Some(onnx::tensor_shape_proto::dimension::Value::DimParam(_)) | None => (), } } } }; if dt != tensor.dtype() { bail!( "unexpected dtype for {}, got {:?}, expected {dt:?}", input.name, tensor.dtype() ) } } // The nodes are topologically sorted so we can just process them in order. for node in graph.node.iter() { let get = |input_name: &str| match values.get(input_name) { Some(value) => Ok(value), None => bail!("cannot find {input_name} for op '{}'", node.name), }; let get_opt = |i: usize| { node.input .get(i) .filter(|s: &&String| !s.is_empty()) .map(|s| get(s)) }; // TODO: Validate node.input for each operator. match node.op_type.as_str() { "Add" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; let output = input0.broadcast_add(input1)?; values.insert(node.output[0].clone(), output); } "Sub" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; let output = input0.broadcast_sub(input1)?; values.insert(node.output[0].clone(), output); } "Mul" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; let output = input0.broadcast_mul(input1)?; values.insert(node.output[0].clone(), output); } "Div" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; let output = input0.broadcast_div(input1)?; values.insert(node.output[0].clone(), output); } "Pow" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; // HACK: current implementation of broadcast_pow cannot handle negative base, // so we use powf where we can, which *does* correctly handle negative base. 
if let Ok(exp) = (|| input1.to_dtype(DType::F64)?.to_scalar::<f64>())() { let output = input0.powf(exp)?; values.insert(node.output[0].clone(), output); } else { let output = input0.broadcast_pow(input1)?; values.insert(node.output[0].clone(), output); } } "Exp" => { let xs = get(&node.input[0])?; let output = xs.exp()?; values.insert(node.output[0].clone(), output); } "Equal" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; let output = input0.broadcast_eq(input1)?; values.insert(node.output[0].clone(), output); } "Not" => { let xs = get(&node.input[0])?; let xs = xs.eq(&xs.zeros_like()?)?; values.insert(node.output[0].clone(), xs); } "MatMul" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; let output = input0.broadcast_matmul(input1)?; values.insert(node.output[0].clone(), output); } "Reshape" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?.to_vec1::<i64>()?; // TODO: Check that there is at most a single -1 or 0, handle other neg values. let mut other_than_minus1 = 1usize; for &v in input1.iter() { if v != -1 && v != 0 { other_than_minus1 *= v as usize } } let input1 = input1 .iter() .enumerate() .map(|(idx, &v)| match v { -1 => Ok(input0.elem_count() / other_than_minus1), 0 => input0.dim(idx), _ => Ok(v as usize), }) .collect::<Result<Vec<usize>>>()?; let output = input0.reshape(input1)?; values.insert(node.output[0].clone(), output); } "LogSoftmax" => { let input = get(&node.input[0])?; let output = match get_attr_opt::<i64>(node, "axis")? { None => candle_nn::ops::softmax_last_dim(input)?, Some(&axis) => { let axis = input.normalize_axis(axis)?; candle_nn::ops::log_softmax(input, axis)? } }; values.insert(node.output[0].clone(), output); } "Softmax" => { let input = get(&node.input[0])?; let output = match get_attr_opt::<i64>(node, "axis")? { None => candle_nn::ops::softmax_last_dim(input)?, Some(&axis) => { let axis = input.normalize_axis(axis)?; candle_nn::ops::softmax(input, axis)? } }; values.insert(node.output[0].clone(), output); } "Transpose" => { let input = get(&node.input[0])?; let output = match get_attr_opt::<[i64]>(node, "perm")? { None => input.t()?, Some(perm) => { let perm = perm.iter().map(|&v| v as usize).collect::<Vec<_>>(); input.permute(perm)? } }; values.insert(node.output[0].clone(), output); } "Dropout" => { let input = get(&node.input[0])?; // Do not apply dropout at the moment, consider that we're only doing inference. values.insert(node.output[0].clone(), input.clone()); } "MaxPool" => { // https://github.com/onnx/onnx/blob/main/docs/Operators.md#MaxPool let dilations = get_attr_opt::<[i64]>(node, "dilations")?; let kernel_shape = get_attr::<[i64]>(node, "kernel_shape")?; let pads = get_attr_opt::<[i64]>(node, "pads")?; let strides = get_attr_opt::<[i64]>(node, "strides")?; let auto_pad = get_attr_opt::<str>(node, "auto_pad")?; match auto_pad { None | Some("NOTSET") => (), Some(s) => bail!("unsupported auto_pad {s}"), }; if let Some(d) = dilations { if d.iter().any(|&v| v != 1) { bail!("MaxPool with dilation != 1, {dilations:?}") } } if let Some(d) = pads { if d.iter().any(|&v| v != 0) { bail!("MaxPool with pads != 0, {pads:?}") } } let xs = get(&node.input[0])?; let (k1, k2) = match kernel_shape { [k1, k2] => (*k1 as usize, *k2 as usize), _ => bail!("only 2d MaxPool is supported, kernel shape {kernel_shape:?}"), }; let ys = match strides { None => xs.max_pool2d((k1, k2))?, Some([s1, s2]) => { xs.max_pool2d_with_stride((k1, k2), (*s1 as usize, *s2 as usize))? 
} Some(strides) => bail!("only 2d MaxPool is supported, strides {strides:?}"), }; values.insert(node.output[0].clone(), ys); } "AveragePool" => { // https://github.com/onnx/onnx/blob/main/docs/Operators.md#AveragePool let dilations = get_attr_opt::<[i64]>(node, "dilations")?; let kernel_shape = get_attr::<[i64]>(node, "kernel_shape")?; let pads = get_attr_opt::<[i64]>(node, "pads")?; let strides = get_attr_opt::<[i64]>(node, "strides")?; let auto_pad = get_attr_opt::<str>(node, "auto_pad")?; match auto_pad { None | Some("NOTSET") => (), Some(s) => bail!("unsupported auto_pad {s}"), }; if let Some(d) = dilations { if d.iter().any(|&v| v != 1) { bail!("AvgPool with dilation != 1, {dilations:?}") } } if let Some(d) = pads { if d.iter().any(|&v| v != 0) { bail!("AvgPool with pads != 0, {pads:?}") } } let xs = get(&node.input[0])?; let (k1, k2) = match kernel_shape { [k1, k2] => (*k1 as usize, *k2 as usize), _ => bail!("only 2d AvgPool is supported, kernel shape {kernel_shape:?}"), }; let ys = match strides { None => xs.avg_pool2d((k1, k2))?, Some([s1, s2]) => { xs.avg_pool2d_with_stride((k1, k2), (*s1 as usize, *s2 as usize))? } Some(strides) => bail!("only 2d AvgPool is supported, strides {strides:?}"), }; values.insert(node.output[0].clone(), ys); } "BatchNormalization" => { let training_mode = get_attr_opt::<i64>(node, "training_mode")?; if training_mode.copied().unwrap_or(0) != 0 { bail!("training mode is not supported for BatchNorm") } let eps = get_attr_opt::<f32>(node, "epsilon")? .copied() .unwrap_or(1e-5); let xs = get(&node.input[0])?; let weight = get(&node.input[1])?; let bias = get(&node.input[2])?; let running_mean = get(&node.input[3])?; let running_var = get(&node.input[4])?; let target_shape: Vec<usize> = xs .dims() .iter() .enumerate() .map(|(idx, v)| if idx == 1 { *v } else { 1 }) .collect(); let target_shape = target_shape.as_slice(); let xs = xs .broadcast_sub(&running_mean.reshape(target_shape)?)? .broadcast_div(&(running_var.reshape(target_shape)? + eps as f64)?.sqrt()?)?; let weight = weight.reshape(target_shape)?; let bias = bias.reshape(target_shape)?; let xs = xs.broadcast_mul(&weight)?.broadcast_add(&bias)?; values.insert(node.output[0].clone(), xs); } "Squeeze" => { let xs = get(&node.input[0])?; let mut axes = if node.input.len() <= 1 { // contract all the dimensions with size 1 except the batch dim. xs.dims() .iter() .enumerate() .flat_map(|(idx, &s)| if s == 1 && idx > 0 { Some(idx) } else { None }) .collect() } else { get(&node.input[1])? .to_vec1::<i64>()? .iter() .map(|&i| xs.normalize_axis(i)) .collect::<Result<Vec<_>>>()? }; axes.sort(); let mut xs = xs.clone(); for &axis in axes.iter().rev() { xs = xs.squeeze(axis)? } values.insert(node.output[0].clone(), xs); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#ConstantOfShape "ConstantOfShape" => { let input = get(&node.input[0])?; let value = get_attr_opt_owned::<Tensor>(node, "value")?.unwrap_or(Tensor::zeros( (), DType::F32, &Device::Cpu, )?); let xs = Tensor::ones(input.shape(), value.dtype(), input.device())? .broadcast_mul(&value)?; values.insert(node.output[0].clone(), xs); } "Unsqueeze" => { let xs = get(&node.input[0])?; let axes = match get_attr_opt::<[i64]>(node, "axes")? 
{ Some(axis) => axis.to_vec(), None => get(&node.input[1])?.to_vec1::<i64>()?, }; let mut axes = axes .iter() .map(|&i| { if i == xs.rank() as i64 { Ok(xs.rank()) } else if i < 0 { // normalize_axis doesn't work correctly here // because we actually want normalized with respect // to the final size, not the current (off by one) Ok(xs.rank() - (-i as usize) + 1) } else { xs.normalize_axis(i) } }) .collect::<Result<Vec<_>>>()?; axes.sort(); let mut xs = xs.clone(); for &axis in axes.iter().rev() { xs = xs.unsqueeze(axis)? } values.insert(node.output[0].clone(), xs); } "Clip" => { let xs = get(&node.input[0])?; let xs = if let Some(mins) = get_opt(1) { xs.broadcast_maximum(mins?)? } else { xs.clone() }; let xs = if let Some(maxs) = get_opt(2) { xs.broadcast_minimum(maxs?)? } else { xs.clone() }; values.insert(node.output[0].clone(), xs); } "Gather" => { // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Gather let xs = get(&node.input[0])?; let indices = get(&node.input[1])?; let axis = get_attr_opt::<i64>(node, "axis")?.copied().unwrap_or(0); let axis = xs.normalize_axis(axis)?; // index_select does not support negative indices, so normalize them // to positive indices. let indices = &{ let zeros = Tensor::zeros(indices.shape(), indices.dtype(), indices.device())?; let max = Tensor::new(xs.dims()[axis] as i64, indices.device())? .to_dtype(indices.dtype())?; let mask = indices.lt(&zeros)?; mask.to_dtype(indices.dtype())? .broadcast_mul(&max)? .add(indices)? }; // In Pytorch or Numpy this can be done by indexing the xs tensor using the indices // tensor directly, but candle does not support tensor indexing at the moment, so // some workarounds must be done. let xs = match indices.dims() { [] => { let index = indices.to_vec0::<i64>()? as usize; xs.narrow(axis, index, 1)?.squeeze(axis)? } [_] => xs.index_select(indices, axis)?, [first, _] => { let mut v = Vec::with_capacity(*first); for i in 0..*first { v.push(xs.index_select(&indices.get(i)?, axis)?) } Tensor::stack(&v, axis)? } _ => { // TODO: Provide an op to handle the ONNX generalized gather op ideally in a // differentiable way. todo!("implement gather for {xs:?} {indices:?} axis {axis}") } }; values.insert(node.output[0].clone(), xs); } // https://onnx.ai/onnx/operators/onnx__GatherElements.html#gatherelements // A Note to fellow lurkers: // The numpy based `gather_elements` implementation in `onnx` tests [here](https://github.com/onnx/onnx/blob/main/onnx/backend/test/case/node/gatherelements.py) // and examples is incorrect. // Use `torch.gather` for the validating/ verifying against the proper behaviour "GatherElements" => { let data = get(&node.input[0])?; let indices = get(&node.input[1])?; let rank = data.rank(); if rank != indices.rank() { bail!("indices must have same rank as input data. Data rank [{}] != indices rank [{}]", data.rank(), indices.rank()); } let axis = { let axis_i64 = get_attr_opt::<i64>(node, "axis")?.copied().unwrap_or(0); let axis = data.normalize_axis(axis_i64)?; if axis >= rank { bail!( "axis ({}) out of accepted range [-rank, rank-1] which was [-{rank}, {}]", axis_i64, rank - 1 ) } axis }; // index_select does not support negative indices, so normalize them // to positive indices. let indices = &{ let zeros = Tensor::zeros(indices.shape(), indices.dtype(), indices.device())?; let max = Tensor::new(data.dims()[axis] as i64, indices.device())? .to_dtype(indices.dtype())?; let mask = indices.lt(&zeros)?; mask.to_dtype(indices.dtype())? .broadcast_mul(&max)? .add(indices)? 
}; values.insert(node.output[0].clone(), data.gather(indices, axis)?); } "Shape" => { // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Shape let xs = get(&node.input[0])?; let start = get_attr_opt::<i64>(node, "start")?.copied().unwrap_or(0); let end = get_attr_opt::<i64>(node, "end")?.copied().unwrap_or(-1); let start = xs.normalize_axis(start)?; let end = xs.normalize_axis(end)?; let mut dims = vec![]; for idx in start..=end { dims.push(xs.dim(idx)? as i64) } let dims = Tensor::from_vec(dims, xs.rank(), xs.device())?; values.insert(node.output[0].clone(), dims); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Size "Size" => { let data = get(&node.input[0])?; let size: usize = data.dims().iter().product(); let output = Tensor::from_slice(&[size as i64], (), data.device())?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Sqrt "Sqrt" => { let xs = get(&node.input[0])?; let output = xs.sqrt()?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Range "Range" => { let start = get(&node.input[0])?; let limit = get(&node.input[1])?; let delta = get(&node.input[2])?; macro_rules! arange_step { ($t: ty) => { Tensor::arange_step( start.to_vec0::<$t>()?, limit.to_vec0::<$t>()?, delta.to_vec0::<$t>()?, &Device::Cpu, )? }; } let output = match start.dtype() { DType::U8 => arange_step!(u8), DType::U32 => arange_step!(u32), DType::I64 => arange_step!(i64), DType::BF16 => arange_step!(f32), DType::F16 => arange_step!(f32), DType::F32 => arange_step!(f32), DType::F64 => arange_step!(f64), }; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Greater "Greater" => { let a = get(&node.input[0])?; let b = get(&node.input[1])?; let output = a.broadcast_gt(b)?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Less "Less" => { let a = get(&node.input[0])?; let b = get(&node.input[1])?; let output = a.broadcast_lt(b)?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Log "Log" => { let a = get(&node.input[0])?; let output = a.log()?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Min "Min" => { let mut output = get(&node.input[0])?.clone(); for input in node.input.iter() { let input = get(input)?; output = output.broadcast_minimum(input)? } values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Where "Where" => { let cond = get(&node.input[0])?; let a = get(&node.input[1])?; let b = get(&node.input[2])?; // where_cond requires that all inputs are the same shape. // In contrast, the Where op in ONNX only requires that they are broadcastable. 
let shape = broadcast_shape_from_many(&[cond.dims(), a.dims(), b.dims()])?; let cond = cond.broadcast_as(shape.clone())?; let a = a.broadcast_as(shape.clone())?; let b = b.broadcast_as(shape)?; let output = cond.where_cond(&a, &b)?; values.insert(node.output[0].clone(), output); } "Conv" => { // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Conv let dilations = get_attr_opt::<[i64]>(node, "dilations")?; let groups = get_attr_opt::<i64>(node, "group")?.copied().unwrap_or(1); let _kernel_shape = get_attr_opt::<[i64]>(node, "kernel_shape")?; let pads = get_attr_opt::<[i64]>(node, "pads")?; let strides = get_attr_opt::<[i64]>(node, "strides")?; let auto_pad = get_attr_opt::<str>(node, "auto_pad")?; match auto_pad { None | Some("NOTSET") => (), Some(s) => bail!("unsupported auto_pad {s}"), }; let xs = get(&node.input[0])?; let ws = get(&node.input[1])?; let ys = match ws.rank() { 3 => { let (pads, xs) = match pads { None => (0, xs.clone()), Some([p]) => (*p as usize, xs.clone()), Some([p1, p2]) => { if p1 != p2 { (0usize, xs.pad_with_zeros(2, *p1 as usize, *p2 as usize)?) } else { (*p1 as usize, xs.clone()) } } Some(pads) => { bail!("more pads than expected in conv1d {pads:?} {}", node.name) } }; let strides = match strides { None => 1, Some([p]) => *p as usize, Some(s) => { bail!("more strides than expected in conv1d {s:?} {}", node.name) } }; let dilations = match dilations { None => 1, Some([p]) => *p as usize, Some(s) => { bail!("more dilations than expected in conv1d {s:?} {}", node.name) } }; xs.conv1d(ws, pads, strides, dilations, groups as usize)? } 4 => { let (pads, xs) = match pads { None => (0, xs.clone()), Some([p]) => (*p as usize, xs.clone()), Some(&[p1, p2, p3, p4]) => { let p1 = p1 as usize; let p2 = p2 as usize; let p3 = p3 as usize; let p4 = p4 as usize; if p1 != p2 || p1 != p3 || p1 != p4 { (0, xs.pad_with_zeros(2, p1, p3)?.pad_with_zeros(3, p2, p4)?) } else { (p1, xs.clone()) } } Some(pads) => { bail!("more pads than expected in conv2d {pads:?} {}", node.name) } }; let strides = match strides { None => 1, Some([p]) => *p as usize, Some([p1, p2]) => { if p1 != p2 { bail!( "strides have to be the same on both axis {pads:?} {}", node.name ) } *p1 as usize } Some(s) => { bail!("more strides than expected in conv2d {s:?} {}", node.name) } }; let dilations = match dilations { None => 1, Some([p]) => *p as usize, Some([p1, p2]) => { if p1 != p2 { bail!( "dilations have to be the same on both axis {pads:?} {}", node.name ) } *p1 as usize } Some(s) => { bail!("more dilations than expected in conv2d {s:?} {}", node.name) } }; xs.conv2d(ws, pads, strides, dilations, groups as usize)? } rank => bail!( "unsupported rank for weight matrix {rank} in conv {}", node.name ), }; let ys = if node.input.len() > 2 { let bs = get(&node.input[2])?; let mut bs_shape = vec![1; ys.rank()]; bs_shape[1] = bs.elem_count(); ys.broadcast_add(&bs.reshape(bs_shape)?)? 
} else { ys }; values.insert(node.output[0].clone(), ys); } "Concat" => { // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Concat let inputs = node .input .iter() .map(|n| Ok(get(n.as_str())?.clone())) .collect::<Result<Vec<Value>>>()?; let axis: i64 = *get_attr(node, "axis")?; if inputs.is_empty() { bail!("empty concat") }; let axis = inputs[0].normalize_axis(axis)?; let output = Tensor::cat(&inputs, axis)?; values.insert(node.output[0].clone(), output); } "Abs" => { let input = get(&node.input[0])?; let output = input.abs()?; values.insert(node.output[0].clone(), output); } "Cos" => { let input = get(&node.input[0])?; let output = input.cos()?; values.insert(node.output[0].clone(), output); } "Sin" => { let input = get(&node.input[0])?; let output = input.sin()?; values.insert(node.output[0].clone(), output); } "Neg" => { let input = get(&node.input[0])?; let output = input.neg()?; values.insert(node.output[0].clone(), output); } "Erf" => { let input = get(&node.input[0])?; let output = input.erf()?; values.insert(node.output[0].clone(), output); } "Tanh" => { let input = get(&node.input[0])?; let output = input.tanh()?; values.insert(node.output[0].clone(), output); } "Sigmoid" => { let input = get(&node.input[0])?; let output = candle_nn::ops::sigmoid(input)?; values.insert(node.output[0].clone(), output); } "Gelu" => { let input = get(&node.input[0])?; let output = input.gelu_erf()?; values.insert(node.output[0].clone(), output); } "Relu" => { let input = get(&node.input[0])?; let output = input.relu()?; values.insert(node.output[0].clone(), output); } "Ceil" => { let input = get(&node.input[0])?; let output = input.ceil()?; values.insert(node.output[0].clone(), output); } "Floor" => { let input = get(&node.input[0])?; let output = input.floor()?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Constant "Constant" => { let value = match node.attribute.iter().find(|attr| attr.name == "value") { None => { // TODO: support sparse_value etc. bail!("cannot find 'value' attr in 'Constant' for {}", node.name) } Some(value) => value, }; let output = match value.r#type() { AttributeType::Tensor => { let t = value.t.as_ref().unwrap(); get_tensor(t, &node.name)? } rtype => bail!("unsupported 'value' type {rtype:?} for {}", node.name), }; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Cast "Cast" => { let input = get(&node.input[0])?; let dt: i64 = *get_attr(node, "to")?; let dtype = match DataType::try_from(dt as i32) { Ok(DataType::Int32) => DType::I64, Ok(dt) => match dtype(dt) { Some(dt) => dt, None => { bail!("unsupported 'to' value {dt:?} for cast {}", node.name) } }, Err(_) => { bail!("unsupported 'to' value {dt:?} for cast {}", node.name) } }; let output = input.to_dtype(dtype)?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#CumSum "CumSum" => { let exclusive = get_attr_opt::<i64>(node, "exclusive")? .copied() .unwrap_or(0); let reverse = get_attr_opt::<i64>(node, "reverse")?.copied().unwrap_or(0); if exclusive != 0 { bail!("only exclusive == 0 is supported in CumSum") } if reverse != 0 { bail!("only reverse == 0 is supported in CumSum") } let input = get(&node.input[0])?; let axis = get(&node.input[1])? .to_dtype(DType::U32)? 
.to_vec0::<u32>()?; let output = input.cumsum(axis as usize)?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#flatten "Flatten" => { let axis = get_attr_opt::<i64>(node, "axis")?.copied().unwrap_or(1) as usize; let input = get(&node.input[0])?; let first_part: usize = input.shape().dims().iter().take(axis).product(); let end_index = input.shape().dims().iter().product::<usize>(); let new_shape = (first_part, end_index / first_part); let output = input.reshape(new_shape)?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#identity "Identity" => { let input = get(&node.input[0])?; values.insert(node.output[0].clone(), input.clone()); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#if "If" => { // protobuf encodes boolean false as 0 and true as 1 let cond = get(&node.input[0])?.get(0)?.to_scalar::<u8>()?; let attr_name = if cond != 0 { "then_branch" } else { "else_branch" }; let sub_graph = get_attr::<GraphProto>(node, attr_name)?; if sub_graph.output.len() != node.output.len() { bail!( "If node {:?} is malformed: branch outputs ({}) don't match node outputs ({})", node.name, sub_graph.output.len(), node.output.len() ); } let branch_out = simple_eval_(sub_graph, values)?; for (i, out) in node.output.iter().enumerate() { values.insert( out.clone(), branch_out.get(&sub_graph.output[i].name).unwrap().clone(), ); } } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#pad "Pad" => { let mode = get_attr_opt(node, "mode")?.unwrap_or("constant"); let data = get(&node.input[0])?; let pads = get(&node.input[1])?; if node.input.len() > 2 { bail!( "unsupported number of inputs {} for Pad node {:?}, expected 2", node.input.len(), node.name ); } if pads.rank() != 1 { bail!("Pad expects 'pads' input to be 1D vector: {pads:?}"); } if pads.dim(0).unwrap() != 2 * data.rank() { bail!("Pad expects 'pads' input len to be 2 * rank of 'data' input: pads: {}, data rank: {}", pads, data.rank()); } let pads = pads.to_vec1::<i64>()?; let (pads_pre, pads_post) = pads.split_at(pads.len() / 2); match mode { "reflect" => { let mut out = data.clone(); for (i, &dim) in data.dims().iter().enumerate().rev() { if pads_pre[i] == 0 && pads_post[i] == 0 { continue; } fn zigzag(min: i64, max: i64) -> impl Iterator<Item = i64> { std::iter::repeat((min..max).chain((min + 1..=max).rev())).flatten() } let idx = if dim > 1 { let cycle_len = dim * 2 - 2; let skip = cycle_len - ((pads_pre[i] as usize) % cycle_len); let idx = zigzag(0, (dim - 1) as i64) .skip(skip) .take((pads_pre[i] as usize) + dim + (pads_post[i] as usize)); Tensor::from_iter(idx, out.device())? } else { Tensor::full(0i64, (dim,), out.device())? }; out = out.index_select(&idx, i)?; } values.insert(node.output[0].clone(), out); } _ => bail!( "unsupported 'mode' value {mode:?} for Pad node {:?}", node.name ), } } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#slice "Slice" => { let data = get(&node.input[0])?; let starts = get(&node.input[1])?; let ends = get(&node.input[2])?; let default_axes; let default_steps; let axes: &Tensor; let steps: &Tensor; // If axes are omitted, they are set to [0, ..., r-1]. 
If steps are omitted, // they are set to [1, ..., 1] of length len(starts) match node.input.len() { 3 => { let len = starts.dims()[0]; default_axes = Some(Tensor::arange(0, len as i64, starts.device())?); axes = default_axes.as_ref().unwrap(); default_steps = Some(Tensor::ones((len,), DType::I64, starts.device())?); steps = default_steps.as_ref().unwrap(); } 4 => { let len = starts.dims()[0]; axes = get(&node.input[3])?; default_steps = Some(Tensor::ones((len,), DType::I64, starts.device())?); steps = default_steps.as_ref().unwrap(); } 5 => { steps = get(&node.input[4])?; axes = get(&node.input[3])?; } _ => bail!( "Slice node is invalid, expected 3-5 inputs, got {}: {:?}", node.input.len(), node ), } let mut out = data.clone(); for (i, axis) in axes.to_vec1::<i64>()?.into_iter().enumerate() { // All negative elements of axes are made non-negative by // adding r to them, where r = rank(input). let axis = if axis < 0 { axis + data.rank() as i64 } else { axis } as usize; let data_dim = data.dims()[axis] as i64; let mut s = starts.get(i)?.to_scalar::<i64>()?; let mut e = ends.get(i)?.to_scalar::<i64>()?; // All negative values in starts[i] and ends[i] have // dims[axes[i]] added to them, where dims are the // dimensions of input. if s < 0 { s += data_dim; } if e < 0 { e += data_dim; } let p = steps.get(i)?.to_scalar::<i64>()?; // starts[i] is clamped into the range [0, dims[axes[i]]] // for positive stepping and [0, dims[axes[i]]-1] for // negative stepping. // for positive stepping ends[axes[i]] is clamped to // [0, dims[axes[i]]], while for negative stepping it is // clamped to [-1, dims[axes[i]]-1]. if p >= 0 { s = s.clamp(0, data_dim); e = e.clamp(0, data_dim); } else { s = s.clamp(0, data_dim - 1); e = e.clamp(-1, data_dim - 1); } let indexes = Tensor::arange_step(s, e, p, data.device())?; out = out.index_select(&indexes, axis)? } values.insert(node.output[0].clone(), out); } // https://onnx.ai/onnx/operators/onnx__ReduceMax.html#reducemax "ReduceMax" => { let input = get(&node.input[0])?; let axes = get_opt(1); let keepdims = get_attr_opt::<i64>(node, "keepdims")?.copied().unwrap_or(1) == 1; let axes = if let Some(Ok(axes)) = axes { // Satisfies version 18+ axes.to_vec1::<i64>().ok() } else if let Ok(Some(axes)) = get_attr_opt::<[i64]>(node, "axes") { // Backward compatiblity with version 13 and below Some(axes.to_vec()) } else { None }; let axes = if let Some(axes) = axes { let rank = input.rank(); let mut axes_set = HashSet::new(); let mut axes = axes .iter() .map(|a| { let axis = if *a < 0 { (rank as i64 + *a) as usize } else { *a as usize }; axes_set.insert(axis); axis }) .collect::<Vec<_>>(); if axes_set.len() < axes.len() { bail!("Duplicate value in 'axes'"); } if axes.len() > 1 { axes.sort(); } Some(axes) } else { None }; // TODO: Handle empty set // Definition: // "Reduction over an empty set of values yields minus infinity (if supported by the datatype) or the minimum value of the data type otherwise" // For now, this will throw an error if input.elem_count() == 0 { bail!("reduction over zero-size tensor not supported"); } let output = if let Some(axes) = axes { let mut result = input.clone(); for &axis in axes.iter().rev() { result = if keepdims { result.max_keepdim(axis)? } else { result.max(axis)? 
} } result } else { // If `axes` is empty and `noop_with_empty_axes` is set to `true (1)` // ""input tensor will not be reduced,and the output tensor would be equivalent to input tensor."" if get_attr_opt::<i64>(node, "noop_with_empty_axes")?.copied() == Some(1) { input.clone() } else { let mut result = input.flatten_all()?; if keepdims { result = result.max_keepdim(0)?; // If keepdims is true, reshape to match input dimensions let shape = vec![1; input.rank()]; result.reshape(shape)? } else { result.max(0)? } } }; values.insert(node.output[0].clone(), output); } // https://onnx.ai/onnx/operators/onnx__ReduceMean.html#reducemean-13 // TODO: This version is only compatible with ReduceMean V13 and below. "ReduceMean" => { let input = get(&node.input[0])?; let axes = get_attr_opt::<[i64]>(node, "axes")?; let keepdims = get_attr_opt::<i64>(node, "keepdims")?.copied().unwrap_or(1); let n_dims = input.dims().len(); let axes: Vec<usize> = if let Some(axes) = axes { axes.iter() .map(|e| (if e < &0 { (n_dims as i64) + *e } else { *e }) as usize) .collect() } else { (0..n_dims).collect() }; let output = if keepdims == 1 { input.mean_keepdim(axes)? } else { input.mean(axes)? }; values.insert(node.output[0].clone(), output); } // https://onnx.ai/onnx/operators/onnx__ReduceMin.html#reducemin "ReduceMin" => { let input = get(&node.input[0])?; let axes = get_opt(1); let keepdims = get_attr_opt::<i64>(node, "keepdims")?.copied().unwrap_or(1) == 1; let axes = if let Some(Ok(axes)) = axes { // Satisfies version 18+ axes.to_vec1::<i64>().ok() } else if let Ok(Some(axes)) = get_attr_opt::<[i64]>(node, "axes") { // Backward compatiblity with version 13 and below Some(axes.to_vec()) } else { None }; let axes = if let Some(axes) = axes { let rank = input.rank(); let mut axes_set = HashSet::new(); let mut axes = axes .iter() .map(|a| { let axis = if *a < 0 { (rank as i64 + *a) as usize } else { *a as usize }; axes_set.insert(axis); axis }) .collect::<Vec<_>>(); if axes_set.len() < axes.len() { bail!("Duplicate value in 'axes'"); } if axes.len() > 1 { axes.sort(); } Some(axes) } else { None }; // TODO: Handle empty set // Definition: // "Reduction over an empty set of values yields positive infinity (if supported by the datatype) or the max value of the data type otherwise" // For now, this will throw an error if input.elem_count() == 0 { bail!("reduction over zero-size tensor not supported"); } let output = if let Some(axes) = axes { let mut result = input.clone(); for &axis in axes.iter().rev() { result = if keepdims { result.min_keepdim(axis)? } else { result.min(axis)? } } result } else { // If `axes` is empty and `noop_with_empty_axes` is set to `true (1)` // ""input tensor will not be reduced,and the output tensor would be equivalent to input tensor."" if get_attr_opt::<i64>(node, "noop_with_empty_axes")?.copied() == Some(1) { input.clone() } else { let mut result = input.flatten_all()?; if keepdims { result = result.min_keepdim(0)?; // If keepdims is true, reshape to match input dimensions let shape = vec![1; input.rank()]; result.reshape(shape)? } else { result.min(0)? 
} } }; values.insert(node.output[0].clone(), output); } //https://github.com/onnx/onnx/blob/main/docs/Operators.md#Split // Version 18 impl "Split" => { let input_tensor = get(&node.input[0])?; let axis = get_attr_opt::<i64>(node, "axis")?.copied().unwrap_or(0); let axis = input_tensor.normalize_axis(axis)?; // Determine split sizes let splits = if node.input.len() > 1 { // If the split tensor is provided, use it to determine sizes let split_tensor = get(&node.input[1])?.to_vec1::<i64>()?; split_tensor.iter().map(|&x| x as usize).collect::<Vec<_>>() } else { let num_outputs = if let Some(&num_outputs_attrib) = get_attr_opt::<i64>(node, "num_outputs")? { num_outputs_attrib as usize } else { node.output.len() }; let input_dim = input_tensor.dim(axis)?; let mut split_sizes = vec![input_dim / num_outputs as usize; num_outputs as usize]; let remainder = input_dim % num_outputs as usize; if remainder > 0 { // If there's a remainder, add it to the last split size split_sizes[num_outputs as usize - 1] += remainder; } split_sizes }; // Perform the split operation let mut outputs = vec![]; let mut start = 0; for &size in &splits { let end = start + size; let slice = input_tensor.narrow(axis, start, size)?; outputs.push(slice); start = end; } // Insert the split outputs into the values map for (output, slice) in node.output.iter().zip(outputs.into_iter()) { values.insert(output.clone(), slice); } } //https://github.com/onnx/onnx/blob/main/docs/Operators.md#Expand // Version 13 impl "Expand" => { // unlike broadcast_to, expand allows for the output shape to // be different from the specified shape. let input_tensor = get(&node.input[0])?; let input_shape = get(&node.input[1])?; // Check that the shape tensor is 1D if input_shape.rank() != 1 { bail!( "Expand expects 'shape' input to be 1D tensor: {:?}", input_shape ); } let input_tensor_dims = input_tensor.dims(); let input_shape_dims = input_shape .to_vec1::<i64>()? .into_iter() .map(|x| x as usize) .collect::<Vec<_>>(); let target_shape = broadcast_shape(input_tensor_dims, input_shape_dims.as_slice())?; let expanded_tensor = input_tensor.broadcast_as(target_shape)?; values.insert(node.output[0].clone(), expanded_tensor); } //https://github.com/onnx/onnx/blob/main/docs/Operators.md#ReduceSum // Version 13 impl "ReduceSum" => { let input = get(&node.input[0])?; let axes = get_opt(1); let keepdims = get_attr_opt::<i64>(node, "keepdims")?.copied().unwrap_or(1); let noop_with_empty_axes = get_attr_opt::<i64>(node, "noop_with_empty_axes")? .copied() .unwrap_or(0); let axes = match axes { Some(Ok(axes)) => axes .to_vec1::<i64>()? .into_iter() .map(|x| x as usize) .collect::<Vec<_>>(), Some(Err(_)) | None => { if noop_with_empty_axes == 1 { vec![] } else { (0..input.rank()).collect() } } }; let output = if keepdims == 1 { input.sum_keepdim(axes)? } else { input.sum(axes)? }; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#ReduceL2 // Version 18 impl "ReduceL2" => { let input = get(&node.input[0])?; let axes = get_opt(1); let keepdims = get_attr_opt::<i64>(node, "keepdims")?.copied().unwrap_or(1); let noop_with_empty_axes = get_attr_opt::<i64>(node, "noop_with_empty_axes")? .copied() .unwrap_or(0); let input_sq = input.sqr()?; let axes = match axes { Some(axes) => axes? .to_vec1::<i64>()? 
.into_iter() .map(|x| x as usize) .collect::<Vec<_>>(), None => { if noop_with_empty_axes == 1 { vec![] } else { (0..input_sq.rank()).collect() } } }; let output = if keepdims == 1 { input_sq.sum_keepdim(axes)?.sqrt()? } else { input_sq.sum(axes)?.sqrt()? }; values.insert(node.output[0].clone(), output); } random_type @ ("RandomUniform" | "RandomNormal") => { let dt: i64 = get_attr_opt(node, "dtype")?.copied().unwrap_or(1); // 1 is float // type by // default let dtype = match DataType::try_from(dt as i32) { Ok(dt) => match dtype(dt) { Some(DType::U8 | DType::U32 | DType::I64) => { bail!( "unsupported 'dtype' value {dt:?}, only floats are allowed, for {random_type} {}", node.name ) } Some(dt) => dt, None => { bail!( "unsupported 'dtype' value {dt:?} for {random_type} {}", node.name ) } }, Err(_) => { bail!( "unsupported 'dtype' value {dt:?} for {random_type} {}", node.name ) } }; let seed: Option<f32> = get_attr_opt(node, "seed")?.copied(); if seed.is_some() { bail!("seed for {random_type} is currently not supported") }; let shape: Vec<usize> = get_attr::<[i64]>(node, "shape")? .iter() .map(|x| *x as usize) .collect(); let output = if random_type == "RandomUniform" { let low: f32 = get_attr_opt(node, "low")?.copied().unwrap_or(0.0); let high: f32 = get_attr_opt(node, "high")?.copied().unwrap_or(1.0); Tensor::rand(low, high, shape, &Device::Cpu)?.to_dtype(dtype)? } else { let mean: f32 = get_attr_opt(node, "mean")?.copied().unwrap_or(0.0); let scale: f32 = get_attr_opt(node, "scale")?.copied().unwrap_or(1.0); Tensor::randn(mean, scale, shape, &Device::Cpu)?.to_dtype(dtype)? }; values.insert(node.output[0].clone(), output); } "ArgMin" => { let input = get(&node.input[0])?; let axis_i64: i64 = get_attr_opt(node, "axis")?.copied().unwrap_or(0); let rank_i64: i64 = input.rank().try_into().unwrap(); if axis_i64 < -rank_i64 || axis_i64 >= rank_i64 { bail!( "axis ({}) out of accepted range [-rank, rank-1] which was [{}, {}]", axis_i64, -rank_i64, rank_i64 - 1 ) } let axis = input.normalize_axis(axis_i64)?; let keepdims: i64 = get_attr_opt(node, "keepdims")?.copied().unwrap_or(1); let select_last_index: i64 = get_attr_opt(node, "select_last_index")? .copied() .unwrap_or(0); if select_last_index == 1 { bail!("select_last_index for ArgMin is currently not supported") } let output = if keepdims == 1 { input.argmin_keepdim(axis)? } else { input.argmin(axis)? } .to_dtype(DType::I64)?; values.insert(node.output[0].clone(), output); } "ArgMax" => { let input = get(&node.input[0])?; let axis_i64: i64 = get_attr_opt(node, "axis")?.copied().unwrap_or(0); let rank_i64: i64 = input.rank().try_into().unwrap(); if axis_i64 < -rank_i64 || axis_i64 >= rank_i64 { bail!( "axis ({}) out of accepted range [-rank, rank-1] which was [{}, {}]", axis_i64, -rank_i64, rank_i64 - 1 ) } let axis = input.normalize_axis(axis_i64)?; let keepdims: i64 = get_attr_opt(node, "keepdims")?.copied().unwrap_or(1); let select_last_index: i64 = get_attr_opt(node, "select_last_index")? .copied() .unwrap_or(0); if select_last_index == 1 { bail!("select_last_index for ArgMin is currently not supported") } let output = if keepdims == 1 { input.argmax_keepdim(axis)? } else { input.argmax(axis)? 
} .to_dtype(DType::I64)?; values.insert(node.output[0].clone(), output); } "LeakyRelu" => { let input = get(&node.input[0])?; let dt = input.dtype(); match dt { DType::U8 | DType::U32 | DType::I64 => { bail!( "unsupported dtype {}, only float types are allowed for LeakyRelu", dt.as_str() ) } DType::BF16 | DType::F16 | DType::F32 | DType::F64 => {} } let alpha = get_attr_opt::<f32>(node, "alpha")?.copied().unwrap_or(0.01); let output = candle_nn::ops::leaky_relu(input, alpha.into())?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Gemm "Gemm" => { let a = get(&node.input[0])?; let b = get(&node.input[1])?; let c = get(&node.input[2])?; let alpha = get_attr_opt::<f32>(node, "alpha")?.copied().unwrap_or(1.0); let beta = get_attr_opt::<f32>(node, "beta")?.copied().unwrap_or(1.0); let alpha = Tensor::full(alpha, a.shape(), &Device::Cpu)?; let beta = Tensor::full(beta, c.shape(), &Device::Cpu)?; let trans_a = get_attr_opt::<i64>(node, "transA")?.copied().unwrap_or(0); let trans_b = get_attr_opt::<i64>(node, "transB")?.copied().unwrap_or(0); let a = if trans_a == 0 { a.clone() } else { a.t()? }; let b = if trans_b == 0 { b.clone() } else { b.t()? }; let output = a .broadcast_mul(&alpha)? .broadcast_matmul(&b)? .broadcast_add(&c.broadcast_mul(&beta)?)?; values.insert(node.output[0].clone(), output); } "LSTM" => { let direction = get_attr_opt(node, "direction")?.unwrap_or("forward"); if direction != "forward" { bail!("LSTM currently only supports direction == \"forward\""); } let num_directions = if direction == "bidirectional" { 2 } else { 1 }; let hidden_size: i64 = get_attr(node, "hidden_size").copied()?; let input_forget = get_attr_opt(node, "input_forget")?.copied().unwrap_or(0); if input_forget != 0 { bail!("LSTM currently only supports input_forget == 0"); } let activations_default = vec![ "Sigmoid".to_string(), "Tanh".to_string(), "Tanh".to_string(), ]; let activations = get_attr_opt_owned::<Vec<String>>(node, "activations")? .unwrap_or(activations_default.clone()); if activations != activations_default { bail!("LSTM currently only supports default activations ({activations_default:?})"); } // activation_alpha and activation_beta don't apply to (Sigmoid, Tanh, Tanh) so ignoring them is okay if get_attr_opt::<f32>(node, "clip")?.is_some() { bail!("LSTM does not currently support clip attribute"); } // The shape format of inputs X, initial_h and outputs Y, Y_h. // If 0, the following shapes are expected: // X.shape = [seq_length, batch_size, input_size], // Y.shape = [seq_length, num_directions, batch_size, hidden_size], // initial_h.shape = Y_h.shape = [num_directions, batch_size, hidden_size]. // If 1, the following shapes are expected: // X.shape = [batch_size, seq_length, input_size], // Y.shape = [batch_size, seq_length, num_directions, hidden_size], // initial_h.shape = Y_h.shape = [batch_size, num_directions, hidden_size]. let layout = get_attr_opt(node, "layout")?.copied().unwrap_or(0); if layout != 0 { bail!("LSTM currently only supports layout == 0"); } // The input sequences packed (and potentially padded) into one 3-D tensor // with the shape of `[seq_length, batch_size, input_size]`. let x = get(&node.input[0])?; // XXX: depends on layout let (seq_length, batch_size, input_size) = x.dims3()?; // The weight tensor for the gates. // Concatenation of `W[iofc]` and `WB[iofc]` (if bidirectional) along dimension 0. // The tensor has shape `[num_directions, 4*hidden_size, input_size]`. 
let w = get(&node.input[1])?; // The recurrence weight tensor. // Concatenation of `R[iofc]` and `RB[iofc]` (if bidirectional) along dimension 0. // This tensor has shape `[num_directions, 4*hidden_size, hidden_size]`. let r = get(&node.input[2])?; // The bias tensor for input gate. // Concatenation of `[Wb[iofc], Rb[iofc]]`, and `[WBb[iofc], RBb[iofc]]` (if bidirectional) along dimension 0. // This tensor has shape `[num_directions, 8*hidden_size]`. // Optional: If not specified - assumed to be 0. let b_default: Tensor; let b = match get_opt(3) { Some(n) => n?, None => { b_default = Tensor::zeros( (num_directions, 8 * hidden_size as usize), DType::F32, x.device(), )?; &b_default } }; // Optional tensor specifying lengths of the sequences in a batch. // If not specified - assumed all sequences in the batch to have length `seq_length`. // It has shape `[batch_size]`. let seq_lens_default: Tensor; let seq_lens = match get_opt(4) { Some(n) => n?, None => { seq_lens_default = Tensor::full(seq_length as i64, (batch_size,), x.device())?; &seq_lens_default } }; let seq_lens_is_default = (seq_lens.to_vec1::<i64>()?.iter()).all(|e| *e as usize == seq_length); if !seq_lens_is_default { bail!("LSTM currently only supports default value of seq_lens"); } // Optional initial value of the hidden. If not specified - assumed to be 0. // It has shape `[num_directions, batch_size, hidden_size]`. let initial_h_default: Tensor; let initial_h = match get_opt(5) { Some(n) => n?, _ => { initial_h_default = Tensor::zeros( (num_directions, batch_size, hidden_size as usize), DType::F32, x.device(), )?; &initial_h_default } }; // Optional initial value of the cell. // If not specified - assumed to be 0. // It has shape `[num_directions, batch_size, hidden_size]`. let initial_c_default: Tensor; let initial_c = match node.input.get(6) { Some(n) if !n.is_empty() => get(n)?, _ => { initial_c_default = Tensor::zeros( (num_directions, batch_size, hidden_size as usize), DType::F32, x.device(), )?; &initial_c_default } }; // The weight tensor for peepholes. // Concatenation of `P[iof]` and `PB[iof]` (if bidirectional) along dimension 0. // It has shape `[num_directions, 3*hidde_size]`. Optional: If not specified - assumed to be 0. let p_default = Tensor::zeros( (num_directions, 3 * hidden_size as usize), DType::F32, x.device(), )?; let p = get_opt(7).unwrap_or(Ok(&p_default))?; let p_is_zeros = (p.to_vec2::<f32>()?.iter()).all(|v| v.iter().all(|e| *e == 0.0)); if !p_is_zeros { bail!( "LSTM currently only supports default value of p (a Tensor of all zeroes)" ); } // these all have [num_directions, ...] 
shapes let w = w.get(0)?; // w[iofc] has shape [4*hidden_size, input_size] let r = r.get(0)?; // r[iofc] has shape [4*hidden_size, hidden_size] let b = b.get(0)?; // concat of [wb[iofc],rb[iofc]] has shape [8*hidden_size] let idx_wb = Tensor::arange(0, 4 * hidden_size, x.device())?; let idx_rb = Tensor::arange(4 * hidden_size, 8 * hidden_size, x.device())?; let wb = b.index_select(&idx_wb, 0)?; let rb = b.index_select(&idx_rb, 0)?; let c = initial_c.get(0)?; let h = initial_h.get(0)?; // w, r, wb, rb are all iofc but lstm expects ifco // so we need to move some stuff around let idx_i = Tensor::arange(0, hidden_size, x.device())?; let idx_o = Tensor::arange(hidden_size, 2 * hidden_size, x.device())?; let idx_f = Tensor::arange(2 * hidden_size, 3 * hidden_size, x.device())?; let idx_c = Tensor::arange(3 * hidden_size, 4 * hidden_size, x.device())?; let idx_ifco = Tensor::cat(&[&idx_i, &idx_f, &idx_c, &idx_o], 0)?; let w = w.index_select(&idx_ifco, 0)?; let r = r.index_select(&idx_ifco, 0)?; let wb = wb.index_select(&idx_ifco, 0)?; let rb = rb.index_select(&idx_ifco, 0)?; let vmap = candle_nn::VarMap::new(); vmap.data().lock().unwrap().extend([ ("weight_ih_l0".to_string(), candle::Var::from_tensor(&w)?), ("weight_hh_l0".to_string(), candle::Var::from_tensor(&r)?), ("bias_ih_l0".to_string(), candle::Var::from_tensor(&wb)?), ("bias_hh_l0".to_string(), candle::Var::from_tensor(&rb)?), ]); use candle_nn::rnn::RNN as _; let lstm = candle_nn::rnn::lstm( input_size, hidden_size as usize, candle_nn::rnn::LSTMConfig::default(), candle_nn::VarBuilder::from_varmap(&vmap, w.dtype(), w.device()), )?; let mut lstm_state = candle_nn::rnn::LSTMState::new(h, c); let mut h_acc = if node.output.first().map(String::as_str).unwrap_or("") != "" { Some(vec![]) } else { None }; for t in 0..seq_length { let x = x.get(t)?; lstm_state = lstm.step(&x, &lstm_state)?; if let Some(h_acc) = &mut h_acc { h_acc.push(lstm_state.clone()); } } assert_eq!(num_directions, 1, "if support for bidirectional is ever added, outputs will have to be concatenated, not simply reshaped"); if let Some(name) = node.output.first() { let h_acc = h_acc.as_ref().unwrap(); let h_acc = lstm.states_to_tensor(h_acc)?; let h_acc = h_acc.reshape(( seq_length, num_directions, batch_size, hidden_size as usize, ))?; values.insert(name.clone(), h_acc); } if let Some(name) = node.output.get(1) { values.insert( name.clone(), lstm_state.h().reshape(( num_directions, batch_size, hidden_size as usize, ))?, ); } if let Some(name) = node.output.get(2) { values.insert( name.clone(), lstm_state.c().reshape(( num_directions, batch_size, hidden_size as usize, ))?, ); } } // https://onnx.ai/onnx/operators/onnx__Xor.html "Xor" => { // Since we don't have a `DType::Bool` yet, this ensures that we are working with `0`(False) & `1`(True) let a = get(&node.input[0])?.gt(0_u8)?; let b = get(&node.input[1])?.gt(0_u8)?; let out = a.broadcast_add(&b)?.eq(1_u8)?; values.insert(node.output[0].clone(), out); } // https://onnx.ai/onnx/operators/onnx__Sign.html "Sign" => { let input = get(&node.input[0])?; let output = input.sign()?; values.insert(node.output[0].clone(), output); } op_type => bail!("unsupported op_type {op_type} for op {node:?}"), } } graph .output .iter() .map(|output| match values.remove(&output.name) { None => bail!("cannot find output {}", output.name), Some(value) => Ok((output.name.clone(), value)), }) .collect() } fn broadcast_shape(shape_a: &[usize], shape_b: &[usize]) -> Result<Vec<usize>> { let (longest, shortest) = if shape_a.len() > shape_b.len() { 
(shape_a, shape_b) } else { (shape_b, shape_a) }; let diff = longest.len() - shortest.len(); let mut target_shape = longest[0..diff].to_vec(); for (dim1, dim2) in longest[diff..].iter().zip(shortest.iter()) { if *dim1 == *dim2 || *dim2 == 1 || *dim1 == 1 { target_shape.push(usize::max(*dim1, *dim2)); } else { bail!( "Expand: incompatible shapes for broadcast, {:?} and {:?}", shape_a, shape_b ); } } Ok(target_shape) } fn broadcast_shape_from_many(shapes: &[&[usize]]) -> Result<Vec<usize>> { if shapes.is_empty() { return Ok(Vec::new()); } let mut shape_out = shapes[0].to_vec(); for shape in shapes[1..].iter() { shape_out = broadcast_shape(&shape_out, shape)?; } Ok(shape_out) }
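// A quick illustration (not taken from the original file) of the numpy-style broadcasting rules
// implemented by `broadcast_shape` and `broadcast_shape_from_many` above:
//   broadcast_shape(&[3, 1, 5], &[4, 1])  -> Ok(vec![3, 4, 5])
//   broadcast_shape(&[2, 3], &[4, 3])     -> an error, since 2 and 4 differ and neither is 1
//   broadcast_shape_from_many(&[&[2, 1], &[1, 3], &[2, 3]]) -> Ok(vec![2, 3])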
candle/candle-onnx/src/eval.rs/0
{ "file_path": "candle/candle-onnx/src/eval.rs", "repo_id": "candle", "token_count": 53114 }
import candle from typing import Dict, Tuple, Any from candle import Tensor, QTensor, utils, nn from candle.nn import Module, ModuleList def masked_fill(on_false: Tensor, mask: Tensor, on_true: Tensor): shape = mask.shape on_true = candle.tensor(on_true).broadcast_as(shape) return mask.where_cond(on_true, on_false) def precompute_freqs_cis(hparams: Dict[str, Any], freq_base: float, max_seq_len: int): head_dim = hparams["n_embd"] // hparams["n_head"] theta = [1.0 / freq_base ** (i / head_dim) for i in range(0, head_dim, 2)] theta = candle.tensor(theta) idx_theta = [float(i) for i in range(max_seq_len)] idx_theta = candle.tensor(idx_theta).reshape((max_seq_len, 1)) m = idx_theta.matmul(theta.unsqueeze(0)) return (m.cos(), m.sin()) class RmsNorm(Module): def __init__(self, qtensor: QTensor): super().__init__() self.weight = qtensor.dequantize() def forward(self, x: Tensor) -> Tensor: b_size, seq_len, hidden_size = x.shape norm_x = x.sqr().sum_keepdim(2) / hidden_size x_normed = x.broadcast_div((norm_x + 1e-5).sqrt()) return x_normed.broadcast_mul(self.weight) class QuantizedLayer(Module): def __init__( self, layer_idx: int, hparams: Dict[str, Any], all_tensors: Dict[str, QTensor], cos_sin: Tuple[Tensor, Tensor], ): super().__init__() p = f"layers.{layer_idx}" self.attention_wq = all_tensors[f"{p}.attention.wq.weight"] self.attention_wk = all_tensors[f"{p}.attention.wk.weight"] self.attention_wv = all_tensors[f"{p}.attention.wv.weight"] self.attention_wo = all_tensors[f"{p}.attention.wo.weight"] self.ffw1 = all_tensors[f"{p}.feed_forward.w1.weight"] self.ffw2 = all_tensors[f"{p}.feed_forward.w2.weight"] self.ffw3 = all_tensors[f"{p}.feed_forward.w3.weight"] self.attn_norm = RmsNorm(all_tensors[f"{p}.attention_norm.weight"]) self.ffn_norm = RmsNorm(all_tensors[f"{p}.ffn_norm.weight"]) self.n_head = hparams["n_head"] self.n_kv_head = self.n_head self.head_dim = hparams["n_embd"] // self.n_head self.kv_cache = None self.cos = cos_sin[0] self.sin = cos_sin[1] self._non_persistent_buffers_set.add("cos") self._non_persistent_buffers_set.add("sin") def forward(self, x: Tensor, mask: Tensor, index_pos: int) -> Tensor: residual = x x = self.attn_norm(x) attn = self.forward_attn(x, mask, index_pos) x = attn + residual residual = x x = self.ffn_norm(x) w1 = self.ffw1.matmul_t(x) w3 = self.ffw3.matmul_t(x) mlp = self.ffw2.matmul_t(nn.silu(w1) * w3) return mlp + residual def forward_attn(self, x: Tensor, mask: Tensor, index_pos: int): b_size, seq_len, n_embd = x.shape q = self.attention_wq.matmul_t(x) k = self.attention_wk.matmul_t(x) v = self.attention_wv.matmul_t(x) q = q.reshape((b_size, seq_len, self.n_head, self.head_dim)).transpose(1, 2) k = k.reshape((b_size, seq_len, self.n_kv_head, self.head_dim)).transpose(1, 2) v = v.reshape((b_size, seq_len, self.n_kv_head, self.head_dim)).transpose(1, 2) q = self.apply_rotary_emb(q, index_pos) k = self.apply_rotary_emb(k, index_pos) if self.kv_cache is not None and index_pos > 0: prev_k, prev_v = self.kv_cache k = candle.cat([prev_k, k], 2).contiguous() v = candle.cat([prev_v, v], 2).contiguous() self.kv_cache = (k, v) # TODO: maybe repeat k/v here if we start supporting MQA. 
att = q.matmul(k.t()) / self.head_dim**0.5 mask = mask.broadcast_as(att.shape) att = masked_fill(att, mask, float("-inf")) att = nn.softmax(att, -1) y = att.matmul(v.contiguous()) y = y.transpose(1, 2).reshape((b_size, seq_len, n_embd)) return self.attention_wo.matmul_t(y) def apply_rotary_emb(self, x: Tensor, index_pos: int): b_size, n_head, seq_len, n_embd = x.shape cos = self.cos.narrow(0, index_pos, seq_len).reshape((seq_len, n_embd // 2, 1)) sin = self.sin.narrow(0, index_pos, seq_len).reshape((seq_len, n_embd // 2, 1)) x = x.reshape((b_size, n_head, seq_len, n_embd // 2, 2)) x0 = x.narrow(-1, 0, 1) x1 = x.narrow(-1, 1, 1) y0 = x0.broadcast_mul(cos) - x1.broadcast_mul(sin) y1 = x0.broadcast_mul(sin) + x1.broadcast_mul(cos) rope = candle.cat([y0, y1], -1) return rope.flatten_from(-2) class QuantizedLlama(Module): def __init__(self, hparams: Dict[str, Any], all_tensors: Dict[str, QTensor]): super().__init__() self.tok_embeddings = all_tensors["tok_embeddings.weight"].dequantize() self.norm = RmsNorm(all_tensors["norm.weight"]) self.output = all_tensors["output.weight"] self.layers = ModuleList() rope_freq = hparams.get("rope_freq", 10000.0) cos_sin = precompute_freqs_cis(hparams, rope_freq, hparams["context_length"]) for layer_idx in range(hparams["n_layer"]): layer = QuantizedLayer(layer_idx, hparams, all_tensors, cos_sin) self.layers.append(layer) def forward(self, token: Tensor, index_pos: int) -> Tensor: b_size, seq_len = token.shape vocab_size, hidden_size = self.tok_embeddings.shape token = token.reshape((b_size * seq_len,)) x = self.tok_embeddings.index_select(token, 0) x = x.reshape((b_size, seq_len, hidden_size)) mask = [int(i > j) for j in range(seq_len) for i in range(seq_len)] mask = candle.tensor(mask).reshape((seq_len, seq_len)) for layer in self.layers: x = layer(x, mask, index_pos) x = self.norm(x) x = x.narrow(1, -1, 1).squeeze(1) x = self.output.matmul_t(x) return x
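# A minimal usage sketch (illustrative only; the file name and variable names below are
# assumptions, and the causal mask above masks position i for query j whenever i > j):
#   all_tensors, hparams, vocab = candle.utils.load_ggml("llama.ggml")
#   hparams = dict(hparams, context_length=2048)   # the forward pass above expects "context_length"
#   model = QuantizedLlama(hparams, all_tensors)
#   logits = model.forward(token_ids, index_pos=0)  # token_ids: (batch, seq_len) tensor of token ids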
candle/candle-pyo3/py_src/candle/models/llama.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/models/llama.py", "repo_id": "candle", "token_count": 2981 }
#![allow(clippy::redundant_closure_call)] use pyo3::exceptions::{PyTypeError, PyValueError}; use pyo3::prelude::*; use pyo3::pyclass::CompareOp; use pyo3::types::{IntoPyDict, PyDict, PyTuple}; use pyo3::ToPyObject; use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; use std::sync::Arc; use half::{bf16, f16}; #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use ::candle::{quantized::QTensor, DType, Device, Module, Tensor, WithDType}; mod utils; use utils::wrap_err; mod shape; use shape::{PyShape, PyShapeWithHole}; #[cfg(feature = "onnx")] mod onnx; #[derive(Clone, Debug)] #[pyclass(name = "Tensor")] /// A `candle` tensor. struct PyTensor(Tensor); impl std::ops::Deref for PyTensor { type Target = Tensor; fn deref(&self) -> &Self::Target { &self.0 } } #[derive(Clone, Copy, Debug, PartialEq, Eq)] #[pyclass(name = "DType")] /// A `candle` dtype. struct PyDType(DType); #[pymethods] impl PyDType { fn __repr__(&self) -> String { format!("{:?}", self.0) } fn __str__(&self) -> String { self.__repr__() } } impl PyDType { fn from_pyobject(ob: PyObject, py: Python<'_>) -> PyResult<Self> { use std::str::FromStr; if let Ok(dtype) = ob.extract::<String>(py) { let dtype = DType::from_str(&dtype) .map_err(|_| PyTypeError::new_err(format!("invalid dtype '{dtype}'")))?; Ok(Self(dtype)) } else { ob.extract(py) } } } static CUDA_DEVICE: std::sync::Mutex<Option<Device>> = std::sync::Mutex::new(None); static METAL_DEVICE: std::sync::Mutex<Option<Device>> = std::sync::Mutex::new(None); #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum PyDevice { Cpu, Cuda, Metal, } impl PyDevice { fn from_device(device: &Device) -> Self { match device { Device::Cpu => Self::Cpu, Device::Cuda(_) => Self::Cuda, Device::Metal(_) => Self::Metal, } } fn as_device(&self) -> PyResult<Device> { match self { Self::Cpu => Ok(Device::Cpu), Self::Cuda => { let mut device = CUDA_DEVICE.lock().unwrap(); if let Some(device) = device.as_ref() { return Ok(device.clone()); }; let d = Device::new_cuda(0).map_err(wrap_err)?; *device = Some(d.clone()); Ok(d) } Self::Metal => { let mut device = METAL_DEVICE.lock().unwrap(); if let Some(device) = device.as_ref() { return Ok(device.clone()); }; let d = Device::new_metal(0).map_err(wrap_err)?; *device = Some(d.clone()); Ok(d) } } } } impl<'source> FromPyObject<'source> for PyDevice { fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> { let device: String = ob.extract()?; let device = match device.as_str() { "cpu" => PyDevice::Cpu, "cuda" => PyDevice::Cuda, _ => Err(PyTypeError::new_err(format!("invalid device '{device}'")))?, }; Ok(device) } } impl ToPyObject for PyDevice { fn to_object(&self, py: Python<'_>) -> PyObject { let str = match self { PyDevice::Cpu => "cpu", PyDevice::Cuda => "cuda", PyDevice::Metal => "metal", }; str.to_object(py) } } trait PyWithDType: WithDType { fn to_py(&self, py: Python<'_>) -> PyObject; } macro_rules! 
pydtype { ($ty:ty, $conv:expr) => { impl PyWithDType for $ty { fn to_py(&self, py: Python<'_>) -> PyObject { $conv(*self).to_object(py) } } }; } pydtype!(i64, |v| v); pydtype!(u8, |v| v); pydtype!(u32, |v| v); pydtype!(f16, f32::from); pydtype!(bf16, f32::from); pydtype!(f32, |v| v); pydtype!(f64, |v| v); fn actual_index(t: &Tensor, dim: usize, index: i64) -> ::candle::Result<usize> { let dim = t.dim(dim)?; if 0 <= index { let index = index as usize; if dim <= index { ::candle::bail!("index {index} is too large for tensor dimension {dim}") } Ok(index) } else { if (dim as i64) < -index { ::candle::bail!("index {index} is too low for tensor dimension {dim}") } Ok((dim as i64 + index) as usize) } } fn actual_dim(t: &Tensor, dim: i64) -> ::candle::Result<usize> { let rank = t.rank(); if 0 <= dim { let dim = dim as usize; if rank <= dim { ::candle::bail!("dimension index {dim} is too large for tensor rank {rank}") } Ok(dim) } else { if (rank as i64) < -dim { ::candle::bail!("dimension index {dim} is too low for tensor rank {rank}") } Ok((rank as i64 + dim) as usize) } } // TODO: Something similar to this should probably be a part of candle core. trait MapDType { type Output; fn f<T: PyWithDType>(&self, t: &Tensor) -> PyResult<Self::Output>; fn map(&self, t: &Tensor) -> PyResult<Self::Output> { match t.dtype() { DType::U8 => self.f::<u8>(t), DType::U32 => self.f::<u32>(t), DType::I64 => self.f::<i64>(t), DType::BF16 => self.f::<bf16>(t), DType::F16 => self.f::<f16>(t), DType::F32 => self.f::<f32>(t), DType::F64 => self.f::<f64>(t), } } } enum Indexer { Index(usize), Slice(usize, usize), Ellipsis, Expand, IndexSelect(Tensor), } #[derive(Debug)] struct TorchTensor(PyObject); impl<'source> pyo3::FromPyObject<'source> for TorchTensor { fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> { let numpy_value: PyObject = ob.getattr("numpy")?.call0()?.extract()?; Ok(TorchTensor(numpy_value)) } } #[pymethods] impl PyTensor { #[new] #[pyo3(text_signature = "(self, data:_ArrayLike)")] // TODO: Handle arbitrary input dtype and shape. /// Creates a new tensor from a Python value. The value can be a scalar or array-like object. fn new(py: Python<'_>, data: PyObject) -> PyResult<Self> { use Device::Cpu; let tensor = if let Ok(vs) = data.extract::<u32>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<i64>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<f32>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<u32>>(py) { let len = vs.len(); Tensor::from_vec(vs, len, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<i64>>(py) { let len = vs.len(); Tensor::from_vec(vs, len, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<f32>>(py) { let len = vs.len(); Tensor::from_vec(vs, len, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<u32>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<i64>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<f32>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<Vec<u32>>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<Vec<i64>>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<Vec<f32>>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? 
} else if let Ok(TorchTensor(numpy)) = data.extract::<TorchTensor>(py) { return PyTensor::new(py, numpy); } else { let ty = data.bind(py).get_type(); Err(PyTypeError::new_err(format!( "incorrect type {ty} for tensor" )))? }; Ok(Self(tensor)) } /// Gets the tensor's data as a Python scalar or array-like object. /// &RETURNS&: _ArrayLike fn values(&self, py: Python<'_>) -> PyResult<PyObject> { struct M<'a>(Python<'a>); impl MapDType for M<'_> { type Output = PyObject; fn f<T: PyWithDType>(&self, t: &Tensor) -> PyResult<Self::Output> { match t.rank() { 0 => Ok(t.to_scalar::<T>().map_err(wrap_err)?.to_py(self.0)), 1 => { let v = t.to_vec1::<T>().map_err(wrap_err)?; let v = v.iter().map(|v| v.to_py(self.0)).collect::<Vec<_>>(); Ok(v.to_object(self.0)) } 2 => { let v = t.to_vec2::<T>().map_err(wrap_err)?; let v = v .iter() .map(|v| v.iter().map(|v| v.to_py(self.0)).collect()) .collect::<Vec<Vec<_>>>(); Ok(v.to_object(self.0)) } 3 => { let v = t.to_vec3::<T>().map_err(wrap_err)?; let v = v .iter() .map(|v| { v.iter() .map(|v| v.iter().map(|v| v.to_py(self.0)).collect()) .collect() }) .collect::<Vec<Vec<Vec<_>>>>(); Ok(v.to_object(self.0)) } n => Err(PyTypeError::new_err(format!( "TODO: conversion to PyObject is not handled for rank {n}" )))?, } } } // TODO: Handle arbitrary shapes. M(py).map(self) } /// Converts candle's tensor to pytorch's tensor /// &RETURNS&: torch.Tensor fn to_torch(&self, py: Python<'_>) -> PyResult<PyObject> { let candle_values = self.values(py)?; let torch_tensor: PyObject = py .import_bound("torch")? .getattr("tensor")? .call1((candle_values,))? .extract()?; Ok(torch_tensor) } #[getter] /// Gets the tensor's shape. /// &RETURNS&: Tuple[int] fn shape(&self, py: Python<'_>) -> PyObject { PyTuple::new_bound(py, self.0.dims()).to_object(py) } #[getter] /// Gets the tensor's element count. /// &RETURNS&: int fn nelement(&self) -> usize { self.0.elem_count() } #[getter] /// Gets the tensor's strides. /// &RETURNS&: Tuple[int] fn stride(&self, py: Python<'_>) -> PyObject { PyTuple::new_bound(py, self.0.stride()).to_object(py) } #[getter] /// Gets the tensor's dtype. /// &RETURNS&: DType fn dtype(&self) -> PyDType { PyDType(self.0.dtype()) } #[getter] /// Gets the tensor's device. /// &RETURNS&: Device fn device(&self, py: Python<'_>) -> PyObject { PyDevice::from_device(self.0.device()).to_object(py) } #[getter] /// Gets the tensor's rank. /// &RETURNS&: int fn rank(&self) -> usize { self.0.rank() } fn __repr__(&self) -> String { format!("{}", self.0) } fn __str__(&self) -> String { self.__repr__() } /// Performs the `abs` operation on the tensor. /// &RETURNS&: Tensor fn abs(&self) -> PyResult<Self> { Ok(PyTensor(self.0.abs().map_err(wrap_err)?)) } /// Performs the `sin` operation on the tensor. /// &RETURNS&: Tensor fn sin(&self) -> PyResult<Self> { Ok(PyTensor(self.0.sin().map_err(wrap_err)?)) } /// Performs the `cos` operation on the tensor. /// &RETURNS&: Tensor fn cos(&self) -> PyResult<Self> { Ok(PyTensor(self.0.cos().map_err(wrap_err)?)) } /// Performs the `log` operation on the tensor. /// &RETURNS&: Tensor fn log(&self) -> PyResult<Self> { Ok(PyTensor(self.0.log().map_err(wrap_err)?)) } /// Squares the tensor. /// &RETURNS&: Tensor fn sqr(&self) -> PyResult<Self> { Ok(PyTensor(self.0.sqr().map_err(wrap_err)?)) } /// Calculates the square root of the tensor. /// &RETURNS&: Tensor fn sqrt(&self) -> PyResult<Self> { Ok(PyTensor(self.0.sqrt().map_err(wrap_err)?)) } /// Get the `recip` of the tensor. 
/// &RETURNS&: Tensor fn recip(&self) -> PyResult<Self> { Ok(PyTensor(self.0.recip().map_err(wrap_err)?)) } /// Performs the `exp` operation on the tensor. /// &RETURNS&: Tensor fn exp(&self) -> PyResult<Self> { Ok(PyTensor(self.0.exp().map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, p:float)")] /// Performs the `pow` operation on the tensor with the given exponent. /// &RETURNS&: Tensor fn powf(&self, p: f64) -> PyResult<Self> { Ok(PyTensor(self.0.powf(p).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor, dim:int)")] /// Select values for the input tensor at the target indexes across the specified dimension. /// /// The `indexes` argument is an int tensor with a single dimension. /// The output has the same number of dimensions as the `self` input. The target dimension of /// the output has the same length as `indexes` and the values are taken from `self` using /// the index from `indexes`. Other dimensions have the same number of elements as the input /// tensor. /// &RETURNS&: Tensor fn index_select(&self, rhs: &Self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.index_select(rhs, dim).map_err(wrap_err)?)) } /// Gathers values along an axis specified by dim. fn gather(&self, index: &Self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.gather(index, dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Performs a matrix multiplication between the two tensors. /// &RETURNS&: Tensor fn matmul(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.matmul(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Adds the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. /// &RETURNS&: Tensor fn broadcast_add(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.broadcast_add(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Subtracts the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. /// &RETURNS&: Tensor fn broadcast_sub(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.broadcast_sub(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Multiplies the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. /// &RETURNS&: Tensor fn broadcast_mul(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.broadcast_mul(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Divides the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. /// &RETURNS&: Tensor fn broadcast_div(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.broadcast_div(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, on_true:Tensor, on_false:Tensor)")] /// Returns a tensor with the same shape as the input tensor, the values are taken from /// `on_true` if the input tensor value is not zero, and `on_false` at the positions where the /// input tensor is equal to zero. /// &RETURNS&: Tensor fn where_cond(&self, on_true: &Self, on_false: &Self) -> PyResult<Self> { Ok(PyTensor( self.0.where_cond(on_true, on_false).map_err(wrap_err)?, )) } #[getter] /// Index a tensor.
/// &RETURNS&: Tensor fn __getitem__(&self, py: Python, idx: PyObject) -> PyResult<Self> { let mut indexers: Vec<Indexer> = vec![]; let dims = self.0.shape().dims(); fn to_absolute_index(index: isize, current_dim: usize, dims: &[usize]) -> PyResult<usize> { // Convert a relative index to an absolute index e.g. tensor[-1] -> tensor[0] let actual_index = if index < 0 { dims[current_dim] as isize + index } else { index }; // Check that the index is in range if actual_index < 0 || actual_index >= dims[current_dim] as isize { return Err(PyValueError::new_err(format!( "index out of range for dimension '{i}' with indexer '{value}'", i = current_dim, value = index ))); } Ok(actual_index as usize) } fn extract_indexer( py_indexer: &Bound<PyAny>, current_dim: usize, dims: &[usize], index_argument_count: usize, ) -> PyResult<(Indexer, usize)> { if let Ok(index) = py_indexer.extract() { // Handle a single index e.g. tensor[0] or tensor[-1] Ok(( Indexer::Index(to_absolute_index(index, current_dim, dims)?), current_dim + 1, )) } else if let Ok(slice) = py_indexer.downcast::<pyo3::types::PySlice>() { // Handle a single slice e.g. tensor[0:1] or tensor[0:-1] let index = slice.indices(dims[current_dim] as isize)?; Ok(( Indexer::Slice(index.start as usize, index.stop as usize), current_dim + 1, )) } else if let Ok(tensor) = py_indexer.extract::<PyTensor>() { // Handle a tensor as indices e.g. tensor[tensor([0,1])] let t = tensor.0; if t.rank() != 1 { return Err(PyTypeError::new_err( "multi-dimensional tensor indexing is not supported", )); } Ok((Indexer::IndexSelect(t), current_dim + 1)) } else if let Ok(list) = py_indexer.downcast::<pyo3::types::PyList>() { // Handle a list of indices e.g. tensor[[0,1]] let mut indexes = vec![]; for item in list.iter() { let index = item.extract::<i64>()?; indexes.push(index); } Ok(( Indexer::IndexSelect( Tensor::from_vec(indexes, list.len(), &Device::Cpu).map_err(wrap_err)?, ), current_dim + 1, )) } else if py_indexer.is(&py_indexer.py().Ellipsis()) { // Handle '...' e.g. tensor[..., 0] if current_dim > 0 { return Err(PyTypeError::new_err( "Ellipsis ('...') can only be used at the start of an indexing operation", )); } Ok((Indexer::Ellipsis, dims.len() - (index_argument_count - 1))) } else if py_indexer.is_none() { // Handle None e.g. tensor[None, 0] Ok((Indexer::Expand, current_dim)) } else { Err(PyTypeError::new_err(format!( "unsupported indexer {}", py_indexer ))) } } if let Ok(tuple) = idx.downcast_bound::<pyo3::types::PyTuple>(py) { let not_none_count: usize = tuple.iter().filter(|x| !x.is_none()).count(); if not_none_count > dims.len() { return Err(PyValueError::new_err("provided too many indices")); } let mut current_dim = 0; for item in tuple.iter() { let (indexer, new_current_dim) = extract_indexer(&item, current_dim, dims, not_none_count)?; current_dim = new_current_dim; indexers.push(indexer); } } else { let (indexer, _) = extract_indexer(idx.downcast_bound::<PyAny>(py)?, 0, dims, 1)?; indexers.push(indexer); } let mut x = self.0.clone(); let mut current_dim = 0; // Apply the indexers for indexer in indexers.iter() { x = match indexer { Indexer::Index(n) => x .narrow(current_dim, *n, 1) .map_err(wrap_err)? 
.squeeze(current_dim) .map_err(wrap_err)?, Indexer::Slice(start, stop) => { let out = x .narrow(current_dim, *start, stop.saturating_sub(*start)) .map_err(wrap_err)?; current_dim += 1; out } Indexer::Ellipsis => { // Ellipsis is a special case, it means that all remaining dimensions should be // selected => advance the current_dim to the last dimension we have indexers for current_dim += dims.len() - (indexers.len() - 1); x } Indexer::Expand => { // Expand is a special case, it means that a new dimension should be added => unsqueeze and advance the current_dim let out = x.unsqueeze(current_dim).map_err(wrap_err)?; current_dim += 1; out } Indexer::IndexSelect(indexes) => { let out = x .index_select( &indexes.to_device(x.device()).map_err(wrap_err)?, current_dim, ) .map_err(wrap_err)?; current_dim += 1; out } } } Ok(Self(x)) } /// Add two tensors. /// &RETURNS&: Tensor fn __add__(&self, rhs: &Bound<PyAny>) -> PyResult<Self> { let tensor = if let Ok(rhs) = rhs.extract::<Self>() { self.0.broadcast_add(&rhs.0).map_err(wrap_err)? } else if let Ok(rhs) = rhs.extract::<f64>() { (&self.0 + rhs).map_err(wrap_err)? } else { Err(PyTypeError::new_err("unsupported rhs for add"))? }; Ok(Self(tensor)) } fn __radd__(&self, rhs: &Bound<PyAny>) -> PyResult<Self> { self.__add__(rhs) } /// Multiply two tensors. /// &RETURNS&: Tensor fn __mul__(&self, rhs: &Bound<PyAny>) -> PyResult<Self> { let tensor = if let Ok(rhs) = rhs.extract::<Self>() { self.0.broadcast_mul(&rhs.0).map_err(wrap_err)? } else if let Ok(rhs) = rhs.extract::<f64>() { (&self.0 * rhs).map_err(wrap_err)? } else { Err(PyTypeError::new_err("unsupported rhs for mul"))? }; Ok(Self(tensor)) } fn __rmul__(&self, rhs: &Bound<PyAny>) -> PyResult<Self> { self.__mul__(rhs) } /// Subtract two tensors. /// &RETURNS&: Tensor fn __sub__(&self, rhs: &Bound<PyAny>) -> PyResult<Self> { let tensor = if let Ok(rhs) = rhs.extract::<Self>() { self.0.broadcast_sub(&rhs.0).map_err(wrap_err)? } else if let Ok(rhs) = rhs.extract::<f64>() { (&self.0 - rhs).map_err(wrap_err)? } else { Err(PyTypeError::new_err("unsupported rhs for sub"))? }; Ok(Self(tensor)) } /// Divide two tensors. /// &RETURNS&: Tensor fn __truediv__(&self, rhs: &Bound<PyAny>) -> PyResult<Self> { let tensor = if let Ok(rhs) = rhs.extract::<Self>() { self.0.broadcast_div(&rhs.0).map_err(wrap_err)? } else if let Ok(rhs) = rhs.extract::<f64>() { (&self.0 / rhs).map_err(wrap_err)? } else { Err(PyTypeError::new_err("unsupported rhs for div"))? }; Ok(Self(tensor)) } /// Rich-compare two tensors. /// &RETURNS&: Tensor fn __richcmp__(&self, rhs: &Bound<PyAny>, op: CompareOp) -> PyResult<Self> { let compare = |lhs: &Tensor, rhs: &Tensor| { let t = match op { CompareOp::Eq => lhs.eq(rhs), CompareOp::Ne => lhs.ne(rhs), CompareOp::Lt => lhs.lt(rhs), CompareOp::Le => lhs.le(rhs), CompareOp::Gt => lhs.gt(rhs), CompareOp::Ge => lhs.ge(rhs), }; Ok(PyTensor(t.map_err(wrap_err)?)) }; if let Ok(rhs) = rhs.extract::<PyTensor>() { if self.0.shape() == rhs.0.shape() { compare(&self.0, &rhs.0) } else { // We broadcast manually here because `candle.cmp` does not support automatic broadcasting let broadcast_shape = self .0 .shape() .broadcast_shape_binary_op(rhs.0.shape(), "cmp") .map_err(wrap_err)?; let broadcasted_lhs = self.0.broadcast_as(&broadcast_shape).map_err(wrap_err)?; let broadcasted_rhs = rhs.0.broadcast_as(&broadcast_shape).map_err(wrap_err)?; compare(&broadcasted_lhs, &broadcasted_rhs) } } else if let Ok(rhs) = rhs.extract::<f64>() { let scalar_tensor = Tensor::new(rhs, self.0.device()) .map_err(wrap_err)? 
.to_dtype(self.0.dtype()) .map_err(wrap_err)? .broadcast_as(self.0.shape()) .map_err(wrap_err)?; compare(&self.0, &scalar_tensor) } else { return Err(PyTypeError::new_err("unsupported rhs for __richcmp__")); } } fn __hash__(&self) -> u64 { // we have overridden __richcmp__ => py03 wants us to also override __hash__ // we simply hash the address of the tensor let mut hasher = DefaultHasher::new(); let pointer = &self.0 as *const Tensor; let address = pointer as usize; address.hash(&mut hasher); hasher.finish() } #[pyo3(signature=(*shape), text_signature = "(self, *shape:Shape)")] /// Reshapes the tensor to the given shape. /// &RETURNS&: Tensor fn reshape(&self, shape: PyShapeWithHole) -> PyResult<Self> { Ok(PyTensor( self.0 .reshape(shape.to_absolute(&self.0)?) .map_err(wrap_err)?, )) } #[pyo3(signature=(*shape), text_signature = "(self, *shape:Shape)")] /// Broadcasts the tensor to the given shape. /// &RETURNS&: Tensor fn broadcast_as(&self, shape: PyShapeWithHole) -> PyResult<Self> { Ok(PyTensor( self.0 .broadcast_as(shape.to_absolute(&self.0)?) .map_err(wrap_err)?, )) } #[pyo3(signature=(*shape), text_signature = "(self, *shape:Shape)")] /// Broadcasts the tensor to the given shape, adding new dimensions on the left. /// &RETURNS&: Tensor fn broadcast_left(&self, shape: PyShapeWithHole) -> PyResult<Self> { Ok(PyTensor( self.0 .broadcast_left(shape.to_absolute(&self.0)?) .map_err(wrap_err)?, )) } #[pyo3(text_signature = "(self, dim:int)")] /// Creates a new tensor with the specified dimension removed if its size was one. /// &RETURNS&: Tensor fn squeeze(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.squeeze(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Creates a new tensor with a dimension of size one inserted at the specified position. /// &RETURNS&: Tensor fn unsqueeze(&self, dim: usize) -> PyResult<Self> { Ok(PyTensor(self.0.unsqueeze(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, index:int)")] /// Gets the value at the specified index. /// &RETURNS&: Tensor fn get(&self, index: i64) -> PyResult<Self> { let index = actual_index(self, 0, index).map_err(wrap_err)?; Ok(PyTensor(self.0.get(index).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim1:int, dim2:int)")] /// Returns a tensor that is a transposed version of the input, the given dimensions are swapped. /// &RETURNS&: Tensor fn transpose(&self, dim1: usize, dim2: usize) -> PyResult<Self> { Ok(PyTensor(self.0.transpose(dim1, dim2).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int, start:int, len:int)")] /// Returns a new tensor that is a narrowed version of the input, the dimension `dim` /// ranges from `start` to `start + len`. /// &RETURNS&: Tensor fn narrow(&self, dim: i64, start: i64, len: usize) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; let start = actual_index(self, dim, start).map_err(wrap_err)?; Ok(PyTensor(self.0.narrow(dim, start, len).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Returns the indices of the maximum value(s) across the selected dimension. /// &RETURNS&: Tensor fn argmax_keepdim(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.argmax_keepdim(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Returns the indices of the minimum value(s) across the selected dimension. 
/// &RETURNS&: Tensor fn argmin_keepdim(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.argmin_keepdim(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Gathers the maximum value across the selected dimension. /// &RETURNS&: Tensor fn max_keepdim(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.max_keepdim(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Gathers the minimum value across the selected dimension. /// &RETURNS&: Tensor fn min_keepdim(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.min_keepdim(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:Union[int, List[int]])")] /// Returns the sum of all elements in the input tensor. The sum is performed over all the input dimensions. /// &RETURNS&: Tensor fn sum_keepdim(&self, dims: PyObject, py: Python<'_>) -> PyResult<Self> { let dims = if let Ok(dim) = dims.extract::<usize>(py) { vec![dim] } else { dims.extract::<Vec<usize>>(py)? }; Ok(PyTensor( self.0.sum_keepdim(dims.as_slice()).map_err(wrap_err)?, )) } /// Returns the sum of the tensor. /// &RETURNS&: Tensor fn sum_all(&self) -> PyResult<Self> { Ok(PyTensor(self.0.sum_all().map_err(wrap_err)?)) } /// Returns the mean of the tensor. /// &RETURNS&: Tensor fn mean_all(&self) -> PyResult<Self> { let elements = self.0.elem_count(); let sum = self.0.sum_all().map_err(wrap_err)?; let mean = (sum / elements as f64).map_err(wrap_err)?; Ok(PyTensor(mean)) } #[pyo3(text_signature = "(self, dim:int)")] /// Flattens the tensor on the dimension indexes from `dim` (inclusive) to the last dimension. /// &RETURNS&: Tensor fn flatten_from(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.flatten_from(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] ///Flattens the tensor on the dimension indexes from `0` to `dim` (inclusive). /// &RETURNS&: Tensor fn flatten_to(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.flatten_to(dim).map_err(wrap_err)?)) } /// Flattens the tensor into a 1D tensor. /// &RETURNS&: Tensor fn flatten_all(&self) -> PyResult<Self> { Ok(PyTensor(self.0.flatten_all().map_err(wrap_err)?)) } /// Transposes the tensor. /// &RETURNS&: Tensor fn t(&self) -> PyResult<Self> { Ok(PyTensor(self.0.t().map_err(wrap_err)?)) } /// Makes the tensor contiguous in memory. /// &RETURNS&: Tensor fn contiguous(&self) -> PyResult<Self> { Ok(PyTensor(self.0.contiguous().map_err(wrap_err)?)) } /// Returns true if the tensor is contiguous in C order. /// &RETURNS&: bool fn is_contiguous(&self) -> bool { self.0.is_contiguous() } /// Returns true if the tensor is contiguous in Fortran order. /// &RETURNS&: bool fn is_fortran_contiguous(&self) -> bool { self.0.is_fortran_contiguous() } /// Detach the tensor from the computation graph. /// &RETURNS&: Tensor fn detach(&self) -> Self { PyTensor(self.0.detach()) } /// Returns a copy of the tensor. /// &RETURNS&: Tensor fn copy(&self) -> PyResult<Self> { Ok(PyTensor(self.0.copy().map_err(wrap_err)?)) } #[pyo3(signature = (*args, **kwargs), text_signature = "(self, *args, **kwargs)")] /// Performs Tensor dtype and/or device conversion. 
/// &RETURNS&: Tensor fn to(&self, args: &Bound<PyTuple>, kwargs: Option<&Bound<PyDict>>) -> PyResult<Self> { let mut device: Option<PyDevice> = None; let mut dtype: Option<PyDType> = None; let mut other: Option<PyTensor> = None; fn handle_duplicates<T>( opt: &mut Option<T>, extraction_result: PyResult<T>, err_msg: &'static str, ) -> PyResult<()> { if let Ok(successful_extraction) = extraction_result { if opt.is_some() { return Err(PyValueError::new_err(err_msg)); } *opt = Some(successful_extraction); } Ok(()) } //handle args for arg in args.iter() { if arg.extract::<PyDevice>().is_ok() { handle_duplicates( &mut device, arg.extract::<PyDevice>(), "cannot specify multiple devices", )?; } else if arg.extract::<PyDType>().is_ok() { handle_duplicates( &mut dtype, arg.extract::<PyDType>(), "cannot specify multiple dtypes", )?; } else if arg.extract::<PyTensor>().is_ok() { handle_duplicates( &mut other, arg.extract::<PyTensor>(), "cannot specify multiple output tensors", )?; } else { return Err(PyTypeError::new_err(format!( "unsupported argument type `{:#?}`", arg.get_type().name() ))); } } if let Some(kwargs) = kwargs { if let Ok(Some(any)) = kwargs.get_item("dtype") { handle_duplicates( &mut dtype, any.extract::<PyDType>(), "cannot specify multiple dtypes", )?; } if let Ok(Some(any)) = kwargs.get_item("device") { handle_duplicates( &mut device, any.extract::<PyDevice>(), "cannot specify multiple devices", )?; } if let Ok(Some(any)) = kwargs.get_item("other") { handle_duplicates( &mut other, any.extract::<PyTensor>(), "cannot specify multiple output tensors", )?; } } if let Some(other) = other { if device.is_some() { return Err(PyValueError::new_err( "cannot specify both an output tensor and a device", )); } if dtype.is_some() { return Err(PyValueError::new_err( "cannot specify both an output tensor and a dtype", )); } dtype = Some(other.dtype()); device = Some(PyDevice::from_device(other.0.device())); } let result = match (device, dtype) { (Some(device), Some(dtype)) => self .0 .to_device(&device.as_device()?) .map_err(wrap_err)? .to_dtype(dtype.0) .map_err(wrap_err)?, (Some(device), None) => self.0.to_device(&device.as_device()?).map_err(wrap_err)?, (None, Some(dtype)) => self.0.to_dtype(dtype.0).map_err(wrap_err)?, (None, None) => return Err(PyTypeError::new_err("No valid dtype or device specified")), }; Ok(PyTensor(result)) } #[pyo3(text_signature = "(self, dtype:Union[str,DType])")] /// Convert the tensor to a new dtype. /// &RETURNS&: Tensor fn to_dtype(&self, dtype: PyObject, py: Python<'_>) -> PyResult<Self> { let dtype = PyDType::from_pyobject(dtype, py)?; Ok(PyTensor(self.0.to_dtype(dtype.0).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, device:Union[str,Device])")] /// Move the tensor to a new device. /// &RETURNS&: Tensor fn to_device(&self, device: PyDevice) -> PyResult<Self> { let device = device.as_device()?; Ok(PyTensor(self.0.to_device(&device).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, quantized_dtype:str)")] /// Quantize the tensor. 
/// &RETURNS&: QTensor fn quantize(&self, quantized_dtype: &str) -> PyResult<PyQTensor> { use ::candle::quantized; let res = match quantized_dtype.to_lowercase().as_str() { "q2k" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q2K), "q3k" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q3K), "q4_0" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q4_0), "q4_1" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q4_1), "q4k" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q4K), "q5_0" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q5_0), "q5_1" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q5_1), "q5k" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q5K), "q6k" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q6K), "q8_0" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q8_0), "q8_1" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q8_1), "q8k" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q8K), "f16" => quantized::QTensor::quantize(self, quantized::GgmlDType::F16), "f32" => quantized::QTensor::quantize(self, quantized::GgmlDType::F32), dt => { return Err(PyErr::new::<PyValueError, _>(format!( "unknown quantized-dtype {dt}" ))) } }; Ok(PyQTensor(Arc::new(res.map_err(wrap_err)?))) } } #[pyfunction] #[pyo3(text_signature = "(tensors:List[Tensor], dim:int )")] /// Concatenate the tensors across one axis. /// &RETURNS&: Tensor fn cat(tensors: Vec<PyTensor>, dim: i64) -> PyResult<PyTensor> { if tensors.is_empty() { return Err(PyErr::new::<PyValueError, _>("empty input to cat")); } let dim = actual_dim(&tensors[0], dim).map_err(wrap_err)?; let tensors = tensors.into_iter().map(|t| t.0).collect::<Vec<_>>(); let tensor = Tensor::cat(&tensors, dim).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(text_signature = "(tensors:List[Tensor], dim:int)")] /// Stack the tensors along a new axis. /// &RETURNS&: Tensor fn stack(tensors: Vec<PyTensor>, dim: usize) -> PyResult<PyTensor> { let tensors = tensors.into_iter().map(|t| t.0).collect::<Vec<_>>(); let tensor = Tensor::stack(&tensors, dim).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(text_signature = "(data:_ArrayLike)")] /// Creates a new tensor from a Python value. The value can be a scalar or array-like object. /// &RETURNS&: Tensor fn tensor(py: Python<'_>, data: PyObject) -> PyResult<PyTensor> { PyTensor::new(py, data) } #[pyfunction] #[pyo3(signature = (*shape,device=None), text_signature = "(*shape:Shape, device:Optional[Device]=None)")] /// Creates a new tensor with random values. /// &RETURNS&: Tensor fn rand(_py: Python<'_>, shape: PyShape, device: Option<PyDevice>) -> PyResult<PyTensor> { let device = device.unwrap_or(PyDevice::Cpu).as_device()?; let tensor = Tensor::rand(0f32, 1f32, shape, &device).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(signature = (*shape,device=None), text_signature = "(*shape:Shape, device:Optional[Device]=None)")] /// Creates a new tensor with random values from a normal distribution. 
/// &RETURNS&: Tensor fn randn(_py: Python<'_>, shape: PyShape, device: Option<PyDevice>) -> PyResult<PyTensor> { let device = device.unwrap_or(PyDevice::Cpu).as_device()?; let tensor = Tensor::randn(0f32, 1f32, shape, &device).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(signature = (*shape, dtype=None, device=None),text_signature = "(*shape:Shape, dtype:Optional[DType]=None, device:Optional[Device]=None)")] /// Creates a new tensor filled with ones. /// &RETURNS&: Tensor fn ones( py: Python<'_>, shape: PyShape, dtype: Option<PyObject>, device: Option<PyDevice>, ) -> PyResult<PyTensor> { let dtype = match dtype { None => DType::F32, Some(dtype) => PyDType::from_pyobject(dtype, py)?.0, }; let device = device.unwrap_or(PyDevice::Cpu).as_device()?; let tensor = Tensor::ones(shape, dtype, &device).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(signature = (*shape, dtype=None, device=None), text_signature = "(*shape:Shape, dtype:Optional[DType]=None, device:Optional[Device]=None)")] /// Creates a new tensor filled with zeros. /// &RETURNS&: Tensor fn zeros( py: Python<'_>, shape: PyShape, dtype: Option<PyObject>, device: Option<PyDevice>, ) -> PyResult<PyTensor> { let dtype = match dtype { None => DType::F32, Some(dtype) => PyDType::from_pyobject(dtype, py)?.0, }; let device = device.unwrap_or(PyDevice::Cpu).as_device()?; let tensor = Tensor::zeros(shape, dtype, &device).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[derive(Debug, Clone)] #[pyclass(name = "QTensor")] /// A quantized tensor. struct PyQTensor(Arc<QTensor>); impl std::ops::Deref for PyQTensor { type Target = QTensor; fn deref(&self) -> &Self::Target { self.0.as_ref() } } #[pymethods] impl PyQTensor { #[getter] ///Gets the tensors quantized dtype. /// &RETURNS&: str fn ggml_dtype(&self) -> String { format!("{:?}", self.0.dtype()) } #[getter] ///Gets the rank of the tensor. /// &RETURNS&: int fn rank(&self) -> usize { self.0.rank() } #[getter] ///Gets the shape of the tensor. /// &RETURNS&: Tuple[int] fn shape(&self, py: Python<'_>) -> PyObject { PyTuple::new_bound(py, self.0.shape().dims()).to_object(py) } fn __repr__(&self) -> String { format!("{:?}", self.0) } fn __str__(&self) -> String { self.__repr__() } /// Dequantizes the tensor. /// &RETURNS&: Tensor fn dequantize(&self) -> PyResult<PyTensor> { let tensor = self.0.dequantize(&Device::Cpu).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyo3(text_signature = "(self, lhs:Tensor)")] /// Performs a quantized matrix multiplication, with the quantized tensor as the right hand side. /// &RETURNS&: Tensor fn matmul_t(&self, lhs: &PyTensor) -> PyResult<PyTensor> { let qmatmul = ::candle::quantized::QMatMul::from_arc(self.0.clone()).map_err(wrap_err)?; let res = qmatmul.forward(lhs).map_err(wrap_err)?; Ok(PyTensor(res)) } } #[pyfunction] #[pyo3(text_signature = "(path:Union[str,PathLike])")] /// Loads a safetensors file. Returns a dictionary mapping tensor names to tensors. /// &RETURNS&: Dict[str,Tensor] fn load_safetensors(path: &str, py: Python<'_>) -> PyResult<PyObject> { let res = ::candle::safetensors::load(path, &Device::Cpu).map_err(wrap_err)?; let res = res .into_iter() .map(|(key, value)| (key, PyTensor(value).into_py(py))) .collect::<Vec<_>>(); Ok(res.into_py_dict_bound(py).to_object(py)) } #[pyfunction] #[pyo3(text_signature = "(path:Union[str,PathLike], tensors:Dict[str,Tensor])")] /// Saves a dictionary of tensors to a safetensors file. 
/// &RETURNS&: None fn save_safetensors( path: &str, tensors: std::collections::HashMap<String, PyTensor>, ) -> PyResult<()> { let tensors = tensors .into_iter() .map(|(s, t)| (s, t.0)) .collect::<std::collections::HashMap<_, _>>(); ::candle::safetensors::save(&tensors, path).map_err(wrap_err) } #[pyfunction] #[pyo3(signature = (path, device = None))] /// Load a GGML file. Returns a tuple of three objects: a dictionary mapping tensor names to tensors, /// a dictionary mapping hyperparameter names to hyperparameter values, and a vocabulary. /// &RETURNS&: Tuple[Dict[str,QTensor], Dict[str,Any], List[str]] fn load_ggml( path: &str, device: Option<PyDevice>, py: Python<'_>, ) -> PyResult<(PyObject, PyObject, PyObject)> { let mut file = std::fs::File::open(path)?; let device = device.unwrap_or(PyDevice::Cpu).as_device()?; let ggml = ::candle::quantized::ggml_file::Content::read(&mut file, &device).map_err(wrap_err)?; let tensors = ggml .tensors .into_iter() .map(|(key, qtensor)| Ok((key, PyQTensor(Arc::new(qtensor)).into_py(py)))) .collect::<::candle::Result<Vec<_>>>() .map_err(wrap_err)?; let tensors = tensors.into_py_dict_bound(py).to_object(py); let hparams = [ ("n_vocab", ggml.hparams.n_vocab), ("n_embd", ggml.hparams.n_embd), ("n_mult", ggml.hparams.n_mult), ("n_head", ggml.hparams.n_head), ("n_layer", ggml.hparams.n_layer), ("n_rot", ggml.hparams.n_rot), ("ftype", ggml.hparams.ftype), ]; let hparams = hparams.into_py_dict_bound(py).to_object(py); let vocab = ggml .vocab .token_score_pairs .iter() .map(|(bytes, _)| String::from_utf8_lossy(bytes.as_slice()).to_string()) .collect::<Vec<String>>() .to_object(py); Ok((tensors, hparams, vocab)) } #[pyfunction] #[pyo3(signature = (path, device = None))] /// Loads a GGUF file. Returns a tuple of two dictionaries: the first maps tensor names to tensors, /// and the second maps metadata keys to metadata values. /// &RETURNS&: Tuple[Dict[str,QTensor], Dict[str,Any]] fn load_gguf( path: &str, device: Option<PyDevice>, py: Python<'_>, ) -> PyResult<(PyObject, PyObject)> { let device = device.unwrap_or(PyDevice::Cpu).as_device()?; use ::candle::quantized::gguf_file; fn gguf_value_to_pyobject(v: &gguf_file::Value, py: Python<'_>) -> PyResult<PyObject> { let v: PyObject = match v { gguf_file::Value::U8(x) => x.into_py(py), gguf_file::Value::I8(x) => x.into_py(py), gguf_file::Value::U16(x) => x.into_py(py), gguf_file::Value::I16(x) => x.into_py(py), gguf_file::Value::U32(x) => x.into_py(py), gguf_file::Value::I32(x) => x.into_py(py), gguf_file::Value::U64(x) => x.into_py(py), gguf_file::Value::I64(x) => x.into_py(py), gguf_file::Value::F32(x) => x.into_py(py), gguf_file::Value::F64(x) => x.into_py(py), gguf_file::Value::Bool(x) => x.into_py(py), gguf_file::Value::String(x) => x.into_py(py), gguf_file::Value::Array(x) => { let list = pyo3::types::PyList::empty_bound(py); for elem in x.iter() { list.append(gguf_value_to_pyobject(elem, py)?)?; } list.into() } }; Ok(v) } let mut file = std::fs::File::open(path)?; let gguf = gguf_file::Content::read(&mut file).map_err(wrap_err)?; let tensors = gguf .tensor_infos .keys() .map(|key| { let qtensor = gguf.tensor(&mut file, key, &device)?; Ok((key, PyQTensor(Arc::new(qtensor)).into_py(py))) }) .collect::<::candle::Result<Vec<_>>>() .map_err(wrap_err)?; let tensors = tensors.into_py_dict_bound(py).to_object(py); let metadata = gguf .metadata .iter() .map(|(key, value)| Ok((key, gguf_value_to_pyobject(value, py)?))) .collect::<PyResult<Vec<_>>>()? 
.into_py_dict_bound(py) .to_object(py); Ok((tensors, metadata)) } #[pyfunction] #[pyo3( signature = (path, tensors, metadata) )] /// Save quantized tensors and metadata to a GGUF file. fn save_gguf(path: &str, tensors: PyObject, metadata: PyObject, py: Python<'_>) -> PyResult<()> { use ::candle::quantized::gguf_file; fn pyobject_to_gguf_value(v: &Bound<PyAny>, py: Python<'_>) -> PyResult<gguf_file::Value> { let v: gguf_file::Value = if let Ok(x) = v.extract::<u8>() { gguf_file::Value::U8(x) } else if let Ok(x) = v.extract::<i8>() { gguf_file::Value::I8(x) } else if let Ok(x) = v.extract::<u16>() { gguf_file::Value::U16(x) } else if let Ok(x) = v.extract::<i16>() { gguf_file::Value::I16(x) } else if let Ok(x) = v.extract::<u32>() { gguf_file::Value::U32(x) } else if let Ok(x) = v.extract::<i32>() { gguf_file::Value::I32(x) } else if let Ok(x) = v.extract::<u64>() { gguf_file::Value::U64(x) } else if let Ok(x) = v.extract::<i64>() { gguf_file::Value::I64(x) } else if let Ok(x) = v.extract::<f32>() { gguf_file::Value::F32(x) } else if let Ok(x) = v.extract::<f64>() { gguf_file::Value::F64(x) } else if let Ok(x) = v.extract::<bool>() { gguf_file::Value::Bool(x) } else if let Ok(x) = v.extract::<String>() { gguf_file::Value::String(x) } else if let Ok(x) = v.extract::<Vec<PyObject>>() { let x = x .into_iter() .map(|f| pyobject_to_gguf_value(f.bind(py), py)) .collect::<PyResult<Vec<_>>>()?; gguf_file::Value::Array(x) } else { return Err(PyErr::new::<PyValueError, _>(format!( "unsupported type {:?}", v ))); }; Ok(v) } let tensors = tensors .downcast_bound::<PyDict>(py) .map_err(|_| PyErr::new::<PyValueError, _>("expected a dict"))? .iter() .map(|(key, value)| { Ok(( key.extract::<String>() .map_err(|_| PyErr::new::<PyValueError, _>("keys must be strings"))?, value.extract::<PyQTensor>()?.0, )) }) .collect::<PyResult<Vec<_>>>()?; let metadata = metadata .downcast_bound::<PyDict>(py) .map_err(|_| PyErr::new::<PyValueError, _>("expected a dict"))? .iter() .map(|(key, value)| { Ok(( key.extract::<String>() .map_err(|_| PyErr::new::<PyValueError, _>("keys must be strings"))?, pyobject_to_gguf_value(&value.as_borrowed(), py)?, )) }) .collect::<PyResult<Vec<_>>>()?; let converted_metadata: Vec<_> = metadata .iter() .map(|(name, value)| (name.as_str(), value)) .collect(); let converted_tensors: Vec<_> = tensors .iter() .map(|(name, tensor)| (name.as_str(), tensor.as_ref())) .collect(); let mut file = std::fs::File::create(path)?; gguf_file::write(&mut file, &converted_metadata, &converted_tensors).map_err(wrap_err) } #[pyfunction] /// Returns true if the 'cuda' backend is available. /// &RETURNS&: bool fn cuda_is_available() -> bool { ::candle::utils::cuda_is_available() } #[pyfunction] /// Returns true if candle was compiled with 'accelerate' support. /// &RETURNS&: bool fn has_accelerate() -> bool { ::candle::utils::has_accelerate() } #[pyfunction] /// Returns true if candle was compiled with MKL support. /// &RETURNS&: bool fn has_mkl() -> bool { ::candle::utils::has_mkl() } #[pyfunction] /// Returns the number of threads used by candle.
/// &RETURNS&: int fn get_num_threads() -> usize { ::candle::utils::get_num_threads() } fn candle_utils(_py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_function(wrap_pyfunction!(cuda_is_available, m)?)?; m.add_function(wrap_pyfunction!(get_num_threads, m)?)?; m.add_function(wrap_pyfunction!(has_accelerate, m)?)?; m.add_function(wrap_pyfunction!(has_mkl, m)?)?; m.add_function(wrap_pyfunction!(load_ggml, m)?)?; m.add_function(wrap_pyfunction!(load_gguf, m)?)?; m.add_function(wrap_pyfunction!(save_gguf, m)?)?; m.add_function(wrap_pyfunction!(load_safetensors, m)?)?; m.add_function(wrap_pyfunction!(save_safetensors, m)?)?; Ok(()) } #[pyfunction] #[pyo3(text_signature = "(tensor:Tensor, dim:int)")] /// Applies the Softmax function to a given tensor. /// &RETURNS&: Tensor fn softmax(tensor: PyTensor, dim: i64) -> PyResult<PyTensor> { let dim = actual_dim(&tensor, dim).map_err(wrap_err)?; let sm = candle_nn::ops::softmax(&tensor.0, dim).map_err(wrap_err)?; Ok(PyTensor(sm)) } #[pyfunction] #[pyo3(signature = (tensor, ksize, *, stride=1), text_signature = "(tensor:Tensor, ksize:int, stride:int=1)")] /// Applies the 2d avg-pool function to a given tensor. /// &RETURNS&: Tensor fn avg_pool2d(tensor: PyTensor, ksize: usize, stride: usize) -> PyResult<PyTensor> { let tensor = tensor .avg_pool2d_with_stride(ksize, stride) .map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(signature = (tensor, ksize, *, stride=1), text_signature = "(tensor:Tensor, ksize:int, stride:int=1)")] /// Applies the 2d max-pool function to a given tensor. /// &RETURNS&: Tensor fn max_pool2d(tensor: PyTensor, ksize: usize, stride: usize) -> PyResult<PyTensor> { let tensor = tensor .max_pool2d_with_stride(ksize, stride) .map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(text_signature = "(tensor:Tensor)")] /// Applies the Sigmoid Linear Unit (SiLU) function to a given tensor. /// &RETURNS&: Tensor fn silu(tensor: PyTensor) -> PyResult<PyTensor> { let s = candle_nn::ops::silu(&tensor.0).map_err(wrap_err)?; Ok(PyTensor(s)) } #[pyfunction] #[pyo3(text_signature = "(tensor:Tensor)")] /// Applies the Gaussian Error Linear Unit (GELU) function to a given tensor. /// &RETURNS&: Tensor fn gelu(tensor: PyTensor) -> PyResult<PyTensor> { let s = tensor.0.gelu_erf().map_err(wrap_err)?; Ok(PyTensor(s)) } #[pyfunction] #[pyo3(text_signature = "(tensor:Tensor)")] /// Applies the Rectified Linear Unit (ReLU) function to a given tensor. /// &RETURNS&: Tensor fn relu(tensor: PyTensor) -> PyResult<PyTensor> { let s = tensor.0.relu().map_err(wrap_err)?; Ok(PyTensor(s)) } #[pyfunction] #[pyo3(text_signature = "(tensor:Tensor)")] /// Applies the tanh function to a given tensor.
/// &RETURNS&: Tensor fn tanh(tensor: PyTensor) -> PyResult<PyTensor> { let s = tensor.0.tanh().map_err(wrap_err)?; Ok(PyTensor(s)) } fn candle_functional_m(_py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_function(wrap_pyfunction!(silu, m)?)?; m.add_function(wrap_pyfunction!(softmax, m)?)?; m.add_function(wrap_pyfunction!(max_pool2d, m)?)?; m.add_function(wrap_pyfunction!(avg_pool2d, m)?)?; m.add_function(wrap_pyfunction!(gelu, m)?)?; m.add_function(wrap_pyfunction!(relu, m)?)?; m.add_function(wrap_pyfunction!(tanh, m)?)?; Ok(()) } #[cfg(feature = "onnx")] fn candle_onnx_m(_py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { use onnx::{PyONNXModel, PyONNXTensorDescriptor}; m.add_class::<PyONNXModel>()?; m.add_class::<PyONNXTensorDescriptor>()?; Ok(()) } #[pymodule] fn candle(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { let utils = PyModule::new_bound(py, "utils")?; candle_utils(py, &utils)?; m.add_submodule(&utils)?; let nn = PyModule::new_bound(py, "functional")?; candle_functional_m(py, &nn)?; m.add_submodule(&nn)?; #[cfg(feature = "onnx")] { let onnx = PyModule::new_bound(py, "onnx")?; candle_onnx_m(py, &onnx)?; m.add_submodule(&onnx)?; } m.add_class::<PyTensor>()?; m.add_class::<PyQTensor>()?; m.add_class::<PyDType>()?; m.add("u8", PyDType(DType::U8))?; m.add("u32", PyDType(DType::U32))?; m.add("i64", PyDType(DType::I64))?; m.add("bf16", PyDType(DType::BF16))?; m.add("f16", PyDType(DType::F16))?; m.add("f32", PyDType(DType::F32))?; m.add("f64", PyDType(DType::F64))?; m.add_function(wrap_pyfunction!(cat, m)?)?; m.add_function(wrap_pyfunction!(ones, m)?)?; m.add_function(wrap_pyfunction!(rand, m)?)?; m.add_function(wrap_pyfunction!(randn, m)?)?; m.add_function(wrap_pyfunction!(tensor, m)?)?; m.add_function(wrap_pyfunction!(stack, m)?)?; m.add_function(wrap_pyfunction!(zeros, m)?)?; Ok(()) }
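// A rough sketch of the Python-side API this module exposes once built (e.g. with maturin);
// only names registered above are used, the tensor values are illustrative:
//
//   import candle
//   t = candle.tensor([[1.0, 2.0], [3.0, 4.0]])
//   t.shape                               # (2, 2)
//   s = candle.functional.softmax(t, -1)
//   q = t.quantize("q4_0")                # returns a QTensor
//   d = q.dequantize()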
candle/candle-pyo3/src/lib.rs/0
{ "file_path": "candle/candle-pyo3/src/lib.rs", "repo_id": "candle", "token_count": 29668 }
//! Logit Processing and Sampling //! //! Functionality for modeling sampling strategies and logits processing in text generation //! with support for temperature-based sampling, top-k filtering, nucleus sampling (top-p), //! and combinations thereof. use candle::{Context, DType, Error, Result, Tensor}; use rand::{distributions::Distribution, SeedableRng}; #[derive(Clone, PartialEq, Debug)] pub enum Sampling { ArgMax, All { temperature: f64 }, TopK { k: usize, temperature: f64 }, TopP { p: f64, temperature: f64 }, TopKThenTopP { k: usize, p: f64, temperature: f64 }, } pub struct LogitsProcessor { rng: rand::rngs::StdRng, sampling: Sampling, } impl LogitsProcessor { pub fn from_sampling(seed: u64, sampling: Sampling) -> Self { let rng = rand::rngs::StdRng::seed_from_u64(seed); Self { rng, sampling } } pub fn new(seed: u64, temperature: Option<f64>, top_p: Option<f64>) -> Self { let temperature = temperature.and_then(|v| if v < 1e-7 { None } else { Some(v) }); let sampling = match temperature { None => Sampling::ArgMax, Some(temperature) => match top_p { None => Sampling::All { temperature }, Some(p) => Sampling::TopP { p, temperature }, }, }; Self::from_sampling(seed, sampling) } fn sample_argmax(&mut self, logits: Tensor) -> Result<u32> { let logits_v: Vec<f32> = logits.to_vec1()?; let next_token = logits_v .iter() .enumerate() .max_by(|(_, u), (_, v)| u.total_cmp(v)) .map(|(i, _)| i as u32) .context("empty logits")?; Ok(next_token) } fn sample_multinomial(&mut self, prs: &Vec<f32>) -> Result<u32> { let distr = rand::distributions::WeightedIndex::new(prs).map_err(Error::wrap)?; let next_token = distr.sample(&mut self.rng) as u32; Ok(next_token) } /// top-p sampling (or "nucleus sampling") samples from the smallest set of tokens that exceed /// probability top_p. This way we never sample tokens that have very low probabilities and are /// less likely to go "off the rails". fn sample_topp(&mut self, prs: &mut Vec<f32>, top_p: f32) -> Result<u32> { let mut argsort_indices = (0..prs.len()).collect::<Vec<_>>(); // Sort by descending probability. argsort_indices.sort_by(|&i, &j| prs[j].total_cmp(&prs[i])); // Clamp smaller probabilities to zero. let mut cumsum = 0.; for index in &argsort_indices { if cumsum >= top_p { prs[*index] = 0.0; } else { cumsum += prs[*index]; } } // Sample with clamped probabilities. self.sample_multinomial(prs) } // top-k sampling samples from the k tokens with the largest probabilities. fn sample_topk(&mut self, prs: &mut Vec<f32>, top_k: usize) -> Result<u32> { if top_k >= prs.len() { self.sample_multinomial(prs) } else { let mut argsort_indices = (0..prs.len()).collect::<Vec<_>>(); let (indices, _, _) = argsort_indices.select_nth_unstable_by(top_k, |&i, &j| prs[j].total_cmp(&prs[i])); let prs = indices.iter().map(|&i| prs[i]).collect::<Vec<_>>(); let index = self.sample_multinomial(&prs)?; Ok(indices[index as usize] as u32) } } // top-k sampling samples from the k tokens with the largest probabilities. // then top-p sampling. fn sample_topk_topp(&mut self, prs: &mut Vec<f32>, top_k: usize, top_p: f32) -> Result<u32> { if top_k >= prs.len() { self.sample_topp(prs, top_p) } else { let mut argsort_indices = (0..prs.len()).collect::<Vec<_>>(); let (indices, _, _) = argsort_indices.select_nth_unstable_by(top_k, |&i, &j| prs[j].total_cmp(&prs[i])); let mut prs = indices.iter().map(|&i| prs[i]).collect::<Vec<_>>(); let sum_p = prs.iter().sum::<f32>(); let index = if top_p <= 0.0 || top_p >= sum_p { self.sample_multinomial(&prs)? } else { self.sample_topp(&mut prs, top_p)? 
}; Ok(indices[index as usize] as u32) } } pub fn sample(&mut self, logits: &Tensor) -> Result<u32> { self.sample_f(logits, |_| {}) } pub fn sample_f(&mut self, logits: &Tensor, f: impl FnOnce(&mut [f32])) -> Result<u32> { let logits = logits.to_dtype(DType::F32)?; let prs = |temperature: f64| -> Result<Vec<f32>> { let logits = (&logits / temperature)?; let prs = candle_nn::ops::softmax_last_dim(&logits)?; let mut prs = prs.to_vec1()?; f(&mut prs); Ok(prs) }; let next_token = match &self.sampling { Sampling::ArgMax => self.sample_argmax(logits)?, Sampling::All { temperature } => { let prs = prs(*temperature)?; self.sample_multinomial(&prs)? } Sampling::TopP { p, temperature } => { let mut prs = prs(*temperature)?; if *p <= 0.0 || *p >= 1.0 { // simply sample from the predicted probability distribution self.sample_multinomial(&prs)? } else { // top-p (nucleus) sampling, clamping the least likely tokens to zero self.sample_topp(&mut prs, *p as f32)? } } Sampling::TopK { k, temperature } => { let mut prs = prs(*temperature)?; self.sample_topk(&mut prs, *k)? } Sampling::TopKThenTopP { k, p, temperature } => { let mut prs = prs(*temperature)?; self.sample_topk_topp(&mut prs, *k, *p as f32)? } }; Ok(next_token) } }
candle/candle-transformers/src/generation/mod.rs/0
{ "file_path": "candle/candle-transformers/src/generation/mod.rs", "repo_id": "candle", "token_count": 2975 }
//! Colpali Model for text/image similarity scoring. //! //! Colpali combines a vision encoder with an efficient LM for retrieving content. //! use candle::{Module, Result, Tensor}; use candle_nn::VarBuilder; use super::paligemma; use candle_nn::{linear, Linear}; pub struct Model { pub model: paligemma::Model, pub custom_text_projection: Linear, } impl Model { pub fn new(config: &paligemma::Config, vb: VarBuilder) -> Result<Self> { let model = paligemma::Model::new(config, vb.pp("model"))?; let custom_text_projection = linear( config.text_config.hidden_size, 128, vb.pp("custom_text_proj"), )?; Ok(Self { model, custom_text_projection, }) } pub fn forward_images(&mut self, pixel_values: &Tensor, input_ids: &Tensor) -> Result<Tensor> { let outputs = self .model .setup_without_projection(pixel_values, input_ids)?; let outputs = self.custom_text_projection.forward(&outputs)?; let outputs = outputs.broadcast_div(&outputs.sqr()?.sum_keepdim(2)?.sqrt()?)?; Ok(outputs) } pub fn forward_text(&mut self, input_ids: &Tensor) -> Result<Tensor> { let outputs = self.model.forward_without_projection(input_ids)?; let outputs = self.custom_text_projection.forward(&outputs)?; let outputs = outputs.broadcast_div(&outputs.sqr()?.sum_keepdim(2)?.sqrt()?)?; Ok(outputs) } }
candle/candle-transformers/src/models/colpali.rs/0
{ "file_path": "candle/candle-transformers/src/models/colpali.rs", "repo_id": "candle", "token_count": 648 }
//! Flux Model //! //! Flux is a 12B rectified flow transformer capable of generating images from text descriptions. //! //! - 🤗 [Hugging Face Model](https://huggingface.co/black-forest-labs/FLUX.1-schnell) //! - 💻 [GitHub Repository](https://github.com/black-forest-labs/flux) //! - 📝 [Blog Post](https://blackforestlabs.ai/announcing-black-forest-labs/) //! //! # Usage //! //! ```bash //! cargo run --features cuda \ //! --example flux -r -- \ //! --height 1024 --width 1024 \ //! --prompt "a rusty robot walking on a beach holding a small torch, \ //! the robot has the word \"rust\" written on it, high quality, 4k" //! ``` //! //! <div align=center> //! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/flux/assets/flux-robot.jpg" alt="" width=320> //! </div> //! use candle::{Result, Tensor}; pub trait WithForward { #[allow(clippy::too_many_arguments)] fn forward( &self, img: &Tensor, img_ids: &Tensor, txt: &Tensor, txt_ids: &Tensor, timesteps: &Tensor, y: &Tensor, guidance: Option<&Tensor>, ) -> Result<Tensor>; } pub mod autoencoder; pub mod model; pub mod quantized_model; pub mod sampling;
candle/candle-transformers/src/models/flux/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/flux/mod.rs", "repo_id": "candle", "token_count": 530 }
pub fn get_anyres_image_grid_shape( image_size: (u32, u32), grid_pinpoints: &[(u32, u32)], patch_size: u32, ) -> (u32, u32) { let (width, height) = select_best_resolution(image_size, grid_pinpoints); (width / patch_size, height / patch_size) } pub fn select_best_resolution( original_size: (u32, u32), possible_resolutions: &[(u32, u32)], ) -> (u32, u32) { let (original_width, original_height) = original_size; let mut best_fit = (0, 0); let original_width_f = original_width as f32; let original_height_f = original_height as f32; let mut max_effective_resolution = 0_u32; let mut min_wasted_resolution = u32::MAX; for (width, height) in possible_resolutions { let width_f = *width as f32; let height_f = *height as f32; let scale = (width_f / original_width_f).min(height_f / original_height_f); let (downscaled_width, downscaled_height) = ( (original_width_f * scale) as u32, (original_height_f * scale) as u32, ); let effective_resolution = std::cmp::min((*width) * (*height), downscaled_width * downscaled_height); let wasted_resolution = (*width) * (*height) - effective_resolution; if effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_resolution < min_wasted_resolution) { best_fit = (*width, *height); max_effective_resolution = effective_resolution; min_wasted_resolution = wasted_resolution; } } best_fit }
candle/candle-transformers/src/models/llava/utils.rs/0
{ "file_path": "candle/candle-transformers/src/models/llava/utils.rs", "repo_id": "candle", "token_count": 689 }
// Implement the MMDiT model originally introduced for Stable Diffusion 3 (https://arxiv.org/abs/2403.03206), // as well as the MMDiT-X variant introduced for Stable Diffusion 3.5-medium (https://huggingface.co/stabilityai/stable-diffusion-3.5-medium) // This follows the implementation of the MMDiT model in the ComfyUI repository. // https://github.com/comfyanonymous/ComfyUI/blob/78e133d0415784924cd2674e2ee48f3eeca8a2aa/comfy/ldm/modules/diffusionmodules/mmdit.py#L1 // with MMDiT-X support following the Stability-AI/sd3.5 repository. // https://github.com/Stability-AI/sd3.5/blob/4e484e05308d83fb77ae6f680028e6c313f9da54/mmditx.py#L1 use candle::{Module, Result, Tensor, D}; use candle_nn as nn; use super::blocks::{ ContextQkvOnlyJointBlock, FinalLayer, JointBlock, MMDiTJointBlock, MMDiTXJointBlock, }; use super::embedding::{ PatchEmbedder, PositionEmbedder, TimestepEmbedder, Unpatchifier, VectorEmbedder, }; #[derive(Debug, Clone)] pub struct Config { pub patch_size: usize, pub in_channels: usize, pub out_channels: usize, pub depth: usize, pub head_size: usize, pub adm_in_channels: usize, pub pos_embed_max_size: usize, pub context_embed_size: usize, pub frequency_embedding_size: usize, } impl Config { pub fn sd3_medium() -> Self { Self { patch_size: 2, in_channels: 16, out_channels: 16, depth: 24, head_size: 64, adm_in_channels: 2048, pos_embed_max_size: 192, context_embed_size: 4096, frequency_embedding_size: 256, } } pub fn sd3_5_medium() -> Self { Self { patch_size: 2, in_channels: 16, out_channels: 16, depth: 24, head_size: 64, adm_in_channels: 2048, pos_embed_max_size: 384, context_embed_size: 4096, frequency_embedding_size: 256, } } pub fn sd3_5_large() -> Self { Self { patch_size: 2, in_channels: 16, out_channels: 16, depth: 38, head_size: 64, adm_in_channels: 2048, pos_embed_max_size: 192, context_embed_size: 4096, frequency_embedding_size: 256, } } } pub struct MMDiT { core: MMDiTCore, patch_embedder: PatchEmbedder, pos_embedder: PositionEmbedder, timestep_embedder: TimestepEmbedder, vector_embedder: VectorEmbedder, context_embedder: nn::Linear, unpatchifier: Unpatchifier, } impl MMDiT { pub fn new(cfg: &Config, use_flash_attn: bool, vb: nn::VarBuilder) -> Result<Self> { let hidden_size = cfg.head_size * cfg.depth; let core = MMDiTCore::new( cfg.depth, hidden_size, cfg.depth, cfg.patch_size, cfg.out_channels, use_flash_attn, vb.clone(), )?; let patch_embedder = PatchEmbedder::new( cfg.patch_size, cfg.in_channels, hidden_size, vb.pp("x_embedder"), )?; let pos_embedder = PositionEmbedder::new( hidden_size, cfg.patch_size, cfg.pos_embed_max_size, vb.clone(), )?; let timestep_embedder = TimestepEmbedder::new( hidden_size, cfg.frequency_embedding_size, vb.pp("t_embedder"), )?; let vector_embedder = VectorEmbedder::new(cfg.adm_in_channels, hidden_size, vb.pp("y_embedder"))?; let context_embedder = nn::linear( cfg.context_embed_size, hidden_size, vb.pp("context_embedder"), )?; let unpatchifier = Unpatchifier::new(cfg.patch_size, cfg.out_channels)?; Ok(Self { core, patch_embedder, pos_embedder, timestep_embedder, vector_embedder, context_embedder, unpatchifier, }) } pub fn forward( &self, x: &Tensor, t: &Tensor, y: &Tensor, context: &Tensor, skip_layers: Option<&[usize]>, ) -> Result<Tensor> { // Following the convention of the ComfyUI implementation. // https://github.com/comfyanonymous/ComfyUI/blob/78e133d0415784924cd2674e2ee48f3eeca8a2aa/comfy/ldm/modules/diffusionmodules/mmdit.py#L919 // // Forward pass of DiT. 
// x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images) // t: (N,) tensor of diffusion timesteps // y: (N,) tensor of class labels let h = x.dim(D::Minus2)?; let w = x.dim(D::Minus1)?; let cropped_pos_embed = self.pos_embedder.get_cropped_pos_embed(h, w)?; let x = self .patch_embedder .forward(x)? .broadcast_add(&cropped_pos_embed)?; let c = self.timestep_embedder.forward(t)?; let y = self.vector_embedder.forward(y)?; let c = (c + y)?; let context = self.context_embedder.forward(context)?; let x = self.core.forward(&context, &x, &c, skip_layers)?; let x = self.unpatchifier.unpatchify(&x, h, w)?; x.narrow(2, 0, h)?.narrow(3, 0, w) } } pub struct MMDiTCore { joint_blocks: Vec<Box<dyn JointBlock>>, context_qkv_only_joint_block: ContextQkvOnlyJointBlock, final_layer: FinalLayer, } impl MMDiTCore { pub fn new( depth: usize, hidden_size: usize, num_heads: usize, patch_size: usize, out_channels: usize, use_flash_attn: bool, vb: nn::VarBuilder, ) -> Result<Self> { let mut joint_blocks = Vec::with_capacity(depth - 1); for i in 0..depth - 1 { let joint_block_vb_pp = format!("joint_blocks.{}", i); let joint_block: Box<dyn JointBlock> = if vb.contains_tensor(&format!("{}.x_block.attn2.qkv.weight", joint_block_vb_pp)) { Box::new(MMDiTXJointBlock::new( hidden_size, num_heads, use_flash_attn, vb.pp(&joint_block_vb_pp), )?) } else { Box::new(MMDiTJointBlock::new( hidden_size, num_heads, use_flash_attn, vb.pp(&joint_block_vb_pp), )?) }; joint_blocks.push(joint_block); } Ok(Self { joint_blocks, context_qkv_only_joint_block: ContextQkvOnlyJointBlock::new( hidden_size, num_heads, use_flash_attn, vb.pp(format!("joint_blocks.{}", depth - 1)), )?, final_layer: FinalLayer::new( hidden_size, patch_size, out_channels, vb.pp("final_layer"), )?, }) } pub fn forward( &self, context: &Tensor, x: &Tensor, c: &Tensor, skip_layers: Option<&[usize]>, ) -> Result<Tensor> { let (mut context, mut x) = (context.clone(), x.clone()); for (i, joint_block) in self.joint_blocks.iter().enumerate() { if let Some(skip_layers) = &skip_layers { if skip_layers.contains(&i) { continue; } } (context, x) = joint_block.forward(&context, &x, c)?; } let x = self.context_qkv_only_joint_block.forward(&context, &x, c)?; self.final_layer.forward(&x, c) } }
candle/candle-transformers/src/models/mmdit/model.rs/0
{ "file_path": "candle/candle-transformers/src/models/mmdit/model.rs", "repo_id": "candle", "token_count": 4203 }
//! Parler Model implementation for parler_tts text-to-speech synthesis //! //! Implements a transformer-based decoder architecture for generating audio tokens //! from text using discrete tokens. The model converts text into audio segments //! using multiple codebooks of quantized audio tokens. //! //! The model architecture includes: //! - Multi-head attention layers for text and audio processing //! - Feed-forward networks //! - Layer normalization //! - Positional embeddings //! - Multiple codebook prediction heads //! //! The implementation follows the original parler_tts architecture while focusing //! on audio token generation for text-to-speech synthesis. //! use crate::generation::LogitsProcessor; use crate::models::t5; use candle::{IndexOp, Result, Tensor}; use candle_nn::{layer_norm, linear_b as linear, Activation, LayerNorm, Linear, VarBuilder}; #[derive(serde::Deserialize, Debug, Clone)] pub struct DecoderConfig { pub vocab_size: usize, pub max_position_embeddings: usize, pub num_hidden_layers: usize, pub ffn_dim: usize, pub num_attention_heads: usize, pub num_key_value_heads: Option<usize>, pub num_cross_attention_key_value_heads: Option<usize>, pub activation_function: Activation, pub hidden_size: usize, pub scale_embedding: bool, pub num_codebooks: usize, pub pad_token_id: usize, pub bos_token_id: usize, pub eos_token_id: usize, pub tie_word_embeddings: bool, pub rope_embeddings: bool, pub rope_theta: f64, } #[derive(serde::Deserialize, Debug, Clone)] pub struct Config { pub decoder_start_token_id: u32, pub pad_token_id: u32, pub decoder: DecoderConfig, pub text_encoder: t5::Config, pub vocab_size: usize, pub audio_encoder: crate::models::dac::Config, } #[derive(Debug, Clone)] pub struct Attention { k_proj: Linear, v_proj: Linear, q_proj: Linear, out_proj: Linear, is_causal: bool, kv_cache: Option<(Tensor, Tensor)>, scaling: f64, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, } impl Attention { fn new( num_kv_heads: usize, is_causal: bool, cfg: &DecoderConfig, vb: VarBuilder, ) -> Result<Self> { if cfg.rope_embeddings { candle::bail!("rope embeddings are not supported"); } let embed_dim = cfg.hidden_size; let head_dim = embed_dim / cfg.num_attention_heads; let kv_out_dim = num_kv_heads * head_dim; let k_proj = linear(embed_dim, kv_out_dim, false, vb.pp("k_proj"))?; let v_proj = linear(embed_dim, kv_out_dim, false, vb.pp("v_proj"))?; let q_proj = linear(embed_dim, embed_dim, false, vb.pp("q_proj"))?; let out_proj = linear(embed_dim, embed_dim, false, vb.pp("out_proj"))?; Ok(Self { k_proj, v_proj, q_proj, out_proj, is_causal, kv_cache: None, scaling: (head_dim as f64).powf(-0.5), num_heads: cfg.num_attention_heads, num_kv_heads, num_kv_groups: cfg.num_attention_heads / num_kv_heads, head_dim, }) } fn forward( &mut self, xs: &Tensor, key_value_states: Option<&Tensor>, attention_mask: Option<&Tensor>, ) -> Result<Tensor> { let (b_sz, tgt_len, _) = xs.dims3()?; let query_states = (xs.apply(&self.q_proj)? * self.scaling)? .reshape((b_sz, tgt_len, self.num_heads, self.head_dim))? .transpose(1, 2)? .contiguous()?; let key_states = match key_value_states { Some(states) => states.apply(&self.k_proj)?, None => xs.apply(&self.k_proj)?, }; let key_states = key_states .reshape((b_sz, (), self.num_kv_heads, self.head_dim))? .transpose(1, 2)? 
.contiguous()?; let value_states = match key_value_states { Some(states) => states.apply(&self.v_proj)?, None => xs.apply(&self.v_proj)?, }; let value_states = value_states .reshape((b_sz, (), self.num_kv_heads, self.head_dim))? .transpose(1, 2)? .contiguous()?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; if self.is_causal { self.kv_cache = Some((key_states.clone(), value_states.clone())); } let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?; let attn_weights = query_states.matmul(&key_states.transpose(2, 3)?)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; let attn_output = attn_weights.matmul(&value_states)?; attn_output .transpose(1, 2)? .reshape((b_sz, tgt_len, ()))? .apply(&self.out_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] pub struct DecoderLayer { self_attn: Attention, self_attn_layer_norm: LayerNorm, encoder_attn: Attention, encoder_attn_layer_norm: LayerNorm, fc1: Linear, fc2: Linear, final_layer_norm: LayerNorm, activation: Activation, } impl DecoderLayer { fn new(cfg: &DecoderConfig, vb: VarBuilder) -> Result<Self> { let kv_heads = cfg.num_key_value_heads.unwrap_or(cfg.num_attention_heads); let kv_heads_cross = cfg.num_cross_attention_key_value_heads.unwrap_or(kv_heads); let self_attn = Attention::new(kv_heads, true, cfg, vb.pp("self_attn"))?; let encoder_attn = Attention::new(kv_heads_cross, false, cfg, vb.pp("encoder_attn"))?; let self_attn_layer_norm = layer_norm(cfg.hidden_size, 1e-5, vb.pp("self_attn_layer_norm"))?; let encoder_attn_layer_norm = layer_norm(cfg.hidden_size, 1e-5, vb.pp("encoder_attn_layer_norm"))?; let fc1 = linear(cfg.hidden_size, cfg.ffn_dim, false, vb.pp("fc1"))?; let fc2 = linear(cfg.ffn_dim, cfg.hidden_size, false, vb.pp("fc2"))?; let final_layer_norm = layer_norm(cfg.hidden_size, 1e-5, vb.pp("final_layer_norm"))?; Ok(Self { self_attn, self_attn_layer_norm, encoder_attn, encoder_attn_layer_norm, fc1, fc2, final_layer_norm, activation: cfg.activation_function, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, encoder_xs: &Tensor, encoder_attention_mask: Option<&Tensor>, ) -> Result<Tensor> { // Self attention let residual = xs; let xs = xs.apply(&self.self_attn_layer_norm)?; let xs = self.self_attn.forward(&xs, None, attention_mask)?; let xs = (residual + xs)?; // Cross attention let residual = &xs; let xs = xs.apply(&self.encoder_attn_layer_norm)?; let xs = self .encoder_attn .forward(&xs, Some(encoder_xs), encoder_attention_mask)?; let xs = (residual + xs)?; // Fully connected let residual = &xs; let xs = xs .apply(&self.final_layer_norm)? .apply(&self.fc1)? .apply(&self.activation)? 
.apply(&self.fc2)?; residual + xs } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache(); self.encoder_attn.clear_kv_cache(); } } #[derive(Debug, Clone)] pub struct Decoder { embed_tokens: Vec<candle_nn::Embedding>, embed_positions: Tensor, layers: Vec<DecoderLayer>, layer_norm: LayerNorm, num_codebooks: usize, hidden_size: usize, lm_heads: Vec<Linear>, dtype: candle::DType, } impl Decoder { pub fn new(cfg: &DecoderConfig, vb: VarBuilder) -> Result<Self> { let vb_d = vb.pp("model.decoder"); let mut embed_tokens = Vec::with_capacity(cfg.num_codebooks); let vb_e = vb_d.pp("embed_tokens"); for embed_idx in 0..cfg.num_codebooks { let e = candle_nn::embedding(cfg.vocab_size + 1, cfg.hidden_size, vb_e.pp(embed_idx))?; embed_tokens.push(e) } let embed_positions = vb_d.get( (cfg.max_position_embeddings, cfg.hidden_size), "embed_positions.weights", )?; let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_d.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let layer_norm = layer_norm(cfg.hidden_size, 1e-5, vb_d.pp("layer_norm"))?; let mut lm_heads = Vec::with_capacity(cfg.num_codebooks); let vb_l = vb.pp("lm_heads"); for lm_idx in 0..cfg.num_codebooks { let lm_head = linear(cfg.hidden_size, cfg.vocab_size, false, vb_l.pp(lm_idx))?; lm_heads.push(lm_head) } Ok(Self { embed_tokens, embed_positions, layers, layer_norm, num_codebooks: cfg.num_codebooks, lm_heads, hidden_size: cfg.hidden_size, dtype: vb.dtype(), }) } pub fn forward( &mut self, input_ids: &Tensor, prompt_hidden_states: Option<&Tensor>, attention_mask: Option<&Tensor>, encoder_xs: &Tensor, encoder_attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Vec<Tensor>> { let (b_sz, num_codebooks, seq_len) = input_ids.dims3()?; if num_codebooks != self.num_codebooks { candle::bail!("unexpected num codebooks in input {:?}", input_ids.shape()) } let mut inputs_embeds = Tensor::zeros( (b_sz, seq_len, self.hidden_size), self.dtype, input_ids.device(), )?; for (idx, embs) in self.embed_tokens.iter().enumerate() { let e = input_ids.i((.., idx))?.apply(embs)?; inputs_embeds = (inputs_embeds + e)? 
} let inputs_embeds = match prompt_hidden_states { None => inputs_embeds, Some(pis) => Tensor::cat(&[pis, &inputs_embeds], 1)?, }; let embed_positions = self .embed_positions .i(seqlen_offset..seqlen_offset + inputs_embeds.dim(1)?)?; let mut xs = (inputs_embeds + embed_positions.unsqueeze(0))?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask, encoder_xs, encoder_attention_mask)?; } let xs = xs.apply(&self.layer_norm)?; let mut lm_logits = Vec::with_capacity(self.num_codebooks); for lm_head in self.lm_heads.iter() { let logits = xs.apply(lm_head)?; lm_logits.push(logits) } Ok(lm_logits) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } } #[derive(Debug, Clone)] pub struct Model { pub embed_prompts: candle_nn::Embedding, pub enc_to_dec_proj: Option<Linear>, pub decoder: Decoder, pub text_encoder: t5::T5EncoderModel, pub decoder_start_token_id: u32, pub pad_token_id: u32, pub audio_encoder: crate::models::dac::Model, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let text_encoder = t5::T5EncoderModel::load(vb.pp("text_encoder"), &cfg.text_encoder)?; let decoder = Decoder::new(&cfg.decoder, vb.pp("decoder"))?; let embed_prompts = candle_nn::embedding( cfg.vocab_size, cfg.decoder.hidden_size, vb.pp("embed_prompts"), )?; let enc_to_dec_proj = if cfg.text_encoder.d_model != cfg.decoder.hidden_size { let proj = linear( cfg.text_encoder.d_model, cfg.decoder.hidden_size, true, vb.pp("enc_to_dec_proj"), )?; Some(proj) } else { None }; let audio_encoder = crate::models::dac::Model::new(&cfg.audio_encoder, vb.pp("audio_encoder"))?; Ok(Self { decoder, text_encoder, embed_prompts, enc_to_dec_proj, decoder_start_token_id: cfg.decoder_start_token_id, pad_token_id: cfg.pad_token_id, audio_encoder, }) } /// Note that the returned tensor uses the CPU device. pub fn generate( &mut self, prompt_tokens: &Tensor, description_tokens: &Tensor, mut lp: LogitsProcessor, max_steps: usize, ) -> Result<Tensor> { self.decoder.clear_kv_cache(); self.text_encoder.clear_kv_cache(); let encoded = self.text_encoder.forward(description_tokens)?; let encoded = match self.enc_to_dec_proj.as_ref() { None => encoded, Some(proj) => encoded.apply(proj)?, }; let prompt_hidden_states = prompt_tokens.apply(&self.embed_prompts)?; let num_codebooks = self.decoder.num_codebooks; let mut audio_tokens = vec![self.decoder_start_token_id; num_codebooks]; let mut all_audio_tokens = vec![vec![]; num_codebooks]; let prompt_len = prompt_hidden_states.dim(1)?; for step in 0..max_steps { let input_ids = Tensor::from_slice( audio_tokens.as_slice(), (1, num_codebooks, 1), prompt_tokens.device(), )?; let (prompt_hidden_states, pos) = if step == 0 { (Some(&prompt_hidden_states), 0) } else { (None, step + prompt_len) }; let causal_mask = if pos == 0 { self.prepare_causal_mask(prompt_len + 1, prompt_len + 1, input_ids.device())? } else { self.prepare_causal_mask(1, pos + 1, input_ids.device())? }; let logits = self.decoder.forward( &input_ids, prompt_hidden_states, Some(&causal_mask), &encoded, None, pos, )?; for (logit_idx, logit) in logits.iter().enumerate() { if logit_idx > step { break; } if audio_tokens[logit_idx] != self.pad_token_id { let logit = logit.i((0, logit.dim(1)? 
- 1))?; let token = lp.sample(&logit)?; audio_tokens[logit_idx] = token } } if audio_tokens.iter().all(|v| v == &self.pad_token_id) { break; } for (cb_idx, &token) in audio_tokens.iter().enumerate() { if token != self.decoder_start_token_id && token != self.pad_token_id { all_audio_tokens[cb_idx].push(token) } } } let min_len = all_audio_tokens.iter().map(|v| v.len()).min().unwrap_or(0); all_audio_tokens.iter_mut().for_each(|v| { v.resize(min_len, 0); }); let all_audio_tokens = Tensor::new(all_audio_tokens, &candle::Device::Cpu)?; Ok(all_audio_tokens) } fn prepare_causal_mask( &self, q_len: usize, kv_len: usize, device: &candle::Device, ) -> Result<Tensor> { let mask: Vec<_> = (0..q_len) .flat_map(|i| { (0..kv_len).map(move |j| { if i + kv_len < j + q_len { f32::NEG_INFINITY } else { 0. } }) }) .collect(); Tensor::from_slice(&mask, (q_len, kv_len), device) } }
candle/candle-transformers/src/models/parler_tts.rs/0
{ "file_path": "candle/candle-transformers/src/models/parler_tts.rs", "repo_id": "candle", "token_count": 8561 }
//! Phi2 model implementation with quantization support. //! //! Phi2 is a 2.7B parameter language model using scaled-up Transformer decoder architecture. //! This implementation provides quantization for reduced memory and compute usage. //! //! Key characteristics: //! - Partial attention with learned mixing to reduce quadratic costs //! - Layer reuse for improved inference efficiency //! - Linear transformations with scalar mixing //! - Rotary positional embeddings (RoPE) //! - Support for 8-bit quantization //! //! References: //! - [Phi2 Paper](https://arxiv.org/abs/2309.05463) //! - [Model Card](https://huggingface.co/microsoft/phi-2) //! use std::collections::HashMap; use candle::quantized::gguf_file; use candle::quantized::QTensor; use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::{Embedding, LayerNorm}; pub const MAX_SEQ_LEN: usize = 4096; #[derive(Debug, Clone)] struct QLinear { inner: candle::quantized::QMatMul, bias: Tensor, span: tracing::Span, } impl QLinear { fn new<R: std::io::Read + std::io::Seek>( ct: &gguf_file::Content, r: &mut R, name: &str, device: &Device, ) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "qmatmul"); let w = ct.tensor(r, &format!("{name}.weight"), device)?; let b = ct.tensor(r, &format!("{name}.bias"), device)?; let inner = candle::quantized::QMatMul::from_qtensor(w)?; let bias = b.dequantize(device)?; Ok(Self { inner, bias, span }) } } impl Module for QLinear { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(xs)?.broadcast_add(&self.bias) } } #[derive(Debug, Clone)] struct Mlp { ffn_up: QLinear, ffn_down: QLinear, } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.ffn_up)?.gelu()?.apply(&self.ffn_down) } } #[derive(Debug, Clone)] struct LayerWeights { attn_qkv: QLinear, attn_output: QLinear, attn_norm: LayerNorm, mlp: Mlp, n_head: usize, n_kv_head: usize, head_dim: usize, cos: Tensor, sin: Tensor, rope_dim: usize, neg_inf: Tensor, kv_cache: Option<(Tensor, Tensor)>, span_attn: tracing::Span, span_rot: tracing::Span, } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: &Tensor) -> Result<Tensor> { let shape = mask.shape(); let m = mask.where_cond(&on_true.broadcast_as(shape.dims())?, on_false)?; Ok(m) } impl LayerWeights { fn apply_rotary_emb(&self, xs: &Tensor, index_pos: usize) -> Result<Tensor> { let _enter = self.span_rot.enter(); let (_b_sz, _n_head, seq_len, _n_embd) = xs.dims4()?; let xs_rot = xs.i((.., .., .., ..self.rope_dim))?; let xs_pass = xs.i((.., .., .., self.rope_dim..))?; let cos = self.cos.narrow(0, index_pos, seq_len)?; let sin = self.sin.narrow(0, index_pos, seq_len)?; let xs_rot = candle_nn::rotary_emb::rope(&xs_rot.contiguous()?, &cos, &sin)?; Tensor::cat(&[&xs_rot, &xs_pass], D::Minus1) } fn forward_attn( &mut self, x: &Tensor, mask: Option<&Tensor>, index_pos: usize, ) -> Result<Tensor> { let _enter = self.span_attn.enter(); let (b_sz, seq_len, n_embd) = x.dims3()?; let qkv = self.attn_qkv .forward(x)? .reshape((b_sz, seq_len, 3, self.n_head, self.head_dim))?; let q = qkv.i((.., .., 0))?.transpose(1, 2)?; let k = qkv.i((.., .., 1))?.transpose(1, 2)?; let v = qkv.i((.., .., 2))?.transpose(1, 2)?; // This call to contiguous ensures that the fast kernel can be called below. It's // actually a no-op except when processing the initial prompt so has no significant // impact on performance. 
let v = v.contiguous()?; let q = self.apply_rotary_emb(&q, index_pos)?.contiguous()?; let k = self.apply_rotary_emb(&k, index_pos)?; let (k, v) = match &self.kv_cache { None => (k.contiguous()?, v.contiguous()?), Some((k_cache, v_cache)) => { if index_pos == 0 { (k.contiguous()?, v.contiguous()?) } else { let k = Tensor::cat(&[k_cache, &k], 2)?; let v = Tensor::cat(&[v_cache, &v], 2)?; (k.contiguous()?, v.contiguous()?) } } }; self.kv_cache = Some((k.clone(), v.clone())); let k = crate::utils::repeat_kv(k, self.n_head / self.n_kv_head)?; let v = crate::utils::repeat_kv(v, self.n_head / self.n_kv_head)?; let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?; let att = match mask { None => att, Some(mask) => { let mask = mask.broadcast_as(att.shape())?; masked_fill(&att, &mask, &self.neg_inf)? } }; let att = candle_nn::ops::softmax_last_dim(&att)?; // Convert to contiguous as matmul doesn't support strided vs for now. let y = att.matmul(&v.contiguous()?)?; let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?; let y = self.attn_output.forward(&y)?; Ok(y) } } #[derive(Debug, Clone)] pub struct ModelWeights { tok_embeddings: Embedding, layers: Vec<LayerWeights>, output_norm: LayerNorm, output: QLinear, masks: HashMap<usize, Tensor>, span: tracing::Span, span_output: tracing::Span, } fn precomput_freqs_cis( head_dim: usize, freq_base: f32, device: &Device, ) -> Result<(Tensor, Tensor)> { let theta: Vec<_> = (0..head_dim) .step_by(2) .map(|i| 1f32 / freq_base.powf(i as f32 / head_dim as f32)) .collect(); let theta = Tensor::new(theta.as_slice(), device)?; let idx_theta = Tensor::arange(0, MAX_SEQ_LEN as u32, device)? .to_dtype(DType::F32)? .reshape((MAX_SEQ_LEN, 1))? .matmul(&theta.reshape((1, theta.elem_count()))?)?; let cos = idx_theta.cos()?; let sin = idx_theta.sin()?; Ok((cos, sin)) } fn layer_norm(w: QTensor, b: QTensor, eps: f64) -> Result<LayerNorm> { let w = w.dequantize(&w.device())?; let b = b.dequantize(&b.device())?; let ln = LayerNorm::new(w, b, eps); Ok(ln) } impl ModelWeights { pub fn from_gguf<R: std::io::Seek + std::io::Read>( ct: gguf_file::Content, reader: &mut R, device: &Device, ) -> Result<Self> { let md_get = |s: &str| match ct.metadata.get(s) { None => candle::bail!("cannot find {s} in metadata"), Some(v) => Ok(v), }; // Parameter extraction from metadata. let head_count = md_get("phi2.attention.head_count")?.to_u32()? as usize; let head_count_kv = md_get("phi2.attention.head_count_kv")?.to_u32()? as usize; let block_count = md_get("phi2.block_count")?.to_u32()? as usize; let embedding_length = md_get("phi2.embedding_length")?.to_u32()? as usize; let rope_dim = md_get("phi2.rope.dimension_count")?.to_u32()? as usize; let ln_eps = md_get("phi2.attention.layer_norm_epsilon")?.to_f32()? 
as f64; let (cos, sin) = precomput_freqs_cis(rope_dim, 10_000., device)?; let neg_inf = Tensor::new(f32::NEG_INFINITY, device)?; let tok_embeddings = ct.tensor(reader, "token_embd.weight", device)?; let tok_embeddings = tok_embeddings.dequantize(device)?; let output_norm = layer_norm( ct.tensor(reader, "output_norm.weight", device)?, ct.tensor(reader, "output_norm.bias", device)?, ln_eps, )?; let output = QLinear::new(&ct, reader, "output", device)?; let mut layers = Vec::with_capacity(block_count); for layer_idx in 0..block_count { let prefix = format!("blk.{layer_idx}"); let ffn_up = QLinear::new(&ct, reader, &format!("{prefix}.ffn_up"), device)?; let ffn_down = QLinear::new(&ct, reader, &format!("{prefix}.ffn_down"), device)?; let mlp = Mlp { ffn_up, ffn_down }; let attn_norm = layer_norm( ct.tensor(reader, &format!("{prefix}.attn_norm.weight"), device)?, ct.tensor(reader, &format!("{prefix}.attn_norm.bias"), device)?, ln_eps, )?; let span_attn = tracing::span!(tracing::Level::TRACE, "attn"); let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot"); layers.push(LayerWeights { attn_qkv: QLinear::new(&ct, reader, &format!("{prefix}.attn_qkv"), device)?, attn_output: QLinear::new(&ct, reader, &format!("{prefix}.attn_output"), device)?, attn_norm, mlp, n_head: head_count, n_kv_head: head_count_kv, head_dim: embedding_length / head_count, cos: cos.clone(), sin: sin.clone(), rope_dim, neg_inf: neg_inf.clone(), kv_cache: None, span_attn, span_rot, }) } let span = tracing::span!(tracing::Level::TRACE, "model"); let span_output = tracing::span!(tracing::Level::TRACE, "output"); Ok(Self { tok_embeddings: Embedding::new(tok_embeddings, embedding_length), layers, output_norm, output, masks: HashMap::new(), span, span_output, }) } fn mask(&mut self, t: usize, device: &Device) -> Result<Tensor> { if let Some(mask) = self.masks.get(&t) { Ok(mask.clone()) } else { let mask: Vec<_> = (0..t) .flat_map(|i| (0..t).map(move |j| u8::from(j > i))) .collect(); let mask = Tensor::from_slice(&mask, (t, t), device)?; self.masks.insert(t, mask.clone()); Ok(mask) } } pub fn forward(&mut self, xs: &Tensor, index_pos: usize) -> Result<Tensor> { let (_b_sz, seq_len) = xs.dims2()?; let mask = if seq_len == 1 { None } else { Some(self.mask(seq_len, xs.device())?) }; let _enter = self.span.enter(); let mut xs = self.tok_embeddings.forward(xs)?; for layer in self.layers.iter_mut() { let residual = &xs; let xs_norm = xs.apply(&layer.attn_norm)?; let attn_outputs = layer.forward_attn(&xs_norm, mask.as_ref(), index_pos)?; let feed_forward_hidden_states = layer.mlp.forward(&xs_norm)?; xs = (attn_outputs + feed_forward_hidden_states + residual)? } let xs = xs.apply(&self.output_norm)?.i((.., seq_len - 1, ..))?; let _enter = self.span_output.enter(); self.output.forward(&xs) } }
candle/candle-transformers/src/models/quantized_phi.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_phi.rs", "repo_id": "candle", "token_count": 5545 }
use candle::{DType, IndexOp, Result, Tensor}; use candle_nn::{layer_norm, LayerNorm, Module, VarBuilder}; #[derive(Debug)] struct PatchEmbed { proj: candle_nn::Conv2d, span: tracing::Span, } impl PatchEmbed { fn new( in_chans: usize, embed_dim: usize, k_size: usize, stride: usize, padding: usize, vb: VarBuilder, ) -> Result<Self> { let cfg = candle_nn::Conv2dConfig { stride, padding, ..Default::default() }; let proj = candle_nn::conv2d(in_chans, embed_dim, k_size, cfg, vb.pp("proj"))?; let span = tracing::span!(tracing::Level::TRACE, "patch-embed"); Ok(Self { proj, span }) } } impl Module for PatchEmbed { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); xs.apply(&self.proj)?.permute((0, 2, 3, 1)) } } // A custom op to make add_decomposed_rel_pos faster. Most of the time is spent on the final // addition in the case where b = 12, q_h = q_w = 4096, k_h = k_w = 4096 // (attn.reshape((b, q_h, q_w, k_h, k_w))? // + rel_h.unsqueeze(4)?.broadcast_add(&rel_w.unsqueeze(3)?)?)? // .reshape((b, q_h * q_w, k_h * k_w)) // Ideally we would perform this operation in place but this is not supported in candle at the // moment. We should also investigate using f16 rather than f32. struct Add3(usize, usize, usize, usize, usize); impl candle::CustomOp3 for Add3 { fn name(&self) -> &'static str { "add3" } fn cpu_fwd( &self, s1: &candle::CpuStorage, l1: &candle::Layout, s2: &candle::CpuStorage, l2: &candle::Layout, s3: &candle::CpuStorage, l3: &candle::Layout, ) -> Result<(candle::CpuStorage, candle::Shape)> { use rayon::prelude::*; let Add3(b, q_h, q_w, k_h, k_w) = *self; let s1 = s1.as_slice::<f32>()?; let s1 = match l1.contiguous_offsets() { None => candle::bail!("input1 has to be contiguous"), Some((o1, o2)) => &s1[o1..o2], }; let s2 = s2.as_slice::<f32>()?; let s2 = match l2.contiguous_offsets() { None => candle::bail!("input2 has to be contiguous"), Some((o1, o2)) => &s2[o1..o2], }; let s3 = s3.as_slice::<f32>()?; let s3 = match l3.contiguous_offsets() { None => candle::bail!("input3 has to be contiguous"), Some((o1, o2)) => &s3[o1..o2], }; let mut dst = vec![0f32; b * q_h * q_w * k_h * k_w]; dst.par_chunks_exact_mut(k_h * k_w) .enumerate() .for_each(|(b_idx, dst)| { let s1_idx = b_idx * k_h * k_w; let s2_idx = b_idx * k_h; let s3_idx = b_idx * k_w; for h_idx in 0..k_h { let s1_idx = s1_idx + h_idx * k_w; let s2_idx = s2_idx + h_idx; let dst_idx = h_idx * k_w; for w_idx in 0..k_w { let s1_idx = s1_idx + w_idx; let s3_idx = s3_idx + w_idx; let dst_idx = dst_idx + w_idx; dst[dst_idx] = s1[s1_idx] + s2[s2_idx] + s3[s3_idx] } } }); let dst = candle::WithDType::to_cpu_storage_owned(dst); Ok((dst, (b, q_h * q_w, k_h * k_w).into())) } } #[derive(Debug)] struct Attention { qkv: super::Linear, proj: super::Linear, num_heads: usize, scale: f64, rel_pos_hw: Option<(Tensor, Tensor)>, span: tracing::Span, span_matmul: tracing::Span, span_rel_pos: tracing::Span, span_softmax: tracing::Span, } impl Attention { fn new( dim: usize, num_heads: usize, qkv_bias: bool, use_rel_pos: bool, input_size: (usize, usize), vb: VarBuilder, ) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "attention"); let span_matmul = tracing::span!(tracing::Level::TRACE, "attn-matmul"); let span_rel_pos = tracing::span!(tracing::Level::TRACE, "attn-rel-pos"); let span_softmax = tracing::span!(tracing::Level::TRACE, "attn-sm"); let qkv = super::linear(vb.pp("qkv"), dim, dim * 3, qkv_bias)?; let proj = super::linear(vb.pp("proj"), dim, dim, true)?; let head_dim = dim / num_heads; let scale = 1. 
/ (head_dim as f64).sqrt(); let rel_pos_hw = if use_rel_pos { let h = vb.get((2 * input_size.0 - 1, head_dim), "rel_pos_h")?; let w = vb.get((2 * input_size.1 - 1, head_dim), "rel_pos_w")?; Some((h, w)) } else { None }; Ok(Self { qkv, proj, num_heads, scale, rel_pos_hw, span, span_matmul, span_rel_pos, span_softmax, }) } fn add_decomposed_rel_pos( &self, attn: Tensor, q: &Tensor, (q_h, q_w): (usize, usize), (k_h, k_w): (usize, usize), ) -> Result<Tensor> { match &self.rel_pos_hw { Some((rel_pos_h, rel_pos_w)) => { let r_h = get_rel_pos(q_h, k_h, rel_pos_h)?; let r_w = get_rel_pos(q_w, k_w, rel_pos_w)?; let (b, _, dim) = q.dims3()?; let r_q = q.reshape((b, q_h, q_w, dim))?; // rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) let rel_h = r_q.matmul(&r_h.broadcast_left(b)?.t()?.contiguous()?)?; // rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) let rel_w = r_q .transpose(1, 2)? // -> bwhc .contiguous()? .matmul(&r_w.broadcast_left(b)?.t()?.contiguous()?)? // bwhc,bwck -> bwhk .transpose(1, 2)? .contiguous()?; if attn.device().is_cpu() { let op = Add3(b, q_h, q_w, k_h, k_w); attn.apply_op3_no_bwd(&rel_h, &rel_w, &op) } else { (attn.reshape((b, q_h, q_w, k_h, k_w))? + rel_h.unsqueeze(4)?.broadcast_add(&rel_w.unsqueeze(3)?)?)? .reshape((b, q_h * q_w, k_h * k_w)) } } None => Ok(attn), } } } fn get_rel_pos(q_size: usize, k_size: usize, rel_pos: &Tensor) -> Result<Tensor> { let max_rel_dist = 2 * usize::max(q_size, k_size) - 1; let dev = rel_pos.device(); let rel_pos_resized = if rel_pos.dim(0)? != max_rel_dist { todo!("interpolation") } else { rel_pos }; let q_coords = Tensor::arange(0u32, q_size as u32, dev)? .reshape((q_size, 1))? .to_dtype(DType::F32)?; let k_coords = Tensor::arange(0u32, k_size as u32, dev)? .reshape((1, k_size))? .to_dtype(DType::F32)?; let q_coords = (q_coords * f64::max(1f64, k_size as f64 / q_size as f64))?; let k_coords = (k_coords * f64::max(1f64, q_size as f64 / k_size as f64))?; let relative_coords = (q_coords.broadcast_sub(&k_coords)? + (k_size as f64 - 1.) * f64::max(1f64, q_size as f64 / k_size as f64))?; let (d1, d2) = relative_coords.dims2()?; let relative_coords = relative_coords.to_dtype(DType::U32)?; rel_pos_resized .index_select(&relative_coords.reshape(d1 * d2)?, 0)? .reshape((d1, d2, ())) } impl Module for Attention { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (b, h, w, c) = xs.dims4()?; let qkv = self .qkv .forward(&xs.flatten_to(1)?)? .reshape((b, h * w, 3, self.num_heads, c / self.num_heads))? .permute((2, 0, 3, 1, 4))? .reshape((3, b * self.num_heads, h * w, c / self.num_heads))?; let q = qkv.i(0)?; let k = qkv.i(1)?; let v = qkv.i(2)?; let attn = { let _enter = self.span_matmul.enter(); (&q * self.scale)?.matmul(&k.t()?)? }; let attn = { let _enter = self.span_rel_pos.enter(); self.add_decomposed_rel_pos(attn, &q, (h, w), (h, w))? }; let attn = { let _enter = self.span_softmax.enter(); candle_nn::ops::softmax_last_dim(&attn)? }; let attn = { let _enter = self.span_matmul.enter(); attn.matmul(&v)? }; let attn = attn .reshape((b, self.num_heads, h, w, c / self.num_heads))? .permute((0, 2, 3, 1, 4))? 
.reshape((b, h * w, c))?; self.proj.forward(&attn)?.reshape((b, h, w, c)) } } #[derive(Debug)] struct Block { norm1: LayerNorm, attn: Attention, norm2: LayerNorm, mlp: super::MlpBlock, window_size: usize, span: tracing::Span, } impl Block { fn new( dim: usize, num_heads: usize, qkv_bias: bool, use_rel_pos: bool, window_size: usize, input_size: (usize, usize), vb: VarBuilder, ) -> Result<Self> { let norm1 = layer_norm(dim, 1e-6, vb.pp("norm1"))?; let norm2 = layer_norm(dim, 1e-6, vb.pp("norm2"))?; let input_size_attn = if window_size == 0 { input_size } else { (window_size, window_size) }; let attn = Attention::new( dim, num_heads, qkv_bias, use_rel_pos, input_size_attn, vb.pp("attn"), )?; let mlp = super::MlpBlock::new(dim, dim * 4, candle_nn::Activation::Gelu, vb.pp("mlp"))?; let span = tracing::span!(tracing::Level::TRACE, "ie-block"); Ok(Self { norm1, attn, norm2, mlp, window_size, span, }) } } fn window_partition(xs: Tensor, window_size: usize) -> Result<(Tensor, (usize, usize))> { let (b, h, w, c) = xs.dims4()?; let pad_h = (window_size - h % window_size) % window_size; let pad_w = (window_size - w % window_size) % window_size; let xs = if pad_h > 0 { xs.pad_with_zeros(1, 0, pad_h)? } else { xs }; let xs = if pad_w > 0 { xs.pad_with_zeros(2, 0, pad_w)? } else { xs }; let (h_p, w_p) = (h + pad_h, w + pad_w); let windows = xs .reshape(( b, h_p / window_size, window_size, w_p / window_size, window_size, c, ))? .transpose(2, 3)? .contiguous()? .flatten_to(2)?; Ok((windows, (h_p, w_p))) } fn window_unpartition( windows: Tensor, window_size: usize, (h_p, w_p): (usize, usize), (h, w): (usize, usize), ) -> Result<Tensor> { let b = windows.dim(0)? / (h_p * w_p / window_size / window_size); let xs = windows .reshape(( b, h_p / window_size, w_p / window_size, window_size, window_size, windows.elem_count() / b / h_p / w_p, ))? .transpose(2, 3)? .contiguous()? .reshape((b, h_p, w_p, ()))?; let xs = if h_p > h { xs.narrow(1, 0, h)? } else { xs }; let xs = if w_p > w { xs.narrow(2, 0, w)? } else { xs }; Ok(xs) } impl Module for Block { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let shortcut = xs; let xs = self.norm1.forward(xs)?; let hw = (xs.dim(1)?, xs.dim(2)?); let (xs, pad_hw) = if self.window_size > 0 { window_partition(xs, self.window_size)? } else { (xs, (0, 0)) }; let xs = self.attn.forward(&xs)?; let xs = if self.window_size > 0 { window_unpartition(xs, self.window_size, pad_hw, hw)? } else { xs }; let xs = (xs + shortcut)?; &xs + xs.apply(&self.norm2)?.apply(&self.mlp)? 
} } #[derive(Debug)] pub struct ImageEncoderViT { patch_embed: PatchEmbed, blocks: Vec<Block>, neck_conv1: candle_nn::Conv2d, neck_ln1: super::LayerNorm2d, neck_conv2: candle_nn::Conv2d, neck_ln2: super::LayerNorm2d, pos_embed: Option<Tensor>, span: tracing::Span, } impl ImageEncoderViT { #[allow(clippy::too_many_arguments)] pub fn new( img_size: usize, patch_size: usize, in_chans: usize, embed_dim: usize, depth: usize, num_heads: usize, out_chans: usize, qkv_bias: bool, use_rel_pos: bool, use_abs_pos: bool, window_size: usize, global_attn_indexes: &[usize], vb: VarBuilder, ) -> Result<Self> { let patch_embed = PatchEmbed::new( in_chans, embed_dim, patch_size, patch_size, 0, vb.pp("patch_embed"), )?; let mut blocks = Vec::with_capacity(depth); let vb_b = vb.pp("blocks"); for i in 0..depth { let window_size = if global_attn_indexes.contains(&i) { 0 } else { window_size }; let block = Block::new( embed_dim, num_heads, qkv_bias, use_rel_pos, window_size, (img_size / patch_size, img_size / patch_size), vb_b.pp(i), )?; blocks.push(block) } let neck_conv1 = candle_nn::conv2d_no_bias( embed_dim, out_chans, 1, Default::default(), vb.pp("neck.0"), )?; let neck_ln1 = super::LayerNorm2d::new(out_chans, 1e-6, vb.pp("neck.1"))?; let cfg = candle_nn::Conv2dConfig { padding: 1, ..Default::default() }; let neck_conv2 = candle_nn::conv2d_no_bias(out_chans, out_chans, 3, cfg, vb.pp("neck.2"))?; let neck_ln2 = super::LayerNorm2d::new(out_chans, 1e-6, vb.pp("neck.3"))?; let pos_embed = if use_abs_pos { let p = vb.get( (1, img_size / patch_size, img_size / patch_size, embed_dim), "pos_embed", )?; Some(p) } else { None }; let span = tracing::span!(tracing::Level::TRACE, "image-encoder-vit"); Ok(Self { patch_embed, blocks, neck_conv1, neck_ln1, neck_conv2, neck_ln2, pos_embed, span, }) } } impl Module for ImageEncoderViT { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = self.patch_embed.forward(xs)?; let mut xs = match &self.pos_embed { Some(pos_embed) => (xs + pos_embed)?, None => xs, }; for block in self.blocks.iter() { xs = block.forward(&xs)? } xs.permute((0, 3, 1, 2))? .apply(&self.neck_conv1)? .apply(&self.neck_ln1)? .apply(&self.neck_conv2)? .apply(&self.neck_ln2) } }
candle/candle-transformers/src/models/segment_anything/image_encoder.rs/0
{ "file_path": "candle/candle-transformers/src/models/segment_anything/image_encoder.rs", "repo_id": "candle", "token_count": 8848 }
#![allow(dead_code)] //! # Diffusion pipelines and models //! //! Noise schedulers can be used to set the trade-off between //! inference speed and quality. use candle::{Result, Tensor}; pub trait SchedulerConfig: std::fmt::Debug + Send + Sync { fn build(&self, inference_steps: usize) -> Result<Box<dyn Scheduler>>; } /// This trait represents a scheduler for the diffusion process. pub trait Scheduler { fn timesteps(&self) -> &[usize]; fn add_noise(&self, original: &Tensor, noise: Tensor, timestep: usize) -> Result<Tensor>; fn init_noise_sigma(&self) -> f64; fn scale_model_input(&self, sample: Tensor, _timestep: usize) -> Result<Tensor>; fn step(&mut self, model_output: &Tensor, timestep: usize, sample: &Tensor) -> Result<Tensor>; } /// This represents how beta ranges from its minimum value to the maximum /// during training. #[derive(Debug, Clone, Copy)] pub enum BetaSchedule { /// Linear interpolation. Linear, /// Linear interpolation of the square root of beta. ScaledLinear, /// Glide cosine schedule SquaredcosCapV2, } #[derive(Debug, Clone, Copy)] pub enum PredictionType { Epsilon, VPrediction, Sample, } /// Time step spacing for the diffusion process. /// /// "linspace", "leading", "trailing" corresponds to annotation of Table 2. of the [paper](https://arxiv.org/abs/2305.08891) #[derive(Debug, Clone, Copy)] pub enum TimestepSpacing { Leading, Linspace, Trailing, } impl Default for TimestepSpacing { fn default() -> Self { Self::Leading } } /// Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of /// `(1-beta)` over time from `t = [0,1]`. /// /// Contains a function `alpha_bar` that takes an argument `t` and transforms it to the cumulative product of `(1-beta)` /// up to that part of the diffusion process. pub(crate) fn betas_for_alpha_bar(num_diffusion_timesteps: usize, max_beta: f64) -> Result<Tensor> { let alpha_bar = |time_step: usize| { f64::cos((time_step as f64 + 0.008) / 1.008 * std::f64::consts::FRAC_PI_2).powi(2) }; let mut betas = Vec::with_capacity(num_diffusion_timesteps); for i in 0..num_diffusion_timesteps { let t1 = i / num_diffusion_timesteps; let t2 = (i + 1) / num_diffusion_timesteps; betas.push((1.0 - alpha_bar(t2) / alpha_bar(t1)).min(max_beta)); } let betas_len = betas.len(); Tensor::from_vec(betas, betas_len, &candle::Device::Cpu) }
candle/candle-transformers/src/models/stable_diffusion/schedulers.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/schedulers.rs", "repo_id": "candle", "token_count": 940 }
//! Apply penalty and repeat_kv use candle::{Result, Tensor}; pub fn apply_repeat_penalty(logits: &Tensor, penalty: f32, context: &[u32]) -> Result<Tensor> { let device = logits.device(); let mut logits = logits.to_dtype(candle::DType::F32)?.to_vec1::<f32>()?; let mut already_seen = std::collections::HashSet::new(); for token_id in context { if already_seen.contains(token_id) { continue; } already_seen.insert(token_id); if let Some(logit) = logits.get_mut(*token_id as usize) { if *logit >= 0. { *logit /= penalty } else { *logit *= penalty } } } let logits_len = logits.len(); Tensor::from_vec(logits, logits_len, device) } /// Repeats a key or value tensor for grouped query attention /// The input tensor should have a shape `(batch, num_kv_heads, seq_len, head_dim)`, pub fn repeat_kv(xs: Tensor, n_rep: usize) -> Result<Tensor> { if n_rep == 1 { Ok(xs) } else { let (b_sz, n_kv_head, seq_len, head_dim) = xs.dims4()?; // Using cat is faster than a broadcast as it avoids going through a potentially // strided copy. // https://github.com/huggingface/candle/pull/2043 Tensor::cat(&vec![&xs; n_rep], 2)?.reshape((b_sz, n_kv_head * n_rep, seq_len, head_dim)) } }
candle/candle-transformers/src/utils.rs/0
{ "file_path": "candle/candle-transformers/src/utils.rs", "repo_id": "candle", "token_count": 642 }
use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use candle_transformers::models::blip; use candle_transformers::models::quantized_blip; use candle_wasm_example_blip::console_log; use candle_wasm_example_blip::token_output_stream::TokenOutputStream; use js_sys::Date; use tokenizers::Tokenizer; use wasm_bindgen::prelude::*; enum SelectedModel { M(blip::BlipForConditionalGeneration), Q(quantized_blip::BlipForConditionalGeneration), } impl SelectedModel { fn text_decoder_forward(&mut self, xs: &Tensor, img_xs: &Tensor) -> Result<Tensor, JsError> { match self { Self::M(m) => m .text_decoder() .forward(xs, img_xs) .map_err(|e| JsError::new(&e.to_string())), Self::Q(m) => m .text_decoder() .forward(xs, img_xs) .map_err(|e| JsError::new(&e.to_string())), } } fn reset_kv_cache(&mut self) { match self { Self::M(m) => m.reset_kv_cache(), Self::Q(m) => m.reset_kv_cache(), } } } #[wasm_bindgen] pub struct Model { model: SelectedModel, tokenizer: TokenOutputStream, } const SEP_TOKEN_ID: u32 = 102; #[wasm_bindgen] impl Model { #[wasm_bindgen(constructor)] pub fn load( weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>, quantized: bool, ) -> Result<Model, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let tokenizer = TokenOutputStream::new(tokenizer); let config: blip::Config = serde_json::from_slice(&config)?; let device = Device::Cpu; let start = Date::now(); let model: SelectedModel = if quantized { let vb = quantized_blip::VarBuilder::from_gguf_buffer(&weights, &device)?; let model = quantized_blip::BlipForConditionalGeneration::new(&config, vb)?; SelectedModel::Q(model) } else { let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, &device)?; let model = blip::BlipForConditionalGeneration::new(&config, vb)?; SelectedModel::M(model) }; console_log!("model loaded in {:?}s", (Date::now() - start) / 1000.); Ok(Self { model, tokenizer }) } #[wasm_bindgen] pub fn generate_caption_from_image(&mut self, image: Vec<u8>) -> Result<String, JsError> { self.model.reset_kv_cache(); let device = Device::Cpu; console_log!("loading image as tensor"); let start = Date::now(); let image: Tensor = self.load_image(image)?.to_device(&device)?; console_log!("image loaded in {:?}s", (Date::now() - start) / 1000.); let start = Date::now(); let image_embeds: Tensor = match &mut self.model { SelectedModel::M(m) => image.unsqueeze(0)?.apply(m.vision_model())?, SelectedModel::Q(m) => image.unsqueeze(0)?.apply(m.vision_model())?, }; console_log!("image embedded in {:?}s", (Date::now() - start) / 1000.); let mut logits_processor = LogitsProcessor::new(299792458, None, None); let mut token_ids = vec![30522u32]; let mut text: String = "".to_string(); let start = Date::now(); for index in 0..1000 { let context_size = if index > 0 { 1 } else { token_ids.len() }; let start_pos = token_ids.len().saturating_sub(context_size); let input_ids = Tensor::new(&token_ids[start_pos..], &device)?.unsqueeze(0)?; let logits = self.model.text_decoder_forward(&input_ids, &image_embeds)?; let logits = logits.squeeze(0)?; let logits = logits.get(logits.dim(0)? - 1)?; let token = logits_processor.sample(&logits)?; if token == SEP_TOKEN_ID { break; } token_ids.push(token); if let Some(t) = self.tokenizer.next_token(token)? { text.push_str(&t); } } if let Some(rest) = self .tokenizer .decode_rest() .map_err(|m| JsError::new(&m.to_string()))? 
{ text.push_str(&rest); } console_log!("caption generated in {:?}s", (Date::now() - start) / 1000.); Ok(text) } } impl Model { fn load_image(&self, image: Vec<u8>) -> Result<Tensor, JsError> { let device = &Device::Cpu; let img = image::ImageReader::new(std::io::Cursor::new(image)) .with_guessed_format()? .decode() .map_err(|e| JsError::new(&e.to_string()))? .resize_to_fill(384, 384, image::imageops::FilterType::Triangle); let img = img.to_rgb8(); let data = img.into_raw(); let data = Tensor::from_vec(data, (384, 384, 3), device)?.permute((2, 0, 1))?; let mean = Tensor::new(&[0.48145466f32, 0.4578275, 0.40821073], device)?.reshape((3, 1, 1))?; let std = Tensor::new(&[0.26862954f32, 0.261_302_6, 0.275_777_1], device)?.reshape((3, 1, 1))?; (data.to_dtype(candle::DType::F32)? / 255.)? .broadcast_sub(&mean)? .broadcast_div(&std) .map_err(|e| JsError::new(&e.to_string())) } } fn main() { console_error_panic_hook::set_once(); }
candle/candle-wasm-examples/blip/src/bin/m.rs/0
{ "file_path": "candle/candle-wasm-examples/blip/src/bin/m.rs", "repo_id": "candle", "token_count": 2698 }
## Running Yolo Examples Here, we provide two examples of how to run YOLOv8 using a Candle-compiled WASM binary and runtimes. ### Pure Rust UI To build and test the UI made in Rust you will need [Trunk](https://trunkrs.dev/#install) From the `candle-wasm-examples/yolo` directory run: Download assets: ```bash wget -c https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/bike.jpeg wget -c https://huggingface.co/lmz/candle-yolo-v8/resolve/main/yolov8s.safetensors ``` Run hot reload server: ```bash trunk serve --release --public-url / --port 8080 ``` ### Vanilla JS and WebWorkers To build and test the UI made in Vanilla JS and WebWorkers, first we need to build the WASM library: ```bash sh build-lib.sh ``` This will bundle the library under `./build` and we can import it inside our WebWorker like a normal JS module: ```js import init, { Model, ModelPose } from "./build/m.js"; ``` The full example can be found under `./lib-example.html`. All needed assets are fetched from the web, so no need to download anything. Finally, you can preview the example by running a local HTTP server. For example: ```bash python -m http.server ``` Then open `http://localhost:8000/lib-example.html` in your browser.
candle/candle-wasm-examples/yolo/README.md/0
{ "file_path": "candle/candle-wasm-examples/yolo/README.md", "repo_id": "candle", "token_count": 412 }
#![allow(unused)] use candle::{ quantized::{self, k_quants, GgmlDType, GgmlType}, test_utils::to_vec2_round, Device, Module, Result, Tensor, }; use wasm_bindgen_test::*; wasm_bindgen_test_configure!(run_in_browser); #[wasm_bindgen_test] fn quantized_matmul_neg() -> Result<()> { let cpu = &Device::Cpu; let (m, k, n) = (3, 64, 4); let lhs = (0..(m * k)) .map(|v| v as f32 - (m * k) as f32 / 2.0) .collect::<Vec<_>>(); let tensor_lhs = Tensor::from_slice(&lhs, (m, k), cpu)?; let mut dst = vec![42.; 3 * 4]; let mut rhs_t = vec![k_quants::BlockQ4_0::zeros(); 8]; let rhs = (0..k * n) .map(|v| v as f32 - (k * n) as f32 / 3.0) .collect::<Vec<_>>(); let tensor_rhs = Tensor::from_slice(&rhs, (n, k), cpu)?.t()?; k_quants::BlockQ4_0::from_float(&rhs, &mut rhs_t)?; k_quants::matmul((m, k, n), &lhs, &rhs_t, &mut dst)?; assert_eq!( dst.iter().map(|x| x.round()).collect::<Vec<_>>(), &[ 243524.0, -19596.0, -285051.0, -549815.0, 23777.0, 21651.0, 19398.0, 18367.0, -196472.0, 63012.0, 324585.0, 587902.0 ] ); let mm = tensor_lhs.matmul(&tensor_rhs)?; assert_eq!( to_vec2_round(&mm, 0)?, &[ [244064.0, -20128.0, -284320.0, -548512.0], [23563.0, 21515.0, 19467.0, 17419.0], [-196939.0, 63157.0, 323253.0, 583349.0] ] ); let qtensor = quantized::QTensor::new(quantized::QStorage::Cpu(Box::new(rhs_t)), (4, 64))?; let matmul = quantized::QMatMul::from_qtensor(qtensor)?; let res = matmul.forward(&tensor_lhs)?; assert_eq!( to_vec2_round(&res, 0)?, &[ [243524.0, -19596.0, -285051.0, -549815.0], [23777.0, 21651.0, 19398.0, 18367.0], [-196472.0, 63012.0, 324585.0, 587902.0] ] ); Ok(()) } /// Creates a vector similarly to the one used in GGML unit tests: https://github.com/ggerganov/llama.cpp/blob/master/tests/test-quantize-fns.cpp#L26-L30 fn create_ggml_like_vector(offset: f32) -> Vec<f32> { const GGML_TEST_SIZE: usize = 32 * 128; (0..GGML_TEST_SIZE) .map(|i| 0.1 + 2.0 * (i as f32 + offset).cos()) .collect() } /// Very simple dot product implementation fn vec_dot_reference(a: &[f32], b: &[f32]) -> f32 { a.iter().zip(b).map(|(a, b)| a * b).sum() } /// Returns the error achieved by the GGML matmul unit test. fn ggml_reference_matmul_error(dtype: GgmlDType) -> Result<f32> { let err = match dtype { GgmlDType::F16 => 0.000010, GgmlDType::Q2K => 0.004086, GgmlDType::Q3K => 0.016148, GgmlDType::Q4K => 0.002425, GgmlDType::Q5K => 0.000740, GgmlDType::Q6K => 0.000952, GgmlDType::Q4_0 => 0.001143, GgmlDType::Q4_1 => 0.007784, GgmlDType::Q5_0 => 0.001353, GgmlDType::Q5_1 => 0.001363, GgmlDType::Q8_0 => 0.000092, // Not from the ggml repo. 
GgmlDType::Q8K => 0.00065, _ => candle::bail!("No GGML results for quantization type {dtype:?}",), }; Ok(err) } /// Mirrores the GGML matmul unit test: https://github.com/ggerganov/llama.cpp/blob/master/tests/test-quantize-fns.cpp#L76-L91 fn ggml_matmul_error_test<T: GgmlType>() -> Result<()> { const GGML_MAX_DOT_PRODUCT_ERROR: f32 = 0.02; let a = create_ggml_like_vector(0.0); let b = create_ggml_like_vector(1.0); let length = a.len(); let mut a_quant = vec![T::zeros(); length / T::BLCK_SIZE]; let mut b_quant = vec![T::VecDotType::zeros(); length / T::VecDotType::BLCK_SIZE]; T::from_float(&a, &mut a_quant)?; T::VecDotType::from_float(&b, &mut b_quant)?; let result = T::vec_dot(length, &a_quant, &b_quant)?; let result_unopt = T::vec_dot_unopt(length, &a_quant, &b_quant)?; let reference_result = vec_dot_reference(&a, &b); if (result - result_unopt).abs() / length as f32 > 1e-6 { candle::bail!( "the opt and unopt vec-dot returned different values, opt {result}, unopt {result_unopt}" ) } let error = (result - reference_result).abs() / length as f32; let ggml_error = ggml_reference_matmul_error(T::DTYPE)?; if !error.is_finite() || error > GGML_MAX_DOT_PRODUCT_ERROR { candle::bail!( "Dot product error {} exceeds max error {}", error, GGML_MAX_DOT_PRODUCT_ERROR ); } // We diverge slightly due to different rounding behavior / f16 to f32 conversions in GGML // => we use a slightly higher error threshold const ERROR_LENIENCY: f32 = 0.00001; if error - ERROR_LENIENCY > ggml_error { candle::bail!( "Dot product error {} exceeds ggml reference error {}", error, ggml_error ); } Ok(()) } #[wasm_bindgen_test] fn quantized_matmul_q40() -> Result<()> { ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ4_0>()?; Ok(()) } #[wasm_bindgen_test] fn quantized_matmul_q50() -> Result<()> { ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ5_0>()?; Ok(()) } #[wasm_bindgen_test] fn quantized_matmul_q80() -> Result<()> { ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ8_0>()?; Ok(()) } #[wasm_bindgen_test] fn quantized_matmul_q2k() -> Result<()> { ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ2K>()?; Ok(()) } #[wasm_bindgen_test] fn quantized_matmul_q3k() -> Result<()> { ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ3K>()?; Ok(()) } #[wasm_bindgen_test] fn quantized_matmul_q4k() -> Result<()> { ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ4K>()?; Ok(()) } #[wasm_bindgen_test] fn quantized_matmul_q5k() -> Result<()> { ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ5K>()?; Ok(()) } #[wasm_bindgen_test] fn quantized_matmul_q6k() -> Result<()> { ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ6K>()?; Ok(()) } #[wasm_bindgen_test] fn quantized_matmul_q8k() -> Result<()> { ggml_matmul_error_test::<candle::quantized::k_quants::BlockQ8K>()?; Ok(()) }
candle/candle-wasm-tests/tests/quantized_tests.rs/0
{ "file_path": "candle/candle-wasm-tests/tests/quantized_tests.rs", "repo_id": "candle", "token_count": 3151 }
{{- define "name" -}} {{- default $.Release.Name | trunc 63 | trimSuffix "-" -}} {{- end -}} {{- define "app.name" -}} chat-ui {{- end -}} {{- define "labels.standard" -}} release: {{ $.Release.Name | quote }} heritage: {{ $.Release.Service | quote }} chart: "{{ include "name" . }}" app: "{{ include "app.name" . }}" {{- end -}} {{- define "labels.resolver" -}} release: {{ $.Release.Name | quote }} heritage: {{ $.Release.Service | quote }} chart: "{{ include "name" . }}" app: "{{ include "app.name" . }}-resolver" {{- end -}}
chat-ui/chart/templates/_helpers.tpl/0
{ "file_path": "chat-ui/chart/templates/_helpers.tpl", "repo_id": "chat-ui", "token_count": 202 }
# Models Overview

You can customize the parameters passed to the model or even use a new model by updating the `MODELS` variable in your `.env.local`. The default one can be found in `.env` and looks like this:

```ini
MODELS=`[
  {
    "name": "mistralai/Mistral-7B-Instruct-v0.2",
    "displayName": "mistralai/Mistral-7B-Instruct-v0.2",
    "description": "Mistral 7B is a new Apache 2.0 model, released by Mistral AI that outperforms Llama2 13B in benchmarks.",
    "websiteUrl": "https://mistral.ai/news/announcing-mistral-7b/",
    "preprompt": "",
    "chatPromptTemplate": "<s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}}{{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s>{{/ifAssistant}}{{/each}}",
    "parameters": {
      "temperature": 0.3,
      "top_p": 0.95,
      "repetition_penalty": 1.2,
      "top_k": 50,
      "truncate": 3072,
      "max_new_tokens": 1024,
      "stop": ["</s>"]
    },
    "promptExamples": [
      {
        "title": "Write an email from bullet list",
        "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)"
      },
      {
        "title": "Code a snake game",
        "prompt": "Code a basic snake game in python, give explanations for each step."
      },
      {
        "title": "Assist in a task",
        "prompt": "How do I make a delicious lemon cheesecake?"
      }
    ]
  }
]`
```

You can change things like the parameters or customize the preprompt to better suit your needs. You can also add more models by adding more objects to the array, with different preprompts for example.

## Chat Prompt Template

When querying the model for a chat response, the `chatPromptTemplate` template is used. `messages` is an array of chat messages with the format `[{ content: string }, ...]`. To identify whether a message is a user message or an assistant message, the `ifUser` and `ifAssistant` block helpers can be used.

The following is the default `chatPromptTemplate`, although newlines and indentation have been added for readability. You can find the prompts used in production for HuggingChat [here](https://github.com/huggingface/chat-ui/blob/main/PROMPTS.md). The templating language used is [Handlebars](https://www.npmjs.com/package/handlebars).

```handlebars
{{preprompt}}
{{#each messages}}
  {{#ifUser}}{{@root.userMessageToken}}{{content}}{{@root.userMessageEndToken}}{{/ifUser}}
  {{#ifAssistant}}{{@root.assistantMessageToken}}{{content}}{{@root.assistantMessageEndToken}}{{/ifAssistant}}
{{/each}}
{{assistantMessageToken}}
```

## Custom endpoint authorization

### Basic and Bearer

Custom endpoints may require authorization, depending on how you configure them. Authentication will usually be set either with `Basic` or `Bearer`.

For `Basic` we will need to generate a base64 encoding of the username and password.

`echo -n "USER:PASS" | base64`

> VVNFUjpQQVNT

For `Bearer` you can use a token, which can be grabbed from [here](https://huggingface.co/settings/tokens).

You can then add the generated information and the `authorization` parameter to your `.env.local`.

```ini
"endpoints": [
  {
    "url": "https://HOST:PORT",
    "authorization": "Basic VVNFUjpQQVNT",
  }
]
```

Please note that if `HF_TOKEN` is also set (i.e. not empty), it will take precedence.

## Models hosted on multiple custom endpoints

If the model being hosted will be available on multiple servers/instances, add the `weight` parameter to your `.env.local`. The `weight` will be used to determine the probability of requesting a particular endpoint.
```ini
"endpoints": [
  { "url": "https://HOST:PORT", "weight": 1 },
  { "url": "https://HOST:PORT", "weight": 2 }
  ...
]
```

## Client Certificate Authentication (mTLS)

Custom endpoints may require client certificate authentication, depending on how you configure them. To enable mTLS between Chat UI and your custom endpoint, you will need to set `USE_CLIENT_CERTIFICATE` to `true`, and add the `CERT_PATH` and `KEY_PATH` parameters to your `.env.local`. These parameters should point to the location of the certificate and key files on your local machine. The certificate and key files should be in PEM format. The key file can be encrypted with a passphrase, in which case you will also need to add the `CLIENT_KEY_PASSWORD` parameter to your `.env.local`.

If you're using a certificate signed by a private CA, you will also need to add the `CA_PATH` parameter to your `.env.local`. This parameter should point to the location of the CA certificate file on your local machine.

If you're using a self-signed certificate, e.g. for testing or development purposes, you can set the `REJECT_UNAUTHORIZED` parameter to `false` in your `.env.local`. This will disable certificate validation and allow Chat UI to connect to your custom endpoint.

## Specific Embedding Model

A model can use any of the embedding models defined under `TEXT_EMBEDDING_MODELS` (currently used when web searching). By default, it will use the first embedding model, but this can be changed with the `embeddingModel` field:

```ini
TEXT_EMBEDDING_MODELS = `[
  {
    "name": "Xenova/gte-small",
    "chunkCharLength": 512,
    "endpoints": [
      {"type": "transformersjs"}
    ]
  },
  {
    "name": "intfloat/e5-base-v2",
    "chunkCharLength": 768,
    "endpoints": [
      {"type": "tei", "url": "http://127.0.0.1:8080/", "authorization": "Basic VVNFUjpQQVNT"},
      {"type": "tei", "url": "http://127.0.0.1:8081/"}
    ]
  }
]`

MODELS=`[
  {
    "name": "Ollama Mistral",
    "chatPromptTemplate": "...",
    "embeddingModel": "intfloat/e5-base-v2",
    "parameters": {
      ...
    },
    "endpoints": [
      ...
    ]
  }
]`
```
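As an illustration of how the `weight` field described earlier can drive endpoint selection, a weighted random pick over the configured endpoints could be sketched as follows. This is illustrative TypeScript, not the actual chat-ui implementation; the `WeightedEndpoint` shape and `pickEndpoint` helper are assumptions for the example.

```ts
interface WeightedEndpoint {
	url: string;
	weight: number;
}

// Pick one endpoint with probability proportional to its weight.
function pickEndpoint(endpoints: WeightedEndpoint[]): WeightedEndpoint {
	const totalWeight = endpoints.reduce((sum, e) => sum + e.weight, 0);
	let threshold = Math.random() * totalWeight;
	for (const endpoint of endpoints) {
		threshold -= endpoint.weight;
		if (threshold <= 0) return endpoint;
	}
	// Fallback for floating-point edge cases.
	return endpoints[endpoints.length - 1];
}

// With weights 1 and 2, the second endpoint is chosen roughly twice as often.
pickEndpoint([
	{ url: "https://HOST:PORT", weight: 1 },
	{ url: "https://HOST:PORT", weight: 2 },
]);
```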
chat-ui/docs/source/configuration/models/overview.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/overview.md", "repo_id": "chat-ui", "token_count": 1993 }
# Architecture

This document discusses the high-level overview of the Chat UI codebase. If you're looking to contribute or just want to understand how the codebase works, this is the place for you!

## Overview

Chat UI provides a simple interface connecting LLMs to external information and tools. The project uses [MongoDB](https://www.mongodb.com/) and [SvelteKit](https://kit.svelte.dev/) with [Tailwind](https://tailwindcss.com/).

## Code Map

This section discusses various modules of the codebase briefly. The headings are not paths, since the codebase structure may change.

### `routes`

Provides all of the routes rendered with SSR via SvelteKit. The majority of backend and frontend logic can be found here, with some modules being pulled out into `lib` for the client and `lib/server` for the server.

### `textGeneration`

Provides a standard interface for most chat features such as model output, web search, assistants and tools. Outputs `MessageUpdate`s which provide fine-grained updates on the request status, such as new tokens and web search results.

### `endpoints`/`embeddingEndpoints`

Provides a common streaming interface for many third-party LLM and embedding providers.

### `websearch`

Implements web search querying and RAG. See the [Web Search](../configuration/web-search) section for more information.

### `tools`

Provides a common interface for external tools called by LLMs. See the [Tools](../configuration/models/tools) section for more information.

### `migrations`

Includes all MongoDB migrations for maintaining backwards compatibility across schema changes. Any changes to the schema must include a migration.
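To make the `MessageUpdate` stream more concrete, here are a few illustrative update payloads. The shapes are inferred from how the rest of the codebase constructs these updates via the `MessageUpdateType` and `MessageUpdateStatus` enums; some variants carry additional fields, so treat this as a sketch rather than a complete reference.

```ts
import { MessageUpdateType, MessageUpdateStatus } from "$lib/types/MessageUpdate";

// A token streamed from the model while the answer is being generated.
const stream = { type: MessageUpdateType.Stream, token: "Hello" };

// The final answer, emitted once generation finishes.
const finalAnswer = {
	type: MessageUpdateType.FinalAnswer,
	text: "Hello! How can I help you today?",
	interrupted: false,
};

// A status update, e.g. when generation fails.
const errorStatus = {
	type: MessageUpdateType.Status,
	status: MessageUpdateStatus.Error,
	message: "No output was generated.",
};
```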
chat-ui/docs/source/developing/architecture.md/0
{ "file_path": "chat-ui/docs/source/developing/architecture.md", "repo_id": "chat-ui", "token_count": 409 }
<!doctype html> <html lang="en" class="h-full"> <head> <meta charset="utf-8" /> <meta name="viewport" content="width=device-width, initial-scale=1" /> <meta name="theme-color" content="rgb(249, 250, 251)" /> <script> if ( localStorage.theme === "dark" || (!("theme" in localStorage) && window.matchMedia("(prefers-color-scheme: dark)").matches) ) { document.documentElement.classList.add("dark"); document .querySelector('meta[name="theme-color"]') .setAttribute("content", "rgb(26, 36, 50)"); } // For some reason, Sveltekit doesn't let us load env variables from .env here, so we load it from hooks.server.ts window.gaId = "%gaId%"; </script> %sveltekit.head% </head> <body data-sveltekit-preload-data="hover" class="h-full dark:bg-gray-900"> <div id="app" class="contents h-full">%sveltekit.body%</div> <!-- Google Tag Manager --> <script> if (window.gaId) { const script = document.createElement("script"); script.src = "https://www.googletagmanager.com/gtag/js?id=" + window.gaId; script.async = true; document.head.appendChild(script); window.dataLayer = window.dataLayer || []; function gtag() { dataLayer.push(arguments); } gtag("js", new Date()); /// ^ See https://developers.google.com/tag-platform/gtagjs/install gtag("config", window.gaId); gtag("consent", "default", { ad_storage: "denied", analytics_storage: "denied" }); /// ^ See https://developers.google.com/tag-platform/gtagjs/reference#consent /// TODO: ask the user for their consent and update this with gtag('consent', 'update') } </script> </body> </html>
chat-ui/src/app.html/0
{ "file_path": "chat-ui/src/app.html", "repo_id": "chat-ui", "token_count": 668 }
<!-- @migration task: review uses of `navigating` --> <script lang="ts"> import { run } from "svelte/legacy"; import { navigating } from "$app/state"; import { createEventDispatcher } from "svelte"; import { browser } from "$app/environment"; import { base } from "$app/paths"; import { page } from "$app/stores"; import CarbonClose from "~icons/carbon/close"; import CarbonTextAlignJustify from "~icons/carbon/text-align-justify"; import IconNew from "$lib/components/icons/IconNew.svelte"; interface Props { isOpen?: boolean; title: string | undefined; children?: import("svelte").Snippet; } let { isOpen = false, title = $bindable(), children }: Props = $props(); run(() => { title = title ?? "New Chat"; }); let closeEl: HTMLButtonElement | undefined = $state(); let openEl: HTMLButtonElement | undefined = $state(); const dispatch = createEventDispatcher(); run(() => { if (navigating) { dispatch("toggle", false); } }); run(() => { if (isOpen && closeEl) { closeEl.focus(); } else if (!isOpen && browser && document.activeElement === closeEl) { openEl?.focus(); } }); </script> <nav class="flex h-12 items-center justify-between border-b bg-gray-50 px-3 dark:border-gray-800 dark:bg-gray-800/70 md:hidden" > <button type="button" class="-ml-3 flex size-12 shrink-0 items-center justify-center text-lg" onclick={() => dispatch("toggle", true)} aria-label="Open menu" bind:this={openEl}><CarbonTextAlignJustify /></button > {#await title} <div class="flex h-full items-center justify-center"></div> {:then title} <span class="truncate px-4">{title ?? ""}</span> {/await} <a class:invisible={!$page.params.id} href="{base}/" class="-mr-3 flex size-12 shrink-0 items-center justify-center text-lg"><IconNew /></a > </nav> <nav class="fixed inset-0 z-30 grid max-h-screen grid-cols-1 grid-rows-[auto,auto,1fr,auto] bg-white dark:bg-gray-900 {isOpen ? 'block' : 'hidden'}" > <div class="flex h-12 items-center px-4"> <button type="button" class="-mr-3 ml-auto flex size-12 items-center justify-center text-lg" onclick={() => dispatch("toggle", false)} aria-label="Close menu" bind:this={closeEl}><CarbonClose /></button > </div> {@render children?.()} </nav>
chat-ui/src/lib/components/MobileNav.svelte/0
{ "file_path": "chat-ui/src/lib/components/MobileNav.svelte", "repo_id": "chat-ui", "token_count": 862 }
<script lang="ts"> import type { Model } from "$lib/types/Model"; import { getTokenizer } from "$lib/utils/getTokenizer"; import type { PreTrainedTokenizer } from "@huggingface/transformers"; import { untrack } from "svelte"; interface Props { classNames?: string; prompt?: string; modelTokenizer: Exclude<Model["tokenizer"], undefined>; truncate?: number | undefined; } let { classNames = "", prompt = "", modelTokenizer, truncate = undefined }: Props = $props(); let tokenizer: Promise<PreTrainedTokenizer> = $derived(getTokenizer(modelTokenizer)); let nTokens = $state(0); $effect(() => { prompt && untrack(() => { tokenizer.then((tokenizer) => { const { input_ids } = tokenizer(prompt); nTokens = input_ids.size; }); }); }); let exceedLimit = $derived(nTokens > (truncate || Infinity)); </script> <div class={classNames}> <p class="peer text-sm {exceedLimit ? 'text-red-500 opacity-100' : 'opacity-60 hover:opacity-90'}" > {nTokens}{truncate ? `/${truncate}` : ""} </p> <div class="invisible absolute -top-6 right-0 whitespace-nowrap rounded bg-black px-1 text-sm text-white peer-hover:visible" > Tokens usage </div> </div>
chat-ui/src/lib/components/TokensCounter.svelte/0
{ "file_path": "chat-ui/src/lib/components/TokensCounter.svelte", "repo_id": "chat-ui", "token_count": 449 }
<script lang="ts"> import MarkdownRenderer from "./MarkdownRenderer.svelte"; import CarbonCaretDown from "~icons/carbon/caret-down"; interface Props { summary: string; content: string; loading?: boolean; } let { summary, content, loading = false }: Props = $props(); </script> <details class="group flex w-fit max-w-full flex-col rounded-xl border border-gray-200 bg-white shadow-sm dark:border-gray-800 dark:bg-gray-900" > <summary class=" grid min-w-72 cursor-pointer select-none grid-cols-[40px,1fr,24px] items-center gap-2.5 rounded-xl p-2 group-open:rounded-b-none hover:bg-gray-500/10" > <div class="relative grid aspect-square place-content-center overflow-hidden rounded-lg bg-gray-100 dark:bg-gray-800" > <div class="grid h-dvh place-items-center"> <svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 32 32"> <path class="stroke-gray-600 dark:stroke-gray-400" style="stroke-width: 1.9; fill: none; stroke-linecap: round; stroke-linejoin: round;" d="M16 6v3.33M16 6c0-2.65 3.25-4.3 5.4-2.62 1.2.95 1.6 2.65.95 4.04a3.63 3.63 0 0 1 4.61.16 3.45 3.45 0 0 1 .46 4.37 5.32 5.32 0 0 1 1.87 4.75c-.22 1.66-1.39 3.6-3.07 4.14M16 6c0-2.65-3.25-4.3-5.4-2.62a3.37 3.37 0 0 0-.95 4.04 3.65 3.65 0 0 0-4.6.16 3.37 3.37 0 0 0-.49 4.27 5.57 5.57 0 0 0-1.85 4.85 5.3 5.3 0 0 0 3.07 4.15M16 9.33v17.34m0-17.34c0 2.18 1.82 4 4 4m6.22 7.5c.67 1.3.56 2.91-.27 4.11a4.05 4.05 0 0 1-4.62 1.5c0 1.53-1.05 2.9-2.66 2.9A2.7 2.7 0 0 1 16 26.66m10.22-5.83a4.05 4.05 0 0 0-3.55-2.17m-16.9 2.18a4.05 4.05 0 0 0 .28 4.1c1 1.44 2.92 2.09 4.59 1.5 0 1.52 1.12 2.88 2.7 2.88A2.7 2.7 0 0 0 16 26.67M5.78 20.85a4.04 4.04 0 0 1 3.55-2.18" /> {#if loading} <path class="animate-pulse stroke-purple-700" style="stroke-width: 2; fill: none; stroke-linecap: round; stroke-linejoin: round; stroke-dasharray: 50;" d="M16 6v3.33M16 6c0-2.65 3.25-4.3 5.4-2.62 1.2.95 1.6 2.65.95 4.04a3.63 3.63 0 0 1 4.61.16 3.45 3.45 0 0 1 .46 4.37 5.32 5.32 0 0 1 1.87 4.75c-.22 1.66-1.39 3.6-3.07 4.14M16 6c0-2.65-3.25-4.3-5.4-2.62a3.37 3.37 0 0 0-.95 4.04 3.65 3.65 0 0 0-4.6.16 3.37 3.37 0 0 0-.49 4.27 5.57 5.57 0 0 0-1.85 4.85 5.3 5.3 0 0 0 3.07 4.15M16 9.33v17.34m0-17.34c0 2.18 1.82 4 4 4m6.22 7.5c.67 1.3.56 2.91-.27 4.11a4.05 4.05 0 0 1-4.62 1.5c0 1.53-1.05 2.9-2.66 2.9A2.7 2.7 0 0 1 16 26.66m10.22-5.83a4.05 4.05 0 0 0-3.55-2.17m-16.9 2.18a4.05 4.05 0 0 0 .28 4.1c1 1.44 2.92 2.09 4.59 1.5 0 1.52 1.12 2.88 2.7 2.88A2.7 2.7 0 0 0 16 26.67M5.78 20.85a4.04 4.04 0 0 1 3.55-2.18" > <animate attributeName="stroke-dashoffset" values="0;500" dur="12s" repeatCount="indefinite" /> </path> {/if} </svg> </div> </div> <dl class="leading-4"> <dd class="text-sm">Reasoning</dd> <dt class="flex items-center gap-1 truncate whitespace-nowrap text-[.82rem] text-gray-400" class:animate-pulse={loading} > {summary} </dt> </dl> <CarbonCaretDown class="size-6 text-gray-400 transition-transform group-open:rotate-180" /> </summary> <div class="space-y-4 border-t border-gray-200 px-5 pb-2 pt-2 text-sm text-gray-600 dark:border-gray-800 dark:text-gray-400" > <MarkdownRenderer {content} /> </div> </details> <style> details summary::-webkit-details-marker { display: none; } </style>
chat-ui/src/lib/components/chat/OpenReasoningResults.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/OpenReasoningResults.svelte", "repo_id": "chat-ui", "token_count": 1760 }
<script lang="ts"> import CarbonPause from "~icons/carbon/pause"; import CarbonPlay from "~icons/carbon/play"; interface Props { src: string; name: string; } let { src, name }: Props = $props(); let time = $state(0); let duration = $state(0); let paused = $state(true); function format(time: number) { if (isNaN(time)) return "..."; const minutes = Math.floor(time / 60); const seconds = Math.floor(time % 60); return `${minutes}:${seconds < 10 ? `0${seconds}` : seconds}`; } function seek(e: PointerEvent) { if (!e.currentTarget) return; const { left, width } = (e.currentTarget as HTMLElement).getBoundingClientRect(); let p = (e.clientX - left) / width; if (p < 0) p = 0; if (p > 1) p = 1; time = p * duration; } </script> <div class="flex h-14 w-72 items-center gap-4 rounded-2xl border border-gray-200 bg-white p-2.5 text-gray-600 shadow-sm transition-all dark:border-gray-800 dark:bg-gray-900 dark:text-gray-300" > <audio {src} bind:currentTime={time} bind:duration bind:paused preload="metadata" onended={() => { time = 0; }} ></audio> <button class="mx-auto my-auto aspect-square size-8 rounded-full border border-gray-400 bg-gray-100 dark:border-gray-800 dark:bg-gray-700" aria-label={paused ? "play" : "pause"} onclick={() => (paused = !paused)} > {#if paused} <CarbonPlay class="mx-auto my-auto text-gray-600 dark:text-gray-300" /> {:else} <CarbonPause class="mx-auto my-auto text-gray-600 dark:text-gray-300" /> {/if} </button> <div class="overflow-hidden"> <div class="truncate font-medium">{name}</div> {#if duration !== Infinity} <div class="flex items-center gap-2"> <span class="text-xs">{format(time)}</span> <div class="relative h-2 flex-1 rounded-full bg-gray-200 dark:bg-gray-700" onpointerdown={() => { paused = true; }} onpointerup={seek} > <div class="absolute inset-0 h-full bg-gray-400 dark:bg-gray-600" style="width: {(time / duration) * 100}%" ></div> </div> <span class="text-xs">{duration ? format(duration) : "--:--"}</span> </div> {/if} </div> </div>
chat-ui/src/lib/components/players/AudioPlayer.svelte/0
{ "file_path": "chat-ui/src/lib/components/players/AudioPlayer.svelte", "repo_id": "chat-ui", "token_count": 909 }
import type { Session } from "$lib/types/Session"; import type { User } from "$lib/types/User"; import type { Conversation } from "$lib/types/Conversation"; import { ObjectId } from "mongodb"; import { deleteConversations } from "./09-delete-empty-conversations"; import { afterAll, afterEach, beforeAll, describe, expect, test } from "vitest"; import { collections } from "$lib/server/database"; type Message = Conversation["messages"][number]; const userData = { _id: new ObjectId(), createdAt: new Date(), updatedAt: new Date(), username: "new-username", name: "name", avatarUrl: "https://example.com/avatar.png", hfUserId: "9999999999", } satisfies User; Object.freeze(userData); const sessionForUser = { _id: new ObjectId(), createdAt: new Date(), updatedAt: new Date(), userId: userData._id, sessionId: "session-id-9999999999", expiresAt: new Date(Date.now() + 1000 * 60 * 60 * 24), } satisfies Session; Object.freeze(sessionForUser); const userMessage = { from: "user", id: "user-message-id", content: "Hello, how are you?", } satisfies Message; const assistantMessage = { from: "assistant", id: "assistant-message-id", content: "I'm fine, thank you!", } satisfies Message; const systemMessage = { from: "system", id: "system-message-id", content: "This is a system message", } satisfies Message; const conversationBase = { _id: new ObjectId(), createdAt: new Date(Date.now() - 7 * 24 * 60 * 60 * 1000), updatedAt: new Date(Date.now() - 7 * 24 * 60 * 60 * 1000), model: "model-id", embeddingModel: "embedding-model-id", title: "title", messages: [], } satisfies Conversation; describe.sequential("Deleting discarded conversations", async () => { test("a conversation with no messages should get deleted", async () => { await collections.conversations.insertOne({ ...conversationBase, sessionId: sessionForUser.sessionId, }); const result = await deleteConversations(collections); expect(result).toBe(1); }); test("a conversation with no messages that is less than 1 hour old should not get deleted", async () => { await collections.conversations.insertOne({ ...conversationBase, sessionId: sessionForUser.sessionId, createdAt: new Date(Date.now() - 30 * 60 * 1000), }); const result = await deleteConversations(collections); expect(result).toBe(0); }); test("a conversation with only system messages should get deleted", async () => { await collections.conversations.insertOne({ ...conversationBase, sessionId: sessionForUser.sessionId, messages: [systemMessage], }); const result = await deleteConversations(collections); expect(result).toBe(1); }); test("a conversation with a user message should not get deleted", async () => { await collections.conversations.insertOne({ ...conversationBase, sessionId: sessionForUser.sessionId, messages: [userMessage], }); const result = await deleteConversations(collections); expect(result).toBe(0); }); test("a conversation with an assistant message should not get deleted", async () => { await collections.conversations.insertOne({ ...conversationBase, sessionId: sessionForUser.sessionId, messages: [assistantMessage], }); const result = await deleteConversations(collections); expect(result).toBe(0); }); test("a conversation with a mix of messages should not get deleted", async () => { await collections.conversations.insertOne({ ...conversationBase, sessionId: sessionForUser.sessionId, messages: [systemMessage, userMessage, assistantMessage, userMessage, assistantMessage], }); const result = await deleteConversations(collections); expect(result).toBe(0); }); test("a conversation with a userId 
and no sessionId should not get deleted", async () => { await collections.conversations.insertOne({ ...conversationBase, messages: [userMessage, assistantMessage], userId: userData._id, }); const result = await deleteConversations(collections); expect(result).toBe(0); }); test("a conversation with no userId or sessionId should get deleted", async () => { await collections.conversations.insertOne({ ...conversationBase, messages: [userMessage, assistantMessage], }); const result = await deleteConversations(collections); expect(result).toBe(1); }); test("a conversation with a sessionId that exists should not get deleted", async () => { await collections.conversations.insertOne({ ...conversationBase, messages: [userMessage, assistantMessage], sessionId: sessionForUser.sessionId, }); const result = await deleteConversations(collections); expect(result).toBe(0); }); test("a conversation with a userId and a sessionId that doesn't exist should NOT get deleted", async () => { await collections.conversations.insertOne({ ...conversationBase, userId: userData._id, messages: [userMessage, assistantMessage], sessionId: new ObjectId().toString(), }); const result = await deleteConversations(collections); expect(result).toBe(0); }); test("a conversation with only a sessionId that doesn't exist, should get deleted", async () => { await collections.conversations.insertOne({ ...conversationBase, messages: [userMessage, assistantMessage], sessionId: new ObjectId().toString(), }); const result = await deleteConversations(collections); expect(result).toBe(1); }); test("many conversations should get deleted", async () => { const conversations = Array.from({ length: 10010 }, () => ({ ...conversationBase, _id: new ObjectId(), })); await collections.conversations.insertMany(conversations); const result = await deleteConversations(collections); expect(result).toBe(10010); }); }); beforeAll(async () => { await collections.users.insertOne(userData); await collections.sessions.insertOne(sessionForUser); }); afterAll(async () => { await collections.users.deleteOne({ _id: userData._id, }); await collections.sessions.deleteOne({ _id: sessionForUser._id, }); await collections.conversations.deleteMany({}); }); afterEach(async () => { await collections.conversations.deleteMany({ _id: { $in: [conversationBase._id] }, }); });
chat-ui/src/lib/migrations/routines/09-delete-empty-conversations.spec.ts/0
{ "file_path": "chat-ui/src/lib/migrations/routines/09-delete-empty-conversations.spec.ts", "repo_id": "chat-ui", "token_count": 2019 }
import { z } from "zod"; import type { Endpoint } from "../endpoints"; import type { TextGenerationStreamOutput } from "@huggingface/inference"; import { createImageProcessorOptionsValidator, makeImageProcessor } from "../images"; import type { EndpointMessage } from "../endpoints"; import type { MessageFile } from "$lib/types/Message"; export const endpointBedrockParametersSchema = z.object({ weight: z.number().int().positive().default(1), type: z.literal("bedrock"), region: z.string().default("us-east-1"), model: z.any(), anthropicVersion: z.string().default("bedrock-2023-05-31"), isNova: z.boolean().default(false), multimodal: z .object({ image: createImageProcessorOptionsValidator({ supportedMimeTypes: [ "image/png", "image/jpeg", "image/webp", "image/avif", "image/tiff", "image/gif", ], preferredMimeType: "image/webp", maxSizeInMB: Infinity, maxWidth: 4096, maxHeight: 4096, }), }) .default({}), }); export async function endpointBedrock( input: z.input<typeof endpointBedrockParametersSchema> ): Promise<Endpoint> { const { region, model, anthropicVersion, multimodal, isNova } = endpointBedrockParametersSchema.parse(input); let BedrockRuntimeClient, InvokeModelWithResponseStreamCommand; try { ({ BedrockRuntimeClient, InvokeModelWithResponseStreamCommand } = await import( "@aws-sdk/client-bedrock-runtime" )); } catch (error) { throw new Error("Failed to import @aws-sdk/client-bedrock-runtime. Make sure it's installed."); } const client = new BedrockRuntimeClient({ region, }); const imageProcessor = makeImageProcessor(multimodal.image); return async ({ messages, preprompt, generateSettings }) => { let system = preprompt; // Use the first message as the system prompt if it's of type "system" if (messages?.[0]?.from === "system") { system = messages[0].content; messages = messages.slice(1); // Remove the first system message from the array } const formattedMessages = await prepareMessages(messages, isNova, imageProcessor); let tokenId = 0; const parameters = { ...model.parameters, ...generateSettings }; return (async function* () { const baseCommandParams = { contentType: "application/json", accept: "application/json", modelId: model.id, }; const maxTokens = parameters.max_new_tokens || 4096; let bodyContent; if (isNova) { bodyContent = { messages: formattedMessages, inferenceConfig: { maxTokens, topP: 0.1, temperature: 1.0, }, system: [{ text: system }], }; } else { bodyContent = { anthropic_version: anthropicVersion, max_tokens: maxTokens, messages: formattedMessages, system, }; } const command = new InvokeModelWithResponseStreamCommand({ ...baseCommandParams, body: Buffer.from(JSON.stringify(bodyContent), "utf-8"), trace: "DISABLED", }); const response = await client.send(command); let text = ""; for await (const item of response.body ?? 
[]) { const chunk = JSON.parse(new TextDecoder().decode(item.chunk?.bytes)); if ("contentBlockDelta" in chunk || chunk.type === "content_block_delta") { const chunkText = chunk.contentBlockDelta?.delta?.text || chunk.delta?.text || ""; text += chunkText; yield { token: { id: tokenId++, text: chunkText, logprob: 0, special: false, }, generated_text: null, details: null, } satisfies TextGenerationStreamOutput; } else if ("messageStop" in chunk || chunk.type === "message_stop") { yield { token: { id: tokenId++, text: "", logprob: 0, special: true, }, generated_text: text, details: null, } satisfies TextGenerationStreamOutput; } } })(); }; } // Prepare the messages excluding system prompts async function prepareMessages( messages: EndpointMessage[], isNova: boolean, imageProcessor: ReturnType<typeof makeImageProcessor> ) { const formattedMessages = []; for (const message of messages) { const content = []; if (message.files?.length) { content.push(...(await prepareFiles(imageProcessor, isNova, message.files))); } if (isNova) { content.push({ text: message.content }); } else { content.push({ type: "text", text: message.content }); } const lastMessage = formattedMessages[formattedMessages.length - 1]; if (lastMessage && lastMessage.role === message.from) { // If the last message has the same role, merge the content lastMessage.content.push(...content); } else { formattedMessages.push({ role: message.from, content }); } } return formattedMessages; } // Process files and convert them to base64 encoded strings async function prepareFiles( imageProcessor: ReturnType<typeof makeImageProcessor>, isNova: boolean, files: MessageFile[] ) { const processedFiles = await Promise.all(files.map(imageProcessor)); if (isNova) { return processedFiles.map((file) => ({ image: { format: file.mime.substring("image/".length), source: { bytes: file.image.toString("base64") }, }, })); } else { return processedFiles.map((file) => ({ type: "image", source: { type: "base64", media_type: file.mime, data: file.image.toString("base64") }, })); } }
chat-ui/src/lib/server/endpoints/aws/endpointBedrock.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/aws/endpointBedrock.ts", "repo_id": "chat-ui", "token_count": 2027 }
import { randomUUID } from "$lib/utils/randomUuid"; import { timeout } from "$lib/utils/timeout"; import { logger } from "./logger"; type ExitHandler = () => void | Promise<void>; type ExitHandlerUnsubscribe = () => void; const listeners = new Map<string, ExitHandler>(); export function onExit(cb: ExitHandler): ExitHandlerUnsubscribe { const uuid = randomUUID(); listeners.set(uuid, cb); return () => { listeners.delete(uuid); }; } async function runExitHandler(handler: ExitHandler): Promise<void> { return timeout(Promise.resolve().then(handler), 30_000).catch((err) => { logger.error(err, "Exit handler failed to run"); }); } export function initExitHandler() { let signalCount = 0; const exitHandler = async () => { signalCount++; if (signalCount === 1) { logger.info("Received signal... Exiting"); await Promise.all(Array.from(listeners.values()).map(runExitHandler)); logger.info("All exit handlers ran... Waiting for svelte server to exit"); } if (signalCount === 3) { logger.warn("Received 3 signals... Exiting immediately"); process.exit(1); } }; process.on("SIGINT", exitHandler); process.on("SIGTERM", exitHandler); }
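// Illustrative usage (not part of this module); `closeDb` is a placeholder for
// whatever cleanup the caller needs to run on shutdown:
//
//   initExitHandler();
//   const unsubscribe = onExit(async () => {
//     await closeDb();
//   });
//   // If the resource is torn down early, the handler can be removed again:
//   unsubscribe();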
chat-ui/src/lib/server/exitHandler.ts/0
{ "file_path": "chat-ui/src/lib/server/exitHandler.ts", "repo_id": "chat-ui", "token_count": 402 }
import { collectDefaultMetrics, Registry, Counter, Summary } from "prom-client"; import express from "express"; import { logger } from "$lib/server/logger"; import { env } from "$env/dynamic/private"; import type { Model } from "$lib/types/Model"; import { onExit } from "./exitHandler"; import { promisify } from "util"; interface Metrics { model: { conversationsTotal: Counter<Model["id"]>; messagesTotal: Counter<Model["id"]>; tokenCountTotal: Counter<Model["id"]>; timePerOutputToken: Summary<Model["id"]>; timeToFirstToken: Summary<Model["id"]>; latency: Summary<Model["id"]>; votesPositive: Counter<Model["id"]>; votesNegative: Counter<Model["id"]>; }; webSearch: { requestCount: Counter; pageFetchCount: Counter; pageFetchCountError: Counter; pageFetchDuration: Summary; embeddingDuration: Summary; }; tool: { toolUseCount: Counter<string>; toolUseCountError: Counter<string>; toolUseDuration: Summary<string>; timeToChooseTools: Summary; }; } export class MetricsServer { private static instance: MetricsServer; private metrics: Metrics; private constructor() { const app = express(); const port = Number(env.METRICS_PORT || "5565"); if (isNaN(port) || port < 0 || port > 65535) { logger.warn(`Invalid value for METRICS_PORT: ${env.METRICS_PORT}`); } if (env.METRICS_ENABLED !== "false" && env.METRICS_ENABLED !== "true") { logger.warn(`Invalid value for METRICS_ENABLED: ${env.METRICS_ENABLED}`); } if (env.METRICS_ENABLED === "true") { const server = app.listen(port, () => { logger.info(`Metrics server listening on port ${port}`); }); const closeServer = promisify(server.close); onExit(async () => { logger.info("Disconnecting metrics server ..."); await closeServer(); logger.info("Server stopped ..."); }); } const register = new Registry(); collectDefaultMetrics({ register }); this.metrics = { model: { conversationsTotal: new Counter({ name: "model_conversations_total", help: "Total number of conversations", labelNames: ["model"], registers: [register], }), messagesTotal: new Counter({ name: "model_messages_total", help: "Total number of messages", labelNames: ["model"], registers: [register], }), tokenCountTotal: new Counter({ name: "model_token_count_total", help: "Total number of tokens", labelNames: ["model"], registers: [register], }), timePerOutputToken: new Summary({ name: "model_time_per_output_token_ms", help: "Time per output token in ms", labelNames: ["model"], registers: [register], maxAgeSeconds: 5 * 60, ageBuckets: 5, }), timeToFirstToken: new Summary({ name: "model_time_to_first_token_ms", help: "Time to first token", labelNames: ["model"], registers: [register], maxAgeSeconds: 5 * 60, ageBuckets: 5, }), latency: new Summary({ name: "model_latency_ms", help: "Total latency until end of answer", labelNames: ["model"], registers: [register], maxAgeSeconds: 5 * 60, ageBuckets: 5, }), votesPositive: new Counter({ name: "model_votes_positive", help: "Total number of positive votes on messages generated by the model", labelNames: ["model"], registers: [register], }), votesNegative: new Counter({ name: "model_votes_negative", help: "Total number of negative votes on messages generated by the model", labelNames: ["model"], registers: [register], }), }, webSearch: { requestCount: new Counter({ name: "web_search_request_count", help: "Total number of web search requests", registers: [register], }), pageFetchCount: new Counter({ name: "web_search_page_fetch_count", help: "Total number of web search page fetches", registers: [register], }), pageFetchCountError: new Counter({ name: 
"web_search_page_fetch_count_error", help: "Total number of web search page fetch errors", registers: [register], }), pageFetchDuration: new Summary({ name: "web_search_page_fetch_duration_ms", help: "Web search page fetch duration", registers: [register], maxAgeSeconds: 5 * 60, ageBuckets: 5, }), embeddingDuration: new Summary({ name: "web_search_embedding_duration_ms", help: "Web search embedding duration", registers: [register], maxAgeSeconds: 5 * 60, ageBuckets: 5, }), }, tool: { toolUseCount: new Counter({ name: "tool_use_count", help: "Total number of tool uses", labelNames: ["tool"], registers: [register], }), toolUseCountError: new Counter({ name: "tool_use_count_error", help: "Total number of tool use errors", labelNames: ["tool"], registers: [register], }), toolUseDuration: new Summary({ name: "tool_use_duration_ms", help: "Tool use duration", labelNames: ["tool"], registers: [register], maxAgeSeconds: 30 * 60, // longer duration since we use this to give feedback to the user ageBuckets: 5, }), timeToChooseTools: new Summary({ name: "time_to_choose_tools_ms", help: "Time to choose tools", labelNames: ["model"], registers: [register], maxAgeSeconds: 5 * 60, ageBuckets: 5, }), }, }; app.get("/metrics", (req, res) => { register.metrics().then((metrics) => { res.set("Content-Type", "text/plain"); res.send(metrics); }); }); } public static getInstance(): MetricsServer { if (!MetricsServer.instance) { MetricsServer.instance = new MetricsServer(); } return MetricsServer.instance; } public static getMetrics(): Metrics { return MetricsServer.getInstance().metrics; } }
chat-ui/src/lib/server/metrics.ts/0
{ "file_path": "chat-ui/src/lib/server/metrics.ts", "repo_id": "chat-ui", "token_count": 2367 }
import type { ConfigTool } from "$lib/types/Tool"; import { ObjectId } from "mongodb"; import { runWebSearch } from "../../websearch/runWebSearch"; const websearch: ConfigTool = { _id: new ObjectId("00000000000000000000000A"), type: "config", description: "Search the web for answers to the user's query", color: "blue", icon: "wikis", displayName: "Web Search", name: "websearch", endpoint: null, inputs: [ { name: "query", type: "str", description: "A search query which will be used to fetch the most relevant snippets regarding the user's query", paramType: "required", }, ], outputComponent: null, outputComponentIdx: null, showOutput: false, async *call({ query }, { conv, assistant, messages }) { const webSearchToolResults = yield* runWebSearch(conv, messages, assistant?.rag, String(query)); const webSearchContext = webSearchToolResults?.contextSources .map(({ context }, idx) => `Source [${idx + 1}]\n${context.trim()}`) .join("\n\n----------\n\n"); return { outputs: [ { websearch: webSearchContext + "\n\nWhen answering the question, you must reference the sources you used inline by wrapping the index in brackets like this: [1]. If multiple sources are used, you must reference each one of them without commas like this: [1][2][3].", }, ], display: false, }; }, }; export default websearch;
chat-ui/src/lib/server/tools/web/search.ts/0
{ "file_path": "chat-ui/src/lib/server/tools/web/search.ts", "repo_id": "chat-ui", "token_count": 480 }
import type { BackendModel } from "$lib/server/models"; export type Model = Pick< BackendModel, | "id" | "name" | "displayName" | "websiteUrl" | "datasetName" | "promptExamples" | "parameters" | "description" | "logoUrl" | "modelUrl" | "tokenizer" | "datasetUrl" | "preprompt" | "multimodal" | "multimodalAcceptedMimetypes" | "unlisted" | "tools" | "hasInferenceAPI" >;
chat-ui/src/lib/types/Model.ts/0
{ "file_path": "chat-ui/src/lib/types/Model.ts", "repo_id": "chat-ui", "token_count": 175 }
/** * A debounce function that works in both browser and Nodejs. * For pure Nodejs work, prefer the `Debouncer` class. */ export function debounce<T extends unknown[]>( callback: (...rest: T) => unknown, limit: number ): (...rest: T) => void { let timer: ReturnType<typeof setTimeout>; return function (...rest) { clearTimeout(timer); timer = setTimeout(() => { callback(...rest); }, limit); }; }
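// Illustrative usage: collapse a burst of calls into a single call that fires
// 200ms after the last invocation. `runSearch` is a placeholder callback.
//
//   const debouncedSearch = debounce((query: string) => runSearch(query), 200);
//   debouncedSearch("a");
//   debouncedSearch("ab"); // only this call ends up running, 200ms later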
chat-ui/src/lib/utils/debounce.ts/0
{ "file_path": "chat-ui/src/lib/utils/debounce.ts", "repo_id": "chat-ui", "token_count": 138 }
export function parseStringToList(links: unknown): string[] { if (typeof links !== "string") { throw new Error("Expected a string"); } return links .split(",") .map((link) => link.trim()) .filter((link) => link.length > 0); }
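// Example behavior, following the implementation above:
//   parseStringToList("a, b, ,c") // => ["a", "b", "c"]
//   parseStringToList(42)         // => throws Error("Expected a string")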
chat-ui/src/lib/utils/parseStringToList.ts/0
{ "file_path": "chat-ui/src/lib/utils/parseStringToList.ts", "repo_id": "chat-ui", "token_count": 86 }
import { collections } from "$lib/server/database"; import { ObjectId } from "mongodb"; import { describe, expect, it } from "vitest"; import { insertLegacyConversation, insertLinearBranchConversation, insertSideBranchesConversation, } from "./treeHelpers.spec"; import { buildSubtree } from "./buildSubtree"; describe("buildSubtree", () => { it("a subtree in a legacy conversation should be just a slice", async () => { const convId = await insertLegacyConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); // check middle const id = conv.messages[2].id; const subtree = buildSubtree(conv, id); expect(subtree).toEqual(conv.messages.slice(0, 3)); // check zero const id2 = conv.messages[0].id; const subtree2 = buildSubtree(conv, id2); expect(subtree2).toEqual(conv.messages.slice(0, 1)); //check full length const id3 = conv.messages[conv.messages.length - 1].id; const subtree3 = buildSubtree(conv, id3); expect(subtree3).toEqual(conv.messages); }); it("a subtree in a linear branch conversation should be the ancestors and the message", async () => { const convId = await insertLinearBranchConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); // check middle const id = conv.messages[1].id; const subtree = buildSubtree(conv, id); expect(subtree).toEqual([conv.messages[0], conv.messages[1]]); // check zero const id2 = conv.messages[0].id; const subtree2 = buildSubtree(conv, id2); expect(subtree2).toEqual([conv.messages[0]]); //check full length const id3 = conv.messages[conv.messages.length - 1].id; const subtree3 = buildSubtree(conv, id3); expect(subtree3).toEqual(conv.messages); }); it("should throw an error if the message is not found", async () => { const convId = await insertLinearBranchConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); const id = "not-a-real-id-test"; expect(() => buildSubtree(conv, id)).toThrow("Message not found"); }); it("should throw an error if the ancestor is not found", async () => { const convId = await insertLinearBranchConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); const id = "1-1-1-1-2"; conv.messages[1].ancestors = ["not-a-real-id-test"]; expect(() => buildSubtree(conv, id)).toThrow("Ancestor not found"); }); it("should work on empty conversations", () => { const conv = { _id: new ObjectId(), rootMessageId: undefined, messages: [], }; const subtree = buildSubtree(conv, "not-a-real-id-test"); expect(subtree).toEqual([]); }); it("should work for conversation with subtrees", async () => { const convId = await insertSideBranchesConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); const subtree = buildSubtree(conv, "1-1-1-1-2"); expect(subtree).toEqual([conv.messages[0], conv.messages[1]]); const subtree2 = buildSubtree(conv, "1-1-1-1-4"); expect(subtree2).toEqual([ conv.messages[0], conv.messages[1], conv.messages[2], conv.messages[3], ]); const subtree3 = buildSubtree(conv, "1-1-1-1-6"); expect(subtree3).toEqual([conv.messages[0], conv.messages[4], conv.messages[5]]); const subtree4 = buildSubtree(conv, "1-1-1-1-7"); expect(subtree4).toEqual([conv.messages[0], 
conv.messages[4], conv.messages[6]]); }); });
chat-ui/src/lib/utils/tree/buildSubtree.spec.ts/0
{ "file_path": "chat-ui/src/lib/utils/tree/buildSubtree.spec.ts", "repo_id": "chat-ui", "token_count": 1375 }
import { collections } from "$lib/server/database"; import { models } from "$lib/server/models"; import { authCondition } from "$lib/server/auth"; import type { Conversation } from "$lib/types/Conversation"; import { CONV_NUM_PER_PAGE } from "$lib/constants/pagination"; export async function GET({ locals, url }) { const p = parseInt(url.searchParams.get("p") ?? "0"); if (locals.user?._id || locals.sessionId) { const convs = await collections.conversations .find({ ...authCondition(locals), }) .project<Pick<Conversation, "_id" | "title" | "updatedAt" | "model" | "assistantId">>({ title: 1, updatedAt: 1, model: 1, assistantId: 1, }) .sort({ updatedAt: -1 }) .skip(p * CONV_NUM_PER_PAGE) .limit(CONV_NUM_PER_PAGE) .toArray(); if (convs.length === 0) { return Response.json([]); } const res = convs.map((conv) => ({ _id: conv._id, id: conv._id, // legacy param iOS title: conv.title, updatedAt: conv.updatedAt, model: conv.model, modelId: conv.model, // legacy param iOS assistantId: conv.assistantId, modelTools: models.find((m) => m.id == conv.model)?.tools ?? false, })); return Response.json(res); } else { return Response.json({ message: "Must have session cookie" }, { status: 401 }); } }
chat-ui/src/routes/api/conversations/+server.ts/0
{ "file_path": "chat-ui/src/routes/api/conversations/+server.ts", "repo_id": "chat-ui", "token_count": 510 }
import { env } from "$env/dynamic/private"; import { startOfHour } from "date-fns"; import { authCondition, requiresUser } from "$lib/server/auth"; import { collections } from "$lib/server/database"; import { models, validModelIdSchema } from "$lib/server/models"; import { ERROR_MESSAGES } from "$lib/stores/errors"; import type { Message } from "$lib/types/Message"; import { error } from "@sveltejs/kit"; import { ObjectId } from "mongodb"; import { z } from "zod"; import { MessageReasoningUpdateType, MessageUpdateStatus, MessageUpdateType, type MessageUpdate, } from "$lib/types/MessageUpdate"; import { uploadFile } from "$lib/server/files/uploadFile"; import { convertLegacyConversation } from "$lib/utils/tree/convertLegacyConversation"; import { isMessageId } from "$lib/utils/tree/isMessageId"; import { buildSubtree } from "$lib/utils/tree/buildSubtree.js"; import { addChildren } from "$lib/utils/tree/addChildren.js"; import { addSibling } from "$lib/utils/tree/addSibling.js"; import { usageLimits } from "$lib/server/usageLimits"; import { MetricsServer } from "$lib/server/metrics"; import { textGeneration } from "$lib/server/textGeneration"; import type { TextGenerationContext } from "$lib/server/textGeneration/types"; import { logger } from "$lib/server/logger.js"; import { documentParserToolId } from "$lib/utils/toolIds.js"; export async function POST({ request, locals, params, getClientAddress }) { const id = z.string().parse(params.id); const convId = new ObjectId(id); const promptedAt = new Date(); const userId = locals.user?._id ?? locals.sessionId; // check user if (!userId) { error(401, "Unauthorized"); } // check if the user has access to the conversation const convBeforeCheck = await collections.conversations.findOne({ _id: convId, ...authCondition(locals), }); if (convBeforeCheck && !convBeforeCheck.rootMessageId) { const res = await collections.conversations.updateOne( { _id: convId, }, { $set: { ...convBeforeCheck, ...convertLegacyConversation(convBeforeCheck), }, } ); if (!res.acknowledged) { error(500, "Failed to convert conversation"); } } const conv = await collections.conversations.findOne({ _id: convId, ...authCondition(locals), }); if (!conv) { error(404, "Conversation not found"); } // register the event for ratelimiting await collections.messageEvents.insertOne({ userId, createdAt: new Date(), ip: getClientAddress(), }); const messagesBeforeLogin = env.MESSAGES_BEFORE_LOGIN ? parseInt(env.MESSAGES_BEFORE_LOGIN) : 0; // guest mode check if (!locals.user?._id && requiresUser && messagesBeforeLogin) { const totalMessages = ( await collections.conversations .aggregate([ { $match: { ...authCondition(locals), "messages.from": "assistant" } }, { $project: { messages: 1 } }, { $limit: messagesBeforeLogin + 1 }, { $unwind: "$messages" }, { $match: { "messages.from": "assistant" } }, { $count: "messages" }, ]) .toArray() )[0]?.messages ?? 
0; if (totalMessages > messagesBeforeLogin) { error(429, "Exceeded number of messages before login"); } } if (usageLimits?.messagesPerMinute) { // check if the user is rate limited const nEvents = Math.max( await collections.messageEvents.countDocuments({ userId, createdAt: { $gte: new Date(Date.now() - 60_000) }, }), await collections.messageEvents.countDocuments({ ip: getClientAddress(), createdAt: { $gte: new Date(Date.now() - 60_000) }, }) ); if (nEvents > usageLimits.messagesPerMinute) { error(429, ERROR_MESSAGES.rateLimited); } } if (usageLimits?.messages && conv.messages.length > usageLimits.messages) { error( 429, `This conversation has more than ${usageLimits.messages} messages. Start a new one to continue` ); } // fetch the model const model = models.find((m) => m.id === conv.model); if (!model) { error(410, "Model not available anymore"); } // finally parse the content of the request const form = await request.formData(); const json = form.get("data"); if (!json || typeof json !== "string") { error(400, "Invalid request"); } const { inputs: newPrompt, id: messageId, is_retry: isRetry, is_continue: isContinue, web_search: webSearch, tools: toolsPreferences, } = z .object({ id: z.string().uuid().refine(isMessageId).optional(), // parent message id to append to for a normal message, or the message id for a retry/continue inputs: z.optional( z .string() .min(1) .transform((s) => s.replace(/\r\n/g, "\n")) ), is_retry: z.optional(z.boolean()), is_continue: z.optional(z.boolean()), web_search: z.optional(z.boolean()), tools: z.array(z.string()).optional(), files: z.optional( z.array( z.object({ type: z.literal("base64").or(z.literal("hash")), name: z.string(), value: z.string(), mime: z.string(), }) ) ), }) .parse(JSON.parse(json)); const inputFiles = await Promise.all( form .getAll("files") .filter((entry): entry is File => entry instanceof File && entry.size > 0) .map(async (file) => { const [type, ...name] = file.name.split(";"); return { type: z.literal("base64").or(z.literal("hash")).parse(type), value: await file.text(), mime: file.type, name: name.join(";"), }; }) ); // Check for PDF files in the input const hasPdfFiles = inputFiles?.some((file) => file.mime === "application/pdf") ?? false; // Check for existing PDF files in the conversation const hasPdfInConversation = conv.messages?.some((msg) => msg.files?.some((file) => file.mime === "application/pdf")) ?? false; if (usageLimits?.messageLength && (newPrompt?.length ?? 0) > usageLimits.messageLength) { error(400, "Message too long."); } // each file is either: // base64 string requiring upload to the server // hash pointing to an existing file const hashFiles = inputFiles?.filter((file) => file.type === "hash") ?? []; const b64Files = inputFiles ?.filter((file) => file.type !== "hash") .map((file) => { const blob = Buffer.from(file.value, "base64"); return new File([blob], file.name, { type: file.mime }); }) ?? 
[]; // check sizes // todo: make configurable if (b64Files.some((file) => file.size > 10 * 1024 * 1024)) { error(413, "File too large, should be <10MB"); } const uploadedFiles = await Promise.all(b64Files.map((file) => uploadFile(file, conv))).then( (files) => [...files, ...hashFiles] ); // we will append tokens to the content of this message let messageToWriteToId: Message["id"] | undefined = undefined; // used for building the prompt, subtree of the conversation that goes from the latest message to the root let messagesForPrompt: Message[] = []; if (isContinue && messageId) { // if it's the last message and we continue then we build the prompt up to the last message // we will strip the end tokens afterwards when the prompt is built if ((conv.messages.find((msg) => msg.id === messageId)?.children?.length ?? 0) > 0) { error(400, "Can only continue the last message"); } messageToWriteToId = messageId; messagesForPrompt = buildSubtree(conv, messageId); } else if (isRetry && messageId) { // two cases, if we're retrying a user message with a newPrompt set, // it means we're editing a user message // if we're retrying on an assistant message, newPrompt cannot be set // it means we're retrying the last assistant message for a new answer const messageToRetry = conv.messages.find((message) => message.id === messageId); if (!messageToRetry) { error(404, "Message not found"); } if (messageToRetry.from === "user" && newPrompt) { // add a sibling to this message from the user, with the alternative prompt // add a children to that sibling, where we can write to const newUserMessageId = addSibling( conv, { from: "user", content: newPrompt, files: uploadedFiles, createdAt: new Date(), updatedAt: new Date(), }, messageId ); messageToWriteToId = addChildren( conv, { from: "assistant", content: "", createdAt: new Date(), updatedAt: new Date(), }, newUserMessageId ); messagesForPrompt = buildSubtree(conv, newUserMessageId); } else if (messageToRetry.from === "assistant") { // we're retrying an assistant message, to generate a new answer // just add a sibling to the assistant answer where we can write to messageToWriteToId = addSibling( conv, { from: "assistant", content: "", createdAt: new Date(), updatedAt: new Date() }, messageId ); messagesForPrompt = buildSubtree(conv, messageId); messagesForPrompt.pop(); // don't need the latest assistant message in the prompt since we're retrying it } } else { // just a normal linear conversation, so we add the user message // and the blank assistant message back to back const newUserMessageId = addChildren( conv, { from: "user", content: newPrompt ?? 
"", files: uploadedFiles, createdAt: new Date(), updatedAt: new Date(), }, messageId ); messageToWriteToId = addChildren( conv, { from: "assistant", content: "", createdAt: new Date(), updatedAt: new Date(), }, newUserMessageId ); // build the prompt from the user message messagesForPrompt = buildSubtree(conv, newUserMessageId); } const messageToWriteTo = conv.messages.find((message) => message.id === messageToWriteToId); if (!messageToWriteTo) { error(500, "Failed to create message"); } if (messagesForPrompt.length === 0) { error(500, "Failed to create prompt"); } // update the conversation with the new messages await collections.conversations.updateOne( { _id: convId }, { $set: { messages: conv.messages, title: conv.title, updatedAt: new Date() } } ); let doneStreaming = false; let lastTokenTimestamp: undefined | Date = undefined; // we now build the stream const stream = new ReadableStream({ async start(controller) { messageToWriteTo.updates ??= []; async function update(event: MessageUpdate) { if (!messageToWriteTo || !conv) { throw Error("No message or conversation to write events to"); } // Add token to content or skip if empty if (event.type === MessageUpdateType.Stream) { if (event.token === "") return; messageToWriteTo.content += event.token; // add to token total MetricsServer.getMetrics().model.tokenCountTotal.inc({ model: model?.id }); // if this is the first token, add to time to first token if (!lastTokenTimestamp) { MetricsServer.getMetrics().model.timeToFirstToken.observe( { model: model?.id }, Date.now() - promptedAt.getTime() ); lastTokenTimestamp = new Date(); } // add to time per token MetricsServer.getMetrics().model.timePerOutputToken.observe( { model: model?.id }, Date.now() - (lastTokenTimestamp ?? promptedAt).getTime() ); lastTokenTimestamp = new Date(); } else if ( event.type === MessageUpdateType.Reasoning && event.subtype === MessageReasoningUpdateType.Stream ) { messageToWriteTo.reasoning ??= ""; messageToWriteTo.reasoning += event.token; } // Set the title else if (event.type === MessageUpdateType.Title) { conv.title = event.title; await collections.conversations.updateOne( { _id: convId }, { $set: { title: conv?.title, updatedAt: new Date() } } ); } // Set the final text and the interrupted flag else if (event.type === MessageUpdateType.FinalAnswer) { messageToWriteTo.interrupted = event.interrupted; messageToWriteTo.content = initialMessageContent + event.text; // add to latency MetricsServer.getMetrics().model.latency.observe( { model: model?.id }, Date.now() - promptedAt.getTime() ); } // Add file else if (event.type === MessageUpdateType.File) { messageToWriteTo.files = [ ...(messageToWriteTo.files ?? 
[]), { type: "hash", name: event.name, value: event.sha, mime: event.mime }, ]; } // Append to the persistent message updates if it's not a stream update if ( event.type !== MessageUpdateType.Stream && !( event.type === MessageUpdateType.Status && event.status === MessageUpdateStatus.KeepAlive ) && !( event.type === MessageUpdateType.Reasoning && event.subtype === MessageReasoningUpdateType.Stream ) ) { messageToWriteTo?.updates?.push(event); } // Avoid remote keylogging attack executed by watching packet lengths // by padding the text with null chars to a fixed length // https://cdn.arstechnica.net/wp-content/uploads/2024/03/LLM-Side-Channel.pdf if (event.type === MessageUpdateType.Stream) { event = { ...event, token: event.token.padEnd(16, "\0") }; } // Send the update to the client controller.enqueue(JSON.stringify(event) + "\n"); // Send 4096 of spaces to make sure the browser doesn't blocking buffer that holding the response if (event.type === MessageUpdateType.FinalAnswer) { controller.enqueue(" ".repeat(4096)); } } await collections.conversations.updateOne( { _id: convId }, { $set: { title: conv.title, updatedAt: new Date() } } ); messageToWriteTo.updatedAt = new Date(); let hasError = false; const initialMessageContent = messageToWriteTo.content; try { const ctx: TextGenerationContext = { model, endpoint: await model.getEndpoint(), conv, messages: messagesForPrompt, assistant: undefined, isContinue: isContinue ?? false, webSearch: webSearch ?? false, toolsPreference: [ ...(toolsPreferences ?? []), ...(hasPdfFiles || hasPdfInConversation ? [documentParserToolId] : []), // Add document parser tool if PDF files are present ], promptedAt, ip: getClientAddress(), username: locals.user?.username, }; // run the text generation and send updates to the client for await (const event of textGeneration(ctx)) await update(event); } catch (e) { hasError = true; await update({ type: MessageUpdateType.Status, status: MessageUpdateStatus.Error, message: (e as Error).message, }); logger.error(e); } finally { // check if no output was generated if (!hasError && messageToWriteTo.content === initialMessageContent) { await update({ type: MessageUpdateType.Status, status: MessageUpdateStatus.Error, message: "No output was generated. 
Something went wrong.", }); } } await collections.conversations.updateOne( { _id: convId }, { $set: { messages: conv.messages, title: conv?.title, updatedAt: new Date() } } ); // used to detect if cancel() is called bc of interrupt or just because the connection closes doneStreaming = true; controller.close(); }, async cancel() { if (doneStreaming) return; await collections.conversations.updateOne( { _id: convId }, { $set: { messages: conv.messages, title: conv.title, updatedAt: new Date() } } ); }, }); if (conv.assistantId) { await collections.assistantStats.updateOne( { assistantId: conv.assistantId, "date.at": startOfHour(new Date()), "date.span": "hour" }, { $inc: { count: 1 } }, { upsert: true } ); } const metrics = MetricsServer.getMetrics(); metrics.model.messagesTotal.inc({ model: model?.id }); // Todo: maybe we should wait for the message to be saved before ending the response - in case of errors return new Response(stream, { headers: { "Content-Type": "text/event-stream", }, }); } export async function DELETE({ locals, params }) { const convId = new ObjectId(params.id); const conv = await collections.conversations.findOne({ _id: convId, ...authCondition(locals), }); if (!conv) { error(404, "Conversation not found"); } await collections.conversations.deleteOne({ _id: conv._id }); return new Response(); } export async function PATCH({ request, locals, params }) { const values = z .object({ title: z.string().trim().min(1).max(100).optional(), model: validModelIdSchema.optional(), }) .parse(await request.json()); const convId = new ObjectId(params.id); const conv = await collections.conversations.findOne({ _id: convId, ...authCondition(locals), }); if (!conv) { error(404, "Conversation not found"); } await collections.conversations.updateOne( { _id: convId, }, { $set: values, } ); return new Response(); }
chat-ui/src/routes/conversation/[id]/+server.ts/0
{ "file_path": "chat-ui/src/routes/conversation/[id]/+server.ts", "repo_id": "chat-ui", "token_count": 6234 }
import ModelThumbnail from "./ModelThumbnail.svelte"; import { redirect, type RequestHandler } from "@sveltejs/kit"; import type { SvelteComponent } from "svelte"; import { Resvg } from "@resvg/resvg-js"; import satori from "satori"; import { html } from "satori-html"; import InterRegular from "$lib/server/fonts/Inter-Regular.ttf"; import InterBold from "$lib/server/fonts/Inter-Bold.ttf"; import { base } from "$app/paths"; import { models } from "$lib/server/models"; export const GET: RequestHandler = (async ({ params }) => { const model = models.find(({ id }) => id === params.model); if (!model || model.unlisted) { redirect(302, `${base}/`); } const renderedComponent = (ModelThumbnail as unknown as SvelteComponent).render({ name: model.name, logoUrl: model.logoUrl, }); const reactLike = html( "<style>" + renderedComponent.css.code + "</style>" + renderedComponent.html ); const svg = await satori(reactLike, { width: 1200, height: 648, fonts: [ { name: "Inter", data: InterRegular as unknown as ArrayBuffer, weight: 500, }, { name: "Inter", data: InterBold as unknown as ArrayBuffer, weight: 700, }, ], }); const png = new Resvg(svg, { fitTo: { mode: "original" }, }) .render() .asPng(); return new Response(png, { headers: { "Content-Type": "image/png", }, }); }) satisfies RequestHandler;
chat-ui/src/routes/models/[...model]/thumbnail.png/+server.ts/0
{ "file_path": "chat-ui/src/routes/models/[...model]/thumbnail.png/+server.ts", "repo_id": "chat-ui", "token_count": 520 }
<script lang="ts"> import Modal from "$lib/components/Modal.svelte"; import ToolEdit from "../ToolEdit.svelte"; let { form = $bindable() } = $props(); </script> <Modal on:close={() => window.history.back()} width="h-[95dvh] w-[90dvw] overflow-hidden rounded-2xl bg-white shadow-2xl outline-none sm:h-[85dvh] xl:w-[1200px] 2xl:h-[75dvh]" > <ToolEdit bind:form on:close={() => window.history.back()} /> </Modal>
chat-ui/src/routes/tools/new/+page.svelte/0
{ "file_path": "chat-ui/src/routes/tools/new/+page.svelte", "repo_id": "chat-ui", "token_count": 176 }
{ "extends": "./.svelte-kit/tsconfig.json", "compilerOptions": { "allowJs": true, "checkJs": true, "esModuleInterop": true, "forceConsistentCasingInFileNames": true, "resolveJsonModule": true, "skipLibCheck": true, "sourceMap": true, "strict": true, "target": "ES2018" }, "exclude": ["vite.config.ts"] // Path aliases are handled by https://kit.svelte.dev/docs/configuration#alias // // If you want to overwrite includes/excludes, make sure to copy over the relevant includes/excludes // from the referenced tsconfig.json - TypeScript does not merge them in }
chat-ui/tsconfig.json/0
{ "file_path": "chat-ui/tsconfig.json", "repo_id": "chat-ui", "token_count": 211 }
import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration SPEED_TEST_N_EXAMPLES = 500_000 RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__) RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def map(dataset: datasets.Dataset, **kwargs): _ = dataset.map(**kwargs) @get_duration def filter(dataset: datasets.Dataset, **kwargs): _ = dataset.filter(**kwargs) def benchmark_map_filter(): times = {"num examples": SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")}) dataset = generate_example_dataset( os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES ) tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True) def tokenize(examples): return tokenizer(examples["text"]) times["map identity"] = map(dataset) times["map identity batched"] = map(dataset, batched=True) times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True) with dataset.formatted_as(type="numpy"): times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True) with dataset.formatted_as(type="pandas"): times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True) with dataset.formatted_as(type="torch", columns="numbers"): times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True) with dataset.formatted_as(type="tensorflow", columns="numbers"): times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True) times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True) times["filter"] = filter(dataset) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(RESULTS_FILE_PATH, "wb") as f: f.write(json.dumps(times).encode("utf-8")) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
datasets/benchmarks/benchmark_map_filter.py/0
{ "file_path": "datasets/benchmarks/benchmark_map_filter.py", "repo_id": "datasets", "token_count": 996 }
# Image classification Image classification datasets are used to train a model to classify an entire image. There are a wide variety of applications enabled by these datasets such as identifying endangered wildlife species or screening for disease in medical images. This guide will show you how to apply transformations to an image classification dataset. Before you start, make sure you have up-to-date versions of `albumentations` and `cv2` installed: ```bash pip install -U albumentations opencv-python ``` This guide uses the [Beans](https://huggingface.co/datasets/beans) dataset for identifying the type of bean plant disease based on an image of its leaf. Load the dataset and take a look at an example: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("beans") >>> dataset["train"][10] {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x7F8D2F4D7A10>, 'image_file_path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/angular_leaf_spot/angular_leaf_spot_train.204.jpg', 'labels': 0} ``` The dataset has three fields: * `image`: a PIL image object. * `image_file_path`: the path to the image file. * `labels`: the label or category of the image. Next, check out an image: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/img_clf.png"> </div> Now apply some augmentations with `albumentations`. You'll randomly crop the image, flip it horizontally, and adjust its brightness. ```py >>> import cv2 >>> import albumentations >>> import numpy as np >>> transform = albumentations.Compose([ ... albumentations.RandomCrop(width=256, height=256), ... albumentations.HorizontalFlip(p=0.5), ... albumentations.RandomBrightnessContrast(p=0.2), ... ]) ``` Create a function to apply the transformation to the images: ```py >>> def transforms(examples): ... examples["pixel_values"] = [ ... transform(image=np.array(image))["image"] for image in examples["image"] ... ] ... ... return examples ``` Use the [`~Dataset.set_transform`] function to apply the transformation on-the-fly to batches of the dataset to consume less disk space: ```py >>> dataset.set_transform(transforms) ``` You can verify the transformation worked by indexing into the `pixel_values` of the first example: ```py >>> import numpy as np >>> import matplotlib.pyplot as plt >>> img = dataset["train"][0]["pixel_values"] >>> plt.imshow(img) ``` <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/img_clf_aug.png"> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/img_clf_aug.png"/> </div> <Tip> Now that you know how to process a dataset for image classification, learn [how to train an image classification model](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb) and use it for inference. </Tip>
datasets/docs/source/image_classification.mdx/0
{ "file_path": "datasets/docs/source/image_classification.mdx", "repo_id": "datasets", "token_count": 1043 }
# Table Classes Each `Dataset` object is backed by a PyArrow Table. A Table can be loaded from either the disk (memory mapped) or in memory. Several Table types are available, and they all inherit from [`table.Table`]. ## Table [[autodoc]] datasets.table.Table - validate - equals - to_batches - to_pydict - to_pandas - to_string - field - column - itercolumns - schema - columns - num_columns - num_rows - shape - nbytes ## InMemoryTable [[autodoc]] datasets.table.InMemoryTable - validate - equals - to_batches - to_pydict - to_pandas - to_string - field - column - itercolumns - schema - columns - num_columns - num_rows - shape - nbytes - column_names - slice - filter - flatten - combine_chunks - cast - replace_schema_metadata - add_column - append_column - remove_column - set_column - rename_columns - select - drop - from_file - from_buffer - from_pandas - from_arrays - from_pydict - from_batches ## MemoryMappedTable [[autodoc]] datasets.table.MemoryMappedTable - validate - equals - to_batches - to_pydict - to_pandas - to_string - field - column - itercolumns - schema - columns - num_columns - num_rows - shape - nbytes - column_names - slice - filter - flatten - combine_chunks - cast - replace_schema_metadata - add_column - append_column - remove_column - set_column - rename_columns - select - drop - from_file ## ConcatenationTable [[autodoc]] datasets.table.ConcatenationTable - validate - equals - to_batches - to_pydict - to_pandas - to_string - field - column - itercolumns - schema - columns - num_columns - num_rows - shape - nbytes - column_names - slice - filter - flatten - combine_chunks - cast - replace_schema_metadata - add_column - append_column - remove_column - set_column - rename_columns - select - drop - from_blocks - from_tables ## Utils [[autodoc]] datasets.table.concat_tables [[autodoc]] datasets.table.list_table_cache_files
datasets/docs/source/package_reference/table_classes.mdx/0
{ "file_path": "datasets/docs/source/package_reference/table_classes.mdx", "repo_id": "datasets", "token_count": 1029 }
# Use with Polars This document is a quick introduction to using `datasets` with Polars, with a particular focus on how to process datasets using Polars functions, and how to convert a dataset to Polars or from Polars. This is particularly useful as it allows fast zero-copy operations, since both `datasets` and Polars use Arrow under the hood. ## Dataset format By default, datasets return regular Python objects: integers, floats, strings, lists, etc. To get Polars DataFrames or Series instead, you can set the format of the dataset to `polars` using [`Dataset.with_format`]: ```py >>> from datasets import Dataset >>> data = {"col_0": ["a", "b", "c", "d"], "col_1": [0., 0., 1., 1.]} >>> ds = Dataset.from_dict(data) >>> ds = ds.with_format("polars") >>> ds[0] # pl.DataFrame shape: (1, 2) ┌───────┬───────┐ │ col_0 ┆ col_1 │ │ --- ┆ --- │ │ str ┆ f64 │ ╞═══════╪═══════╡ │ a ┆ 0.0 │ └───────┴───────┘ >>> ds[:2] # pl.DataFrame shape: (2, 2) ┌───────┬───────┐ │ col_0 ┆ col_1 │ │ --- ┆ --- │ │ str ┆ f64 │ ╞═══════╪═══════╡ │ a ┆ 0.0 │ │ b ┆ 0.0 │ └───────┴───────┘ >>> ds["data"] # pl.Series shape: (4,) Series: 'col_0' [str] [ "a" "b" "c" "d" ] ``` This also works for `IterableDataset` objects obtained e.g. using `load_dataset(..., streaming=True)`: ```py >>> ds = ds.with_format("polars") >>> for df in ds.iter(batch_size=2): ... print(df) ... break shape: (2, 2) ┌───────┬───────┐ │ col_0 ┆ col_1 │ │ --- ┆ --- │ │ str ┆ f64 │ ╞═══════╪═══════╡ │ a ┆ 0.0 │ │ b ┆ 0.0 │ └───────┴───────┘ ``` ## Process data Polars functions are generally faster than regular hand-written python functions, and therefore they are a good option to optimize data processing. You can use Polars functions to process a dataset in [`Dataset.map`] or [`Dataset.filter`]: ```python >>> import polars as pl >>> from datasets import Dataset >>> data = {"col_0": ["a", "b", "c", "d"], "col_1": [0., 0., 1., 1.]} >>> ds = Dataset.from_dict(data) >>> ds = ds.with_format("polars") >>> ds = ds.map(lambda df: df.with_columns(pl.col("col_1").add(1).alias("col_2")), batched=True) >>> ds[:2] shape: (2, 3) ┌───────┬───────┬───────┐ │ col_0 ┆ col_1 ┆ col_2 │ │ --- ┆ --- ┆ --- │ │ str ┆ f64 ┆ f64 │ ╞═══════╪═══════╪═══════╡ │ a ┆ 0.0 ┆ 1.0 │ │ b ┆ 0.0 ┆ 1.0 │ └───────┴───────┴───────┘ >>> ds = ds.filter(lambda df: df["col_0"] == "b", batched=True) >>> ds[0] shape: (1, 3) ┌───────┬───────┬───────┐ │ col_0 ┆ col_1 ┆ col_2 │ │ --- ┆ --- ┆ --- │ │ str ┆ f64 ┆ f64 │ ╞═══════╪═══════╪═══════╡ │ b ┆ 0.0 ┆ 1.0 │ └───────┴───────┴───────┘ ``` We use `batched=True` because it is faster to process batches of data in Polars rather than row by row. It's also possible to use `batch_size=` in `map()` to set the size of each `df`. This also works for [`IterableDataset.map`] and [`IterableDataset.filter`]. ### Example: data extraction Many functions are available in Polars and for any data type: string, floats, integers, etc. You can find the full list [here](https://docs.pola.rs/api/python/stable/reference/expressions/functions.html). Those functions are written in Rust and run on batches of data which enables fast data processing. 
Here is an example that shows a 2.5x speed boost using Polars instead of a regular Python function to extract solutions from an LLM reasoning dataset:

```python
import re

import polars as pl
from datasets import load_dataset

ds = load_dataset("ServiceNow-AI/R1-Distill-SFT", "v0", split="train")

# Using a regular Python function
pattern = re.compile("boxed\\{(.*)\\}")
result_ds = ds.map(lambda x: {"value_solution": m.group(1) if (m:=pattern.search(x["solution"])) else None})
# Time: 10s

# Using a Polars function
expr = pl.col("solution").str.extract("boxed\\{(.*)\\}").alias("value_solution")
result_ds = ds.with_format("polars").map(lambda df: df.with_columns(expr), batched=True)
# Time: 2s
```

## Import or Export from Polars

To import data from Polars, you can use [`Dataset.from_polars`]:

```python
ds = Dataset.from_polars(df)
```

And you can use [`Dataset.to_polars`] to export a Dataset to a Polars DataFrame:

```python
df = ds.to_polars()
```
datasets/docs/source/use_with_polars.mdx/0
{ "file_path": "datasets/docs/source/use_with_polars.mdx", "repo_id": "datasets", "token_count": 1831 }
from abc import ABC, abstractmethod from argparse import ArgumentParser class BaseDatasetsCLICommand(ABC): @staticmethod @abstractmethod def register_subcommand(parser: ArgumentParser): raise NotImplementedError() @abstractmethod def run(self): raise NotImplementedError()
datasets/src/datasets/commands/__init__.py/0
{ "file_path": "datasets/src/datasets/commands/__init__.py", "repo_id": "datasets", "token_count": 107 }
__all__ = [ "Audio", "Array2D", "Array3D", "Array4D", "Array5D", "ClassLabel", "Features", "LargeList", "Sequence", "Value", "Image", "Translation", "TranslationVariableLanguages", "Video", ] from .audio import Audio from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, LargeList, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages from .video import Video
datasets/src/datasets/features/__init__.py/0
{ "file_path": "datasets/src/datasets/features/__init__.py", "repo_id": "datasets", "token_count": 181 }
import time from itertools import chain from typing import Optional, Union from huggingface_hub import ( CommitInfo, CommitOperationAdd, CommitOperationDelete, DatasetCard, DatasetCardData, HfApi, HfFileSystem, ) from huggingface_hub.utils import HfHubHTTPError import datasets.config from datasets.info import DatasetInfosDict from datasets.inspect import get_dataset_config_names, get_dataset_default_config_name from datasets.load import load_dataset, load_dataset_builder from datasets.utils.metadata import MetadataConfigs def convert_to_parquet( repo_id: str, revision: Optional[str] = None, token: Optional[Union[bool, str]] = None, trust_remote_code: Optional[bool] = None, ) -> CommitInfo: """Convert Hub [script-based dataset](dataset_script) to Parquet [data-only dataset](repository_structure), so that the dataset viewer will be supported. This function: - makes a copy of the script on the "main" branch into a dedicated branch called "script" (if it does not already exist) - creates a pull request to the Hub dataset to convert it to Parquet files (and deletes the script from the main branch) If in the future you need to recreate the Parquet files from the "script" branch, pass the `revision="script"` argument. Note that you should pass the `trust_remote_code=True` argument only if you trust the remote code to be executed locally on your machine. Args: repo_id (`str`): ID of the source Hub dataset repository, in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. revision (`str`, *optional*): Branch of the source Hub dataset repository. Defaults to the `"main"` branch. token (`bool` or `str`, *optional*): Authentication token for the Hugging Face Hub. trust_remote_code (`bool`, defaults to `False`): Whether you trust the remote code of the Hub script-based dataset to be executed locally on your machine. This option should only be set to `True` for repositories where you have read the code and which you trust. <Changed version="2.20.0"> `trust_remote_code` defaults to `False` if not specified. 
</Changed> Returns: `huggingface_hub.CommitInfo` """ print(f"{repo_id}") configs = get_dataset_config_names(repo_id, token=token, revision=revision, trust_remote_code=trust_remote_code) print(f"{configs = }") default_config = get_dataset_default_config_name( repo_id, token=token, revision=revision, trust_remote_code=trust_remote_code ) print(f"{default_config = }") if default_config: config = default_config configs.remove(default_config) else: config = configs.pop(0) print(f"{config = }") dataset = load_dataset(repo_id, config, revision=revision, trust_remote_code=trust_remote_code) commit_info = dataset.push_to_hub( repo_id, config_name=config, commit_message="Convert dataset to Parquet", commit_description="Convert dataset to Parquet.", create_pr=True, token=token, set_default=default_config is not None, ) time.sleep(5) pr_revision, pr_url = commit_info.pr_revision, commit_info.pr_url for config in configs: print(f"{config = }") dataset = load_dataset(repo_id, config, revision=revision, trust_remote_code=trust_remote_code) dataset.push_to_hub( repo_id, config_name=config, commit_message=f"Add '{config}' config data files", revision=pr_revision, token=token, ) time.sleep(5) _delete_files(repo_id, revision=pr_revision, token=token) if not revision: api = HfApi(endpoint=datasets.config.HF_ENDPOINT, token=token) try: api.create_branch(repo_id, branch="script", repo_type="dataset", token=token, exist_ok=True) except HfHubHTTPError: pass print(f"You can find your PR to convert the dataset to Parquet at: {pr_url}") return commit_info def delete_from_hub( repo_id: str, config_name: str, revision: Optional[str] = None, token: Optional[Union[bool, str]] = None, ) -> CommitInfo: """Delete a dataset configuration from a [data-only dataset](repository_structure) on the Hub. Args: repo_id (`str`): ID of the Hub dataset repository, in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. config_name (`str`): Name of the dataset configuration. revision (`str`, *optional*): Branch to delete the configuration from. Defaults to the `"main"` branch. token (`bool` or `str`, *optional*): Authentication token for the Hugging Face Hub. 
Returns: `huggingface_hub.CommitInfo` """ operations = [] # data_files fs = HfFileSystem(endpoint=datasets.config.HF_ENDPOINT, token=token) builder = load_dataset_builder(repo_id, config_name, revision=revision, token=token, trust_remote_code=False) for data_file in chain(*builder.config.data_files.values()): data_file_resolved_path = fs.resolve_path(data_file) if data_file_resolved_path.repo_id == repo_id: operations.append(CommitOperationDelete(path_in_repo=data_file_resolved_path.path_in_repo)) # README.md dataset_card = DatasetCard.load(repo_id) # config_names if dataset_card.data.get("config_names", None) and config_name in dataset_card.data["config_names"]: dataset_card.data["config_names"].remove(config_name) # metadata_configs metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card.data) if metadata_configs: _ = metadata_configs.pop(config_name, None) dataset_card_data = DatasetCardData() metadata_configs.to_dataset_card_data(dataset_card_data) if datasets.config.METADATA_CONFIGS_FIELD in dataset_card_data: dataset_card.data[datasets.config.METADATA_CONFIGS_FIELD] = dataset_card_data[ datasets.config.METADATA_CONFIGS_FIELD ] else: _ = dataset_card.data.pop(datasets.config.METADATA_CONFIGS_FIELD, None) # dataset_info dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card.data) if dataset_infos: _ = dataset_infos.pop(config_name, None) dataset_card_data = DatasetCardData() dataset_infos.to_dataset_card_data(dataset_card_data) if "dataset_info" in dataset_card_data: dataset_card.data["dataset_info"] = dataset_card_data["dataset_info"] else: _ = dataset_card.data.pop("dataset_info", None) # Commit operations.append( CommitOperationAdd(path_in_repo=datasets.config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode()) ) api = HfApi(endpoint=datasets.config.HF_ENDPOINT, token=token) commit_info = api.create_commit( repo_id, operations=operations, commit_message=f"Delete '{config_name}' config", commit_description=f"Delete '{config_name}' config.", token=token, repo_type="dataset", revision=revision, create_pr=True, ) print(f"You can find your PR to delete the dataset config at: {commit_info.pr_url}") return commit_info def _delete_files(dataset_id, revision=None, token=None): dataset_name = dataset_id.split("/")[-1] hf_api = HfApi(endpoint=datasets.config.HF_ENDPOINT, token=token) repo_files = hf_api.list_repo_files( dataset_id, repo_type="dataset", ) if repo_files: legacy_json_file = [] python_files = [] data_files = [] for filename in repo_files: if filename in {".gitattributes", "README.md"}: continue elif filename == f"{dataset_name}.py": hf_api.delete_file( filename, dataset_id, repo_type="dataset", revision=revision, commit_message="Delete loading script", ) elif filename == "dataset_infos.json": legacy_json_file.append(filename) elif filename.endswith(".py"): python_files.append(filename) else: data_files.append(filename) if legacy_json_file: hf_api.delete_file( "dataset_infos.json", dataset_id, repo_type="dataset", revision=revision, commit_message="Delete legacy dataset_infos.json", ) if python_files: for filename in python_files: hf_api.delete_file( filename, dataset_id, repo_type="dataset", revision=revision, commit_message="Delete loading script auxiliary file", ) if data_files: for filename in data_files: hf_api.delete_file( filename, dataset_id, repo_type="dataset", revision=revision, commit_message="Delete data file", )
datasets/src/datasets/hub.py/0
{ "file_path": "datasets/src/datasets/hub.py", "repo_id": "datasets", "token_count": 4128 }
import inspect import re from typing import Dict, List, Tuple from huggingface_hub.utils import insecure_hashlib from .arrow import arrow from .audiofolder import audiofolder from .cache import cache from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql from .text import text from .videofolder import videofolder from .webdataset import webdataset from .xml import xml def _hash_python_lines(lines: List[str]) -> str: filtered_lines = [] for line in lines: line = re.sub(r"#.*", "", line) # remove comments if line: filtered_lines.append(line) full_str = "\n".join(filtered_lines) # Make a hash from all this code full_bytes = full_str.encode("utf-8") return insecure_hashlib.sha256(full_bytes).hexdigest() # get importable module names and hash for caching _PACKAGED_DATASETS_MODULES = { "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), "videofolder": (videofolder.__name__, _hash_python_lines(inspect.getsource(videofolder).splitlines())), "webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())), "xml": (xml.__name__, _hash_python_lines(inspect.getsource(xml).splitlines())), } # get importable module names and hash for caching _PACKAGED_DATASETS_MODULES_2_15_HASHES = { "csv": "eea64c71ca8b46dd3f537ed218fc9bf495d5707789152eb2764f5c78fa66d59d", "json": "8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96", "pandas": "3ac4ffc4563c796122ef66899b9485a3f1a977553e2d2a8a318c72b8cc6f2202", "parquet": "ca31c69184d9832faed373922c2acccec0b13a0bb5bbbe19371385c3ff26f1d1", "arrow": "74f69db2c14c2860059d39860b1f400a03d11bf7fb5a8258ca38c501c878c137", "text": "c4a140d10f020282918b5dd1b8a49f0104729c6177f60a6b49ec2a365ec69f34", "imagefolder": "7b7ce5247a942be131d49ad4f3de5866083399a0f250901bd8dc202f8c5f7ce5", "audiofolder": "d3c1655c66c8f72e4efb5c79e952975fa6e2ce538473a6890241ddbddee9071c", } # Used to infer the module to use based on the data files extensions _EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = { ".csv": ("csv", {}), ".tsv": ("csv", {"sep": "\t"}), ".json": ("json", {}), ".jsonl": ("json", {}), # ndjson is no longer maintained (see: https://github.com/ndjson/ndjson-spec/issues/35#issuecomment-1285673417) ".ndjson": ("json", {}), ".parquet": ("parquet", {}), ".geoparquet": ("parquet", {}), ".gpq": ("parquet", {}), ".arrow": ("arrow", {}), ".txt": ("text", {}), ".tar": ("webdataset", {}), ".xml": ("xml", {}), } _EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext 
in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("videofolder", {}) for ext in videofolder.VideoFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("videofolder", {}) for ext in videofolder.VideoFolder.EXTENSIONS}) _MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder", "videofolder"} # Used to filter data files based on extensions given a module name _MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) for _module in _MODULE_TO_EXTENSIONS: _MODULE_TO_EXTENSIONS[_module].append(".zip")
datasets/src/datasets/packaged_modules/__init__.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/__init__.py", "repo_id": "datasets", "token_count": 1752 }
import io import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import pyarrow.json as paj import datasets import datasets.config from datasets.table import table_cast from datasets.utils.file_utils import readline logger = datasets.utils.logging.get_logger(__name__) def ujson_dumps(*args, **kwargs): try: return pd.io.json.ujson_dumps(*args, **kwargs) except AttributeError: # Before pandas-2.2.0, ujson_dumps was renamed to dumps: import ujson_dumps as dumps return pd.io.json.dumps(*args, **kwargs) def ujson_loads(*args, **kwargs): try: return pd.io.json.ujson_loads(*args, **kwargs) except AttributeError: # Before pandas-2.2.0, ujson_loads was renamed to loads: import ujson_loads as loads return pd.io.json.loads(*args, **kwargs) def pandas_read_json(path_or_buf, **kwargs): if datasets.config.PANDAS_VERSION.major >= 2: kwargs["dtype_backend"] = "pyarrow" return pd.read_json(path_or_buf, **kwargs) @dataclass class JsonConfig(datasets.BuilderConfig): """BuilderConfig for JSON.""" features: Optional[datasets.Features] = None encoding: str = "utf-8" encoding_errors: Optional[str] = None field: Optional[str] = None use_threads: bool = True # deprecated block_size: Optional[int] = None # deprecated chunksize: int = 10 << 20 # 10MB newlines_in_values: Optional[bool] = None def __post_init__(self): super().__post_init__() class Json(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = JsonConfig def _info(self): if self.config.block_size is not None: logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead") self.config.chunksize = self.config.block_size if self.config.use_threads is not True: logger.warning( "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." 
) if self.config.newlines_in_values is not None: raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported") return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """We handle string, list and dicts in datafiles""" if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") dl_manager.download_config.extract_on_the_fly = True data_files = dl_manager.download_and_extract(self.config.data_files) splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: # adding missing columns for column_name in set(self.config.features) - set(pa_table.column_names): type = self.config.features.arrow_schema.field(column_name).type pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type)) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, self.config.features.arrow_schema) return pa_table def _generate_tables(self, files): for file_idx, file in enumerate(itertools.chain.from_iterable(files)): # If the file is one json object and if we need to look at the items in one specific field if self.config.field is not None: with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f: dataset = ujson_loads(f.read()) # We keep only the field we are interested in dataset = dataset[self.config.field] df = pandas_read_json(io.StringIO(ujson_dumps(dataset))) if df.columns.tolist() == [0]: df.columns = list(self.config.features) if self.config.features else ["text"] pa_table = pa.Table.from_pandas(df, preserve_index=False) yield file_idx, self._cast_table(pa_table) # If the file has one json object per line else: with open(file, "rb") as f: batch_idx = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small block_size = max(self.config.chunksize // 32, 16 << 10) encoding_errors = ( self.config.encoding_errors if self.config.encoding_errors is not None else "strict" ) while True: batch = f.read(self.config.chunksize) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(f) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8") try: while True: try: pa_table = paj.read_json( io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(e, pa.ArrowInvalid) and "straddling" not in str(e) or block_size > len(batch) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}." 
) block_size *= 2 except pa.ArrowInvalid as e: try: with open( file, encoding=self.config.encoding, errors=self.config.encoding_errors ) as f: df = pandas_read_json(f) except ValueError: logger.error(f"Failed to load JSON from file '{file}' with error {type(e)}: {e}") raise e if df.columns.tolist() == [0]: df.columns = list(self.config.features) if self.config.features else ["text"] try: pa_table = pa.Table.from_pandas(df, preserve_index=False) except pa.ArrowInvalid as e: logger.error( f"Failed to convert pandas DataFrame to Arrow Table from file '{file}' with error {type(e)}: {e}" ) raise ValueError( f"Failed to convert pandas DataFrame to Arrow Table from file {file}." ) from None yield file_idx, self._cast_table(pa_table) break yield (file_idx, batch_idx), self._cast_table(pa_table) batch_idx += 1
datasets/src/datasets/packaged_modules/json/json.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/json/json.py", "repo_id": "datasets", "token_count": 4543 }
""" Utilities for working with the local dataset cache. This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp Copyright by the AllenNLP authors. """ import asyncio import glob import io import json import multiprocessing import os import posixpath import re import shutil import sys import tarfile import time import xml.dom.minidom import zipfile from contextlib import contextmanager from io import BytesIO from itertools import chain from pathlib import Path, PurePosixPath from typing import Any, Dict, Generator, List, Optional, Tuple, TypeVar, Union from unittest.mock import patch from urllib.parse import urlparse from xml.etree import ElementTree as ET import aiohttp.client_exceptions import fsspec import huggingface_hub import huggingface_hub.errors import requests from fsspec.core import strip_protocol, url_to_fs from fsspec.utils import can_be_local from huggingface_hub.utils import EntryNotFoundError, get_session, insecure_hashlib from packaging import version from .. import __version__, config from ..download.download_config import DownloadConfig from ..filesystems import COMPRESSION_FILESYSTEMS from . import _tqdm, logging from ._filelock import FileLock from .extract import ExtractManager from .track import TrackedIterableFromGenerator logger = logging.get_logger(__name__) # pylint: disable=invalid-name INCOMPLETE_SUFFIX = ".incomplete" T = TypeVar("T", str, Path) def init_hf_modules(hf_modules_cache: Optional[Union[Path, str]] = None) -> str: """ Add hf_modules_cache to the python path. By default hf_modules_cache='~/.cache/huggingface/modules'. It can also be set with the environment variable HF_MODULES_CACHE. This is used to add modules such as `datasets_modules` """ hf_modules_cache = hf_modules_cache if hf_modules_cache is not None else config.HF_MODULES_CACHE hf_modules_cache = str(hf_modules_cache) if hf_modules_cache not in sys.path: sys.path.append(hf_modules_cache) os.makedirs(hf_modules_cache, exist_ok=True) if not os.path.exists(os.path.join(hf_modules_cache, "__init__.py")): with open(os.path.join(hf_modules_cache, "__init__.py"), "w"): pass return hf_modules_cache def is_remote_url(url_or_filename: str) -> bool: return urlparse(url_or_filename).scheme != "" and not os.path.ismount(urlparse(url_or_filename).scheme + ":/") def is_local_path(url_or_filename: str) -> bool: # On unix the scheme of a local path is empty (for both absolute and relative), # while on windows the scheme is the drive name (ex: "c") for absolute paths. 
# for details on the windows behavior, see https://bugs.python.org/issue42215 return urlparse(url_or_filename).scheme == "" or os.path.ismount(urlparse(url_or_filename).scheme + ":/") def is_relative_path(url_or_filename: str) -> bool: return urlparse(url_or_filename).scheme == "" and not os.path.isabs(url_or_filename) def relative_to_absolute_path(path: T) -> T: """Convert relative path to absolute path.""" abs_path_str = os.path.abspath(os.path.expanduser(os.path.expandvars(str(path)))) return Path(abs_path_str) if isinstance(path, Path) else abs_path_str def url_or_path_join(base_name: str, *pathnames: str) -> str: if is_remote_url(base_name): return posixpath.join(base_name, *(str(pathname).replace(os.sep, "/").lstrip("/") for pathname in pathnames)) else: return Path(base_name, *pathnames).as_posix() def url_or_path_parent(url_or_path: str) -> str: if is_remote_url(url_or_path): return url_or_path[: url_or_path.rindex("/")] else: return os.path.dirname(url_or_path) def hash_url_to_filename(url, etag=None): """ Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the url's, delimited by a period. If the url ends with .h5 (Keras HDF5 weights) adds '.h5' to the name so that TF 2.0 can identify it as a HDF5 file (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380) """ url_bytes = url.encode("utf-8") url_hash = insecure_hashlib.sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode("utf-8") etag_hash = insecure_hashlib.sha256(etag_bytes) filename += "." + etag_hash.hexdigest() if url.endswith(".py"): filename += ".py" return filename def cached_path( url_or_filename, download_config=None, **download_kwargs, ) -> str: """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. Return: Local path (string) Raises: FileNotFoundError: in case of non-recoverable file (non-existent or no cache on disk) ConnectionError: in case of unreachable url and no cache on disk ValueError: if it couldn't parse the url or filename correctly requests.exceptions.ConnectionError: in case of internet connection issue """ if download_config is None: download_config = DownloadConfig(**download_kwargs) cache_dir = download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) # Convert fsspec URL in the format "file://local/path" to "local/path" if can_be_local(url_or_filename): url_or_filename = strip_protocol(url_or_filename) if is_remote_url(url_or_filename): # URL, so get it from the cache (downloading if necessary) url_or_filename, storage_options = _prepare_path_and_storage_options( url_or_filename, download_config=download_config ) # Download files from Hugging Face. 
# Note: no need to check for https://huggingface.co file URLs since _prepare_path_and_storage_options # prepares Hugging Face HTTP URLs as hf:// paths already if url_or_filename.startswith("hf://"): resolved_path = huggingface_hub.HfFileSystem( endpoint=config.HF_ENDPOINT, token=download_config.token ).resolve_path(url_or_filename) try: output_path = huggingface_hub.HfApi( endpoint=config.HF_ENDPOINT, token=download_config.token, library_name="datasets", library_version=__version__, user_agent=get_datasets_user_agent(download_config.user_agent), ).hf_hub_download( repo_id=resolved_path.repo_id, repo_type=resolved_path.repo_type, revision=resolved_path.revision, filename=resolved_path.path_in_repo, force_download=download_config.force_download, proxies=download_config.proxies, ) except ( huggingface_hub.utils.RepositoryNotFoundError, huggingface_hub.utils.EntryNotFoundError, huggingface_hub.utils.RevisionNotFoundError, huggingface_hub.utils.GatedRepoError, ) as e: raise FileNotFoundError(str(e)) from e # Download external files else: output_path = get_from_cache( url_or_filename, cache_dir=cache_dir, force_download=download_config.force_download, user_agent=download_config.user_agent, use_etag=download_config.use_etag, token=download_config.token, storage_options=storage_options, download_desc=download_config.download_desc, disable_tqdm=download_config.disable_tqdm, ) elif os.path.exists(url_or_filename): # File, and it exists. output_path = url_or_filename elif is_local_path(url_or_filename): # File, but it doesn't exist. raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist") else: # Something unknown raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path") if output_path is None: return output_path if download_config.extract_compressed_file: if download_config.extract_on_the_fly: # Add a compression prefix to the compressed file so that it can be extracted # as it's being read using xopen. protocol = _get_extraction_protocol(output_path, download_config=download_config) extension = _get_path_extension(url_or_filename.split("::")[0]) if ( protocol and extension not in ["tgz", "tar"] and not url_or_filename.split("::")[0].endswith((".tar.gz", ".tar.bz2", ".tar.xz")) ): output_path = relative_to_absolute_path(output_path) if protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS: # there is one single file which is the uncompressed file inner_file = os.path.basename(output_path) inner_file = inner_file[: inner_file.rindex(".")] if "." 
in inner_file else inner_file output_path = f"{protocol}://{inner_file}::{output_path}" else: output_path = f"{protocol}://::{output_path}" return output_path # Eager extraction output_path = ExtractManager(cache_dir=download_config.cache_dir).extract( output_path, force_extract=download_config.force_extract ) return relative_to_absolute_path(output_path) def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str: ua = f"datasets/{__version__}" ua += f"; python/{config.PY_VERSION}" ua += f"; huggingface_hub/{huggingface_hub.__version__}" ua += f"; pyarrow/{config.PYARROW_VERSION}" if config.TORCH_AVAILABLE: ua += f"; torch/{config.TORCH_VERSION}" if config.TF_AVAILABLE: ua += f"; tensorflow/{config.TF_VERSION}" if config.JAX_AVAILABLE: ua += f"; jax/{config.JAX_VERSION}" if isinstance(user_agent, dict): ua += f"; {'; '.join(f'{k}/{v}' for k, v in user_agent.items())}" elif isinstance(user_agent, str): ua += "; " + user_agent return ua def get_authentication_headers_for_url(url: str, token: Optional[Union[str, bool]] = None) -> dict: """Handle the HF authentication""" if url.startswith(config.HF_ENDPOINT): return huggingface_hub.utils.build_hf_headers( token=token, library_name="datasets", library_version=__version__ ) else: return {} def _raise_if_offline_mode_is_enabled(msg: Optional[str] = None): """Raise an OfflineModeIsEnabled error (subclass of ConnectionError) if HF_HUB_OFFLINE is True.""" if config.HF_HUB_OFFLINE: raise huggingface_hub.errors.OfflineModeIsEnabled( "Offline mode is enabled." if msg is None else "Offline mode is enabled. " + str(msg) ) def fsspec_head(url, storage_options=None): _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") fs, path = url_to_fs(url, **(storage_options or {})) return fs.info(path) def stack_multiprocessing_download_progress_bars(): # Stack downloads progress bars automatically using HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS=1 # We use environment variables since the download may happen in a subprocess return patch.dict(os.environ, {"HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS": "1"}) class TqdmCallback(fsspec.callbacks.TqdmCallback): def __init__(self, tqdm_kwargs=None, *args, **kwargs): if config.FSSPEC_VERSION < version.parse("2024.2.0"): super().__init__(tqdm_kwargs, *args, **kwargs) self._tqdm = _tqdm # replace tqdm module by datasets.utils.tqdm module else: kwargs["tqdm_cls"] = _tqdm.tqdm super().__init__(tqdm_kwargs, *args, **kwargs) def fsspec_get(url, temp_file, storage_options=None, desc=None, disable_tqdm=False): _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") fs, path = url_to_fs(url, **(storage_options or {})) callback = TqdmCallback( tqdm_kwargs={ "desc": desc or "Downloading", "unit": "B", "unit_scale": True, "position": multiprocessing.current_process()._identity[-1] # contains the ranks of subprocesses if os.environ.get("HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS") == "1" and multiprocessing.current_process()._identity else None, "disable": disable_tqdm, } ) fs.get_file(path, temp_file.name, callback=callback) def get_from_cache( url, cache_dir=None, force_download=False, user_agent=None, use_etag=True, token=None, storage_options=None, download_desc=None, disable_tqdm=False, ) -> str: """ Given a URL, look for the corresponding file in the local cache. If it's not there, download it. Then return the path to the cached file. 
Return: Local path (string) Raises: FileNotFoundError: in case of non-recoverable file (non-existent or no cache on disk) ConnectionError: in case of unreachable url and no cache on disk """ if storage_options is None: storage_options = {} if cache_dir is None: cache_dir = config.HF_DATASETS_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) os.makedirs(cache_dir, exist_ok=True) response = None etag = None # Try a first time to file the file on the local file system without eTag (None) # if we don't ask for 'force_download' then we spare a request filename = hash_url_to_filename(url, etag=None) cache_path = os.path.join(cache_dir, filename) if os.path.exists(cache_path) and not force_download and not use_etag: return cache_path # Prepare headers for authentication headers = get_authentication_headers_for_url(url, token=token) if user_agent is not None: headers["user-agent"] = user_agent response = fsspec_head(url, storage_options=storage_options) etag = (response.get("ETag", None) or response.get("etag", None)) if use_etag else None # Try a second time filename = hash_url_to_filename(url, etag) cache_path = os.path.join(cache_dir, filename) if os.path.exists(cache_path) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. lock_path = cache_path + ".lock" with FileLock(lock_path): # Retry in case previously locked processes just enter after the precedent process releases the lock if os.path.exists(cache_path) and not force_download: return cache_path incomplete_path = cache_path + ".incomplete" @contextmanager def temp_file_manager(mode="w+b"): with open(incomplete_path, mode) as f: yield f # Download to temporary file, then copy to cache path once finished. # Otherwise, you get corrupt cache entries if the download gets interrupted. 
with temp_file_manager() as temp_file: logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}") # GET file object fsspec_get(url, temp_file, storage_options=storage_options, desc=download_desc, disable_tqdm=disable_tqdm) logger.info(f"storing {url} in cache at {cache_path}") shutil.move(temp_file.name, cache_path) umask = os.umask(0o666) os.umask(umask) os.chmod(cache_path, 0o666 & ~umask) logger.info(f"creating metadata file for {cache_path}") meta = {"url": url, "etag": etag} meta_path = cache_path + ".json" with open(meta_path, "w", encoding="utf-8") as meta_file: json.dump(meta, meta_file) return cache_path def add_start_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = "".join(docstr) + "\n\n" + (fn.__doc__ if fn.__doc__ is not None else "") return fn return docstring_decorator def add_end_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "\n\n" + "".join(docstr) return fn return docstring_decorator def estimate_dataset_size(paths): return sum(path.stat().st_size for path in paths) def readline(f: io.RawIOBase): # From: https://github.com/python/cpython/blob/d27e2f4d118e7a9909b6a3e5da06c5ff95806a85/Lib/_pyio.py#L525 res = bytearray() while True: b = f.read(1) if not b: break res += b if res.endswith(b"\n"): break return bytes(res) ####################### # Streaming utilities # ####################### BASE_KNOWN_EXTENSIONS = [ "txt", "csv", "json", "jsonl", "tsv", "conll", "conllu", "orig", "parquet", "pkl", "pickle", "rel", "xml", "arrow", ] COMPRESSION_EXTENSION_TO_PROTOCOL = { # single file compression **{fs_class.extension.lstrip("."): fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS}, # archive compression "zip": "zip", } SINGLE_FILE_COMPRESSION_EXTENSION_TO_PROTOCOL = { fs_class.extension.lstrip("."): fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS } SINGLE_FILE_COMPRESSION_PROTOCOLS = {fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS} SINGLE_SLASH_AFTER_PROTOCOL_PATTERN = re.compile(r"(?<!:):/") MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = { bytes.fromhex("504B0304"): "zip", bytes.fromhex("504B0506"): "zip", # empty archive bytes.fromhex("504B0708"): "zip", # spanned archive bytes.fromhex("425A68"): "bz2", bytes.fromhex("1F8B"): "gzip", bytes.fromhex("FD377A585A00"): "xz", bytes.fromhex("04224D18"): "lz4", bytes.fromhex("28B52FFD"): "zstd", } MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL = { b"Rar!": "rar", } MAGIC_NUMBER_MAX_LENGTH = max( len(magic_number) for magic_number in chain(MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL, MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL) ) class NonStreamableDatasetError(Exception): pass def _get_path_extension(path: str) -> str: # Get extension: https://foo.bar/train.json.gz -> gz extension = path.split(".")[-1] # Remove query params ("dl=1", "raw=true"): gz?dl=1 -> gz # Remove shards infos (".txt_1", ".txt-00000-of-00100"): txt_1 -> txt for symb in "?-_": extension = extension.split(symb)[0] return extension def _get_extraction_protocol_with_magic_number(f) -> Optional[str]: """read the magic number from a file-like object and return the compression protocol""" # Check if the file object is seekable even before reading the magic number (to avoid https://bugs.python.org/issue26440) try: f.seek(0) except (AttributeError, io.UnsupportedOperation): return None magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH) f.seek(0) for i in range(MAGIC_NUMBER_MAX_LENGTH): compression = 
MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: return compression compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: raise NotImplementedError(f"Compression protocol '{compression}' not implemented.") def _get_extraction_protocol(urlpath: str, download_config: Optional[DownloadConfig] = None) -> Optional[str]: # get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz urlpath = str(urlpath) path = urlpath.split("::")[0] extension = _get_path_extension(path) if ( extension in BASE_KNOWN_EXTENSIONS or extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")) ): return None elif extension in COMPRESSION_EXTENSION_TO_PROTOCOL: return COMPRESSION_EXTENSION_TO_PROTOCOL[extension] urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) try: with fsspec.open(urlpath, **(storage_options or {})) as f: return _get_extraction_protocol_with_magic_number(f) except FileNotFoundError: if urlpath.startswith(config.HF_ENDPOINT): raise FileNotFoundError( urlpath + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`." ) from None else: raise def xjoin(a, *p): """ This function extends os.path.join to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xjoin function allows you to apply the join on the first path of the chain. Example:: >>> xjoin("zip://folder1::https://host.com/archive.zip", "file.txt") zip://folder1/file.txt::https://host.com/archive.zip """ a, *b = str(a).split("::") if is_local_path(a): return os.path.join(a, *p) else: a = posixpath.join(a, *p) return "::".join([a] + b) def xdirname(a): """ This function extends os.path.dirname to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xdirname function allows you to apply the dirname on the first path of the chain. Example:: >>> xdirname("zip://folder1/file.txt::https://host.com/archive.zip") zip://folder1::https://host.com/archive.zip """ a, *b = str(a).split("::") if is_local_path(a): a = os.path.dirname(Path(a).as_posix()) else: a = posixpath.dirname(a) # if we end up at the root of the protocol, we get for example a = 'http:' # so we have to fix it by adding the '//' that was removed: if a.endswith(":"): a += "//" return "::".join([a] + b) def xexists(urlpath: str, download_config: Optional[DownloadConfig] = None): """Extend `os.path.exists` function to support both local and remote files. Args: urlpath (`str`): URL path. 
download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ main_hop, *rest_hops = _as_str(urlpath).split("::") if is_local_path(main_hop): return os.path.exists(main_hop) else: urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) main_hop, *rest_hops = urlpath.split("::") fs, *_ = url_to_fs(urlpath, **storage_options) return fs.exists(main_hop) def xbasename(a): """ This function extends os.path.basename to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xbasename function allows you to apply the basename on the first path of the chain. Example:: >>> xbasename("zip://folder1/file.txt::https://host.com/archive.zip") file.txt """ a, *b = str(a).split("::") if is_local_path(a): return os.path.basename(Path(a).as_posix()) else: return posixpath.basename(a) def xsplit(a): """ This function extends os.path.split to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xsplit function allows you to apply the xsplit on the first path of the chain. Example:: >>> xsplit("zip://folder1/file.txt::https://host.com/archive.zip") ('zip://folder1::https://host.com/archive.zip', 'file.txt') """ a, *b = str(a).split("::") if is_local_path(a): return os.path.split(Path(a).as_posix()) else: a, tail = posixpath.split(a) return "::".join([a + "//" if a.endswith(":") else a] + b), tail def xsplitext(a): """ This function extends os.path.splitext to support the "::" hop separator. It supports both paths and urls. A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::". This is used to access files inside a zip file over http for example. Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt. Then you can just chain the url this way: zip://folder1/file.txt::https://host.com/archive.zip The xsplitext function allows you to apply the splitext on the first path of the chain. Example:: >>> xsplitext("zip://folder1/file.txt::https://host.com/archive.zip") ('zip://folder1/file::https://host.com/archive.zip', '.txt') """ a, *b = str(a).split("::") if is_local_path(a): return os.path.splitext(Path(a).as_posix()) else: a, ext = posixpath.splitext(a) return "::".join([a] + b), ext def xisfile(path, download_config: Optional[DownloadConfig] = None) -> bool: """Extend `os.path.isfile` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. 
Returns: `bool` """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.isfile(path) else: path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) main_hop, *rest_hops = path.split("::") fs, *_ = url_to_fs(path, **storage_options) return fs.isfile(main_hop) def xgetsize(path, download_config: Optional[DownloadConfig] = None) -> int: """Extend `os.path.getsize` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `int`: optional """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.getsize(path) else: path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) main_hop, *rest_hops = path.split("::") fs, *_ = fs, *_ = url_to_fs(path, **storage_options) try: size = fs.size(main_hop) except EntryNotFoundError: raise FileNotFoundError(f"No such file: {path}") if size is None: # use xopen instead of fs.open to make data fetching more robust with xopen(path, download_config=download_config) as f: size = len(f.read()) return size def xisdir(path, download_config: Optional[DownloadConfig] = None) -> bool: """Extend `os.path.isdir` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.isdir(path) else: path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) main_hop, *rest_hops = path.split("::") fs, *_ = fs, *_ = url_to_fs(path, **storage_options) inner_path = main_hop.split("://")[-1] if not inner_path.strip("/"): return True return fs.isdir(inner_path) def xrelpath(path, start=None): """Extend `os.path.relpath` function to support remote files. Args: path (`str`): URL path. start (`str`): Start URL directory path. Returns: `str` """ main_hop, *rest_hops = str(path).split("::") if is_local_path(main_hop): return os.path.relpath(main_hop, start=start) if start else os.path.relpath(main_hop) else: return posixpath.relpath(main_hop, start=str(start).split("::")[0]) if start else os.path.relpath(main_hop) def _add_retries_to_file_obj_read_method(file_obj): read = file_obj.read max_retries = config.STREAMING_READ_MAX_RETRIES def read_with_retries(*args, **kwargs): disconnect_err = None for retry in range(1, max_retries + 1): try: out = read(*args, **kwargs) break except ( aiohttp.client_exceptions.ClientError, asyncio.TimeoutError, requests.exceptions.ConnectionError, requests.exceptions.Timeout, ) as err: disconnect_err = err logger.warning( f"Got disconnected from remote data host. 
Retrying in {config.STREAMING_READ_RETRY_INTERVAL}sec [{retry}/{max_retries}]" ) time.sleep(config.STREAMING_READ_RETRY_INTERVAL) else: raise ConnectionError("Server Disconnected") from disconnect_err return out try: file_obj.read = read_with_retries except AttributeError: # read-only attribute orig_file_obj = file_obj file_obj = io.RawIOBase() file_obj.read = read_with_retries file_obj.__getattr__ = lambda _, attr: getattr(orig_file_obj, attr) return file_obj def _prepare_path_and_storage_options( urlpath: str, download_config: Optional[DownloadConfig] = None ) -> Tuple[str, Dict[str, Dict[str, Any]]]: prepared_urlpath = [] prepared_storage_options = {} for hop in urlpath.split("::"): hop, storage_options = _prepare_single_hop_path_and_storage_options(hop, download_config=download_config) prepared_urlpath.append(hop) prepared_storage_options.update(storage_options) return "::".join(prepared_urlpath), storage_options def _prepare_single_hop_path_and_storage_options( urlpath: str, download_config: Optional[DownloadConfig] = None ) -> Tuple[str, Dict[str, Dict[str, Any]]]: """ Prepare the URL and the kwargs that must be passed to the HttpFileSystem or HfFileSystem In particular it resolves google drive URLs It also adds the authentication headers for the Hugging Face Hub, for both https:// and hf:// paths. Storage options are formatted in the form {protocol: storage_options_for_protocol} """ token = None if download_config is None else download_config.token if urlpath.startswith(config.HF_ENDPOINT) and "/resolve/" in urlpath: urlpath = "hf://" + urlpath[len(config.HF_ENDPOINT) + 1 :].replace("/resolve/", "@", 1) protocol = urlpath.split("://")[0] if "://" in urlpath else "file" if download_config is not None and protocol in download_config.storage_options: storage_options = download_config.storage_options[protocol].copy() elif download_config is not None and protocol not in download_config.storage_options: storage_options = { option_name: option_value for option_name, option_value in download_config.storage_options.items() if option_name not in fsspec.available_protocols() } else: storage_options = {} if protocol in {"http", "https"}: client_kwargs = storage_options.pop("client_kwargs", {}) storage_options["client_kwargs"] = {"trust_env": True, **client_kwargs} # Enable reading proxy env variables if "drive.google.com" in urlpath: response = get_session().head(urlpath, timeout=10) for k, v in response.cookies.items(): if k.startswith("download_warning"): urlpath += "&confirm=" + v cookies = response.cookies storage_options = {"cookies": cookies, **storage_options} # Fix Google Drive URL to avoid Virus scan warning if "confirm=" not in urlpath: urlpath += "&confirm=t" if urlpath.startswith("https://raw.githubusercontent.com/"): # Workaround for served data with gzip content-encoding: https://github.com/fsspec/filesystem_spec/issues/389 headers = storage_options.pop("headers", {}) storage_options["headers"] = {"Accept-Encoding": "identity", **headers} elif protocol == "hf": storage_options = { "token": token, "endpoint": config.HF_ENDPOINT, **storage_options, } # streaming with block_size=0 is only implemented in 0.21 (see https://github.com/huggingface/huggingface_hub/pull/1967) if config.HF_HUB_VERSION < version.parse("0.21.0"): storage_options["block_size"] = "default" if storage_options: storage_options = {protocol: storage_options} return urlpath, storage_options def xopen(file: str, mode="r", *args, download_config: Optional[DownloadConfig] = None, **kwargs): """Extend `open` function to 
support remote files using `fsspec`. It also has a retry mechanism in case connection fails. The `args` and `kwargs` are passed to `fsspec.open`, except `token` which is used for queries to private repos on huggingface.co Args: file (`str`): Path name of the file to be opened. mode (`str`, *optional*, default "r"): Mode in which the file is opened. *args: Arguments to be passed to `fsspec.open`. download_config : mainly use token or storage_options to support different platforms and auth types. **kwargs: Keyword arguments to be passed to `fsspec.open`. Returns: file object """ # This works as well for `xopen(str(Path(...)))` file_str = _as_str(file) main_hop, *rest_hops = file_str.split("::") if is_local_path(main_hop): # ignore fsspec-specific kwargs kwargs.pop("block_size", None) return open(main_hop, mode, *args, **kwargs) # add headers and cookies for authentication on the HF Hub and for Google Drive file, storage_options = _prepare_path_and_storage_options(file_str, download_config=download_config) kwargs = {**kwargs, **(storage_options or {})} try: file_obj = fsspec.open(file, mode=mode, *args, **kwargs).open() except ValueError as e: if str(e) == "Cannot seek streaming HTTP file": raise NonStreamableDatasetError( "Streaming is not possible for this dataset because data host server doesn't support HTTP range " "requests. You can still load this dataset in non-streaming mode by passing `streaming=False` (default)" ) from e else: raise except FileNotFoundError: if file.startswith(config.HF_ENDPOINT): raise FileNotFoundError( file + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`." ) from None else: raise file_obj = _add_retries_to_file_obj_read_method(file_obj) return file_obj def xlistdir(path: str, download_config: Optional[DownloadConfig] = None) -> List[str]: """Extend `os.listdir` function to support remote files. Args: path (`str`): URL path. download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `list` of `str` """ main_hop, *rest_hops = _as_str(path).split("::") if is_local_path(main_hop): return os.listdir(path) else: # globbing inside a zip in a private repo requires authentication path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config) main_hop, *rest_hops = path.split("::") fs, *_ = url_to_fs(path, **storage_options) inner_path = main_hop.split("://")[-1] if inner_path.strip("/") and not fs.isdir(inner_path): raise FileNotFoundError(f"Directory doesn't exist: {path}") paths = fs.listdir(inner_path, detail=False) return [os.path.basename(path.rstrip("/")) for path in paths] def xglob(urlpath, *, recursive=False, download_config: Optional[DownloadConfig] = None): """Extend `glob.glob` function to support remote files. Args: urlpath (`str`): URL path with shell-style wildcard patterns. recursive (`bool`, default `False`): Whether to match the "**" pattern recursively to zero or more directories or subdirectories. download_config : mainly use token or storage_options to support different platforms and auth types. 
Returns: `list` of `str` """ main_hop, *rest_hops = _as_str(urlpath).split("::") if is_local_path(main_hop): return glob.glob(main_hop, recursive=recursive) else: # globbing inside a zip in a private repo requires authentication urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) main_hop, *rest_hops = urlpath.split("::") fs, *_ = url_to_fs(urlpath, **storage_options) inner_path = main_hop.split("://")[1] globbed_paths = fs.glob(inner_path) protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1] return ["::".join([f"{protocol}://{globbed_path}"] + rest_hops) for globbed_path in globbed_paths] def xwalk(urlpath, download_config: Optional[DownloadConfig] = None, **kwargs): """Extend `os.walk` function to support remote files. Args: urlpath (`str`): URL root path. download_config : mainly use token or storage_options to support different platforms and auth types. **kwargs: Additional keyword arguments forwarded to the underlying filesystem. Yields: `tuple`: 3-tuple (dirpath, dirnames, filenames). """ main_hop, *rest_hops = _as_str(urlpath).split("::") if is_local_path(main_hop): yield from os.walk(main_hop, **kwargs) else: # walking inside a zip in a private repo requires authentication urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) main_hop, *rest_hops = urlpath.split("::") fs, *_ = url_to_fs(urlpath, **storage_options) inner_path = main_hop.split("://")[-1] if inner_path.strip("/") and not fs.isdir(inner_path): return [] protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1] for dirpath, dirnames, filenames in fs.walk(inner_path, **kwargs): yield "::".join([f"{protocol}://{dirpath}"] + rest_hops), dirnames, filenames class xPath(type(Path())): """Extension of `pathlib.Path` to support both local paths and remote URLs.""" def __str__(self): path_str = super().__str__() main_hop, *rest_hops = path_str.split("::") if is_local_path(main_hop): return main_hop path_as_posix = path_str.replace("\\", "/") path_as_posix = SINGLE_SLASH_AFTER_PROTOCOL_PATTERN.sub("://", path_as_posix) path_as_posix += "//" if path_as_posix.endswith(":") else "" # Add slashes to root of the protocol return path_as_posix def exists(self, download_config: Optional[DownloadConfig] = None): """Extend `pathlib.Path.exists` method to support both local and remote files. Args: download_config : mainly use token or storage_options to support different platforms and auth types. Returns: `bool` """ return xexists(str(self), download_config=download_config) def glob(self, pattern, download_config: Optional[DownloadConfig] = None): """Glob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Args: pattern (`str`): Pattern that resulting paths must match. download_config : mainly use token or storage_options to support different platforms and auth types. 
Yields: [`xPath`] """ posix_path = self.as_posix() main_hop, *rest_hops = posix_path.split("::") if is_local_path(main_hop): yield from Path(main_hop).glob(pattern) else: # globbing inside a zip in a private repo requires authentication if rest_hops: urlpath = rest_hops[0] urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config) storage_options = {urlpath.split("://")[0]: storage_options} posix_path = "::".join([main_hop, urlpath, *rest_hops[1:]]) else: storage_options = None fs, *_ = url_to_fs(xjoin(posix_path, pattern), **(storage_options or {})) globbed_paths = fs.glob(xjoin(main_hop, pattern)) for globbed_path in globbed_paths: yield type(self)("::".join([f"{fs.protocol}://{globbed_path}"] + rest_hops)) def rglob(self, pattern, **kwargs): """Rglob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Args: pattern (`str`): Pattern that resulting paths must match. Yields: [`xPath`] """ return self.glob("**/" + pattern, **kwargs) @property def parent(self) -> "xPath": """Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: [`xPath`] """ return type(self)(xdirname(self.as_posix())) @property def name(self) -> str: """Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: `str` """ return PurePosixPath(self.as_posix().split("::")[0]).name @property def stem(self) -> str: """Stem function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: `str` """ return PurePosixPath(self.as_posix().split("::")[0]).stem @property def suffix(self) -> str: """Suffix function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs. Returns: `str` """ return PurePosixPath(self.as_posix().split("::")[0]).suffix def open(self, *args, **kwargs): """Extend :func:`xopen` to support argument of type :obj:`~pathlib.Path`. Args: **args: Arguments passed to :func:`fsspec.open`. **kwargs: Keyword arguments passed to :func:`fsspec.open`. Returns: `io.FileIO`: File-like object. """ return xopen(str(self), *args, **kwargs) def joinpath(self, *p: Tuple[str, ...]) -> "xPath": """Extend :func:`xjoin` to support argument of type :obj:`~pathlib.Path`. Args: *p (`tuple` of `str`): Other path components. 
Returns: [`xPath`] """ return type(self)(xjoin(self.as_posix(), *p)) def __truediv__(self, p: str) -> "xPath": return self.joinpath(p) def with_suffix(self, suffix): main_hop, *rest_hops = str(self).split("::") if is_local_path(main_hop): return type(self)(str(super().with_suffix(suffix))) return type(self)("::".join([type(self)(PurePosixPath(main_hop).with_suffix(suffix)).as_posix()] + rest_hops)) def _as_str(path: Union[str, Path, xPath]): return str(path) if isinstance(path, xPath) else str(xPath(str(path))) def xgzip_open(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs): import gzip if hasattr(filepath_or_buffer, "read"): return gzip.open(filepath_or_buffer, *args, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) return gzip.open(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs) def xnumpy_load(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs): import numpy as np if hasattr(filepath_or_buffer, "read"): return np.load(filepath_or_buffer, *args, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) return np.load(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs) def xpandas_read_csv(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import pandas as pd if hasattr(filepath_or_buffer, "read"): return pd.read_csv(filepath_or_buffer, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) if kwargs.get("compression", "infer") == "infer": kwargs["compression"] = _get_extraction_protocol(filepath_or_buffer, download_config=download_config) return pd.read_csv(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) def xpandas_read_excel(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import pandas as pd if hasattr(filepath_or_buffer, "read"): try: return pd.read_excel(filepath_or_buffer, **kwargs) except ValueError: # Cannot seek streaming HTTP file return pd.read_excel(BytesIO(filepath_or_buffer.read()), **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) try: return pd.read_excel(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) except ValueError: # Cannot seek streaming HTTP file return pd.read_excel( BytesIO(xopen(filepath_or_buffer, "rb", download_config=download_config).read()), **kwargs ) def xpyarrow_parquet_read_table(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import pyarrow.parquet as pq if hasattr(filepath_or_buffer, "read"): return pq.read_table(filepath_or_buffer, **kwargs) else: filepath_or_buffer = str(filepath_or_buffer) return pq.read_table(xopen(filepath_or_buffer, mode="rb", download_config=download_config), **kwargs) def xsio_loadmat(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs): import scipy.io as sio if hasattr(filepath_or_buffer, "read"): return sio.loadmat(filepath_or_buffer, **kwargs) else: return sio.loadmat(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs) def xet_parse(source, parser=None, download_config: Optional[DownloadConfig] = None): """Extend `xml.etree.ElementTree.parse` function to support remote files. Args: source: File path or file object. parser (`XMLParser`, *optional*, default `XMLParser`): Parser instance. download_config : mainly use token or storage_options to support different platforms and auth types. 
Returns: `xml.etree.ElementTree.Element`: Root element of the given source document. """ if hasattr(source, "read"): return ET.parse(source, parser=parser) else: with xopen(source, "rb", download_config=download_config) as f: return ET.parse(f, parser=parser) def xxml_dom_minidom_parse(filename_or_file, download_config: Optional[DownloadConfig] = None, **kwargs): """Extend `xml.dom.minidom.parse` function to support remote files. Args: filename_or_file (`str` or file): File path or file object. download_config : mainly use token or storage_options to support different platforms and auth types. **kwargs (optional): Additional keyword arguments passed to `xml.dom.minidom.parse`. Returns: :obj:`xml.dom.minidom.Document`: Parsed document. """ if hasattr(filename_or_file, "read"): return xml.dom.minidom.parse(filename_or_file, **kwargs) else: with xopen(filename_or_file, "rb", download_config=download_config) as f: return xml.dom.minidom.parse(f, **kwargs) class ArchiveIterable(TrackedIterableFromGenerator): """An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`""" @staticmethod def _iter_tar(f): stream = tarfile.open(fileobj=f, mode="r|*") for tarinfo in stream: file_path = tarinfo.name if not tarinfo.isreg(): continue if file_path is None: continue if os.path.basename(file_path).startswith((".", "__")): # skipping hidden files continue file_obj = stream.extractfile(tarinfo) yield file_path, file_obj stream.members = [] del stream @staticmethod def _iter_zip(f): zipf = zipfile.ZipFile(f) for member in zipf.infolist(): file_path = member.filename if member.is_dir(): continue if file_path is None: continue if os.path.basename(file_path).startswith((".", "__")): # skipping hidden files continue file_obj = zipf.open(member) yield file_path, file_obj @classmethod def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]: compression = _get_extraction_protocol_with_magic_number(f) if compression == "zip": yield from cls._iter_zip(f) else: yield from cls._iter_tar(f) @classmethod def _iter_from_urlpath( cls, urlpath: str, download_config: Optional[DownloadConfig] = None ) -> Generator[Tuple, None, None]: compression = _get_extraction_protocol(urlpath, download_config=download_config) # Set block_size=0 to get faster streaming # (e.g. 
for hf:// and https:// it uses streaming Requests file-like instances) with xopen(urlpath, "rb", download_config=download_config, block_size=0) as f: if compression == "zip": yield from cls._iter_zip(f) else: yield from cls._iter_tar(f) @classmethod def from_buf(cls, fileobj) -> "ArchiveIterable": return cls(cls._iter_from_fileobj, fileobj) @classmethod def from_urlpath(cls, urlpath_or_buf, download_config: Optional[DownloadConfig] = None) -> "ArchiveIterable": return cls(cls._iter_from_urlpath, urlpath_or_buf, download_config) class FilesIterable(TrackedIterableFromGenerator): """An iterable of paths from a list of directories or files""" @classmethod def _iter_from_urlpaths( cls, urlpaths: Union[str, List[str]], download_config: Optional[DownloadConfig] = None ) -> Generator[str, None, None]: if not isinstance(urlpaths, list): urlpaths = [urlpaths] for urlpath in urlpaths: if xisfile(urlpath, download_config=download_config): yield urlpath elif xisdir(urlpath, download_config=download_config): for dirpath, dirnames, filenames in xwalk(urlpath, download_config=download_config): # in-place modification to prune the search dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))]) if xbasename(dirpath).startswith((".", "__")): # skipping hidden directories continue for filename in sorted(filenames): if filename.startswith((".", "__")): # skipping hidden files continue yield xjoin(dirpath, filename) else: raise FileNotFoundError(urlpath) @classmethod def from_urlpaths(cls, urlpaths, download_config: Optional[DownloadConfig] = None) -> "FilesIterable": return cls(cls._iter_from_urlpaths, urlpaths, download_config)
datasets/src/datasets/utils/file_utils.py/0
{ "file_path": "datasets/src/datasets/utils/file_utils.py", "repo_id": "datasets", "token_count": 22628 }
# Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF-specific utils import.""" import os import warnings from functools import partial from math import ceil from uuid import uuid4 import numpy as np import pyarrow as pa from multiprocess import get_context try: from multiprocess.shared_memory import SharedMemory except ImportError: SharedMemory = None # Version checks should prevent this being called on older Python versions from .. import config def minimal_tf_collate_fn(features): if isinstance(features, dict): # case batch_size=None: nothing to collate return features elif config.TF_AVAILABLE: import tensorflow as tf else: raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") first = features[0] batch = {} for k, v in first.items(): if isinstance(v, np.ndarray): batch[k] = np.stack([f[k] for f in features]) elif isinstance(v, tf.Tensor): batch[k] = tf.stack([f[k] for f in features]) else: batch[k] = np.array([f[k] for f in features]) return batch def minimal_tf_collate_fn_with_renaming(features): batch = minimal_tf_collate_fn(features) if "label" in batch: batch["labels"] = batch["label"] del batch["label"] return batch def is_numeric_pa_type(pa_type): if pa.types.is_list(pa_type): return is_numeric_pa_type(pa_type.value_type) return pa.types.is_integer(pa_type) or pa.types.is_floating(pa_type) or pa.types.is_decimal(pa_type) def np_get_batch( indices, dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, return_dict=False ): if not isinstance(indices, np.ndarray): indices = indices.numpy() is_batched = True # Optimization - if we're loading a sequential batch, do it with slicing instead of a list of indices if isinstance(indices, np.integer): batch = dataset[indices.item()] is_batched = False elif np.all(np.diff(indices) == 1): batch = dataset[indices[0] : indices[-1] + 1] elif isinstance(indices, np.ndarray): batch = dataset[indices] else: raise RuntimeError("Unexpected type for indices: {}".format(type(indices))) if cols_to_retain is not None: batch = { key: value for key, value in batch.items() if key in cols_to_retain or key in ("label", "label_ids", "labels") } if is_batched: actual_size = len(list(batch.values())[0]) # Get the length of one of the arrays, assume all same # Our collators expect a list of dicts, not a dict of lists/arrays, so we invert batch = [{key: value[i] for key, value in batch.items()} for i in range(actual_size)] batch = collate_fn(batch, **collate_fn_args) if return_dict: out_batch = {} for col, cast_dtype in columns_to_np_types.items(): # In case the collate_fn returns something strange array = np.array(batch[col]) array = array.astype(cast_dtype) out_batch[col] = array else: out_batch = [] for col, cast_dtype in columns_to_np_types.items(): # In case the collate_fn returns something strange array = np.array(batch[col]) array = array.astype(cast_dtype) out_batch.append(array) return out_batch def dataset_to_tf( dataset, 
cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, output_signature, shuffle, batch_size, drop_remainder, ): """Create a tf.data.Dataset from the underlying Dataset. This is a single-process method - the multiprocess equivalent is multiprocess_dataset_to_tf. Args: dataset (`Dataset`): Dataset to wrap with tf.data.Dataset. cols_to_retain (`List[str]`): Dataset column(s) to load in the tf.data.Dataset. It is acceptable to include column names that are created by the `collate_fn` and that do not exist in the original dataset. collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate lists of samples into a batch. collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the `collate_fn`. Can be empty. columns_to_np_types (`Dict[str, np.dtype]`): A `dict` mapping column names to numpy dtypes. output_signature (`Dict[str, tf.TensorSpec]`): A `dict` mapping column names to `tf.TensorSpec` objects. shuffle(`bool`): Shuffle the dataset order when loading. Recommended True for training, False for validation/evaluation. batch_size (`int`, default `None`): Size of batches to load from the dataset. Defaults to `None`, which implies that the dataset won't be batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`. drop_remainder(`bool`, default `None`): Drop the last incomplete batch when loading. If not provided, defaults to the same setting as shuffle. Returns: `tf.data.Dataset` """ if config.TF_AVAILABLE: import tensorflow as tf else: raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") # TODO Matt: When our minimum Python version is 3.8 or higher, we can delete all of this and move everything # to the NumPy multiprocessing path. if hasattr(tf, "random_index_shuffle"): random_index_shuffle = tf.random_index_shuffle elif hasattr(tf.random.experimental, "index_shuffle"): random_index_shuffle = tf.random.experimental.index_shuffle else: if len(dataset) > 10_000_000: warnings.warn( "to_tf_dataset() can be memory-inefficient on versions of TensorFlow older than 2.9. " "If you are iterating over a dataset with a very large number of samples, consider " "upgrading to TF >= 2.9." 
) random_index_shuffle = None getter_fn = partial( np_get_batch, dataset=dataset, cols_to_retain=cols_to_retain, collate_fn=collate_fn, collate_fn_args=collate_fn_args, columns_to_np_types=columns_to_np_types, return_dict=False, ) # This works because dictionaries always output in the same order tout = [tf.dtypes.as_dtype(dtype) for dtype in columns_to_np_types.values()] @tf.function(input_signature=[tf.TensorSpec(None, tf.int64)]) def fetch_function(indices): output = tf.py_function( getter_fn, inp=[indices], Tout=tout, ) return {key: output[i] for i, key in enumerate(columns_to_np_types.keys())} tf_dataset = tf.data.Dataset.range(len(dataset)) if shuffle and random_index_shuffle is not None: base_seed = tf.fill((3,), value=tf.cast(-1, dtype=tf.int64)) def scan_random_index(state, index): if tf.reduce_all(state == -1): # This generates a new random seed once per epoch only, # to ensure that we iterate over each sample exactly once per epoch state = tf.random.uniform(shape=(3,), maxval=2**62, dtype=tf.int64) shuffled_index = random_index_shuffle(index=index, seed=state, max_index=len(dataset) - 1) return state, shuffled_index tf_dataset = tf_dataset.scan(base_seed, scan_random_index) elif shuffle: tf_dataset = tf_dataset.shuffle(tf_dataset.cardinality()) if batch_size is not None: tf_dataset = tf_dataset.batch(batch_size, drop_remainder=drop_remainder) tf_dataset = tf_dataset.map(fetch_function) if batch_size is not None: def ensure_shapes(input_dict): return {key: tf.ensure_shape(val, output_signature[key].shape) for key, val in input_dict.items()} else: # Ensure shape but remove batch dimension of output_signature[key].shape def ensure_shapes(input_dict): return {key: tf.ensure_shape(val, output_signature[key].shape[1:]) for key, val in input_dict.items()} return tf_dataset.map(ensure_shapes) class SharedMemoryContext: # This is a context manager for creating shared memory that ensures cleanup happens even if a process is interrupted # The process that creates shared memory is always the one responsible for unlinking it in the end def __init__(self): self.created_shms = [] self.opened_shms = [] def get_shm(self, name, size, create): shm = SharedMemory(size=int(size), name=name, create=create) if create: # We only unlink the ones we created in this context self.created_shms.append(shm) else: # If we didn't create it, we only close it when done, we don't unlink it self.opened_shms.append(shm) return shm def get_array(self, name, shape, dtype, create): shm = self.get_shm(name=name, size=np.prod(shape) * np.dtype(dtype).itemsize, create=create) return np.ndarray(shape, dtype=dtype, buffer=shm.buf) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): for shm in self.created_shms: shm.close() shm.unlink() for shm in self.opened_shms: shm.close() class NumpyMultiprocessingGenerator: def __init__( self, dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, output_signature, shuffle, batch_size, drop_remainder, num_workers, ): self.dataset = dataset self.cols_to_retain = cols_to_retain self.collate_fn = collate_fn self.collate_fn_args = collate_fn_args self.string_columns = [col for col, dtype in columns_to_np_types.items() if dtype is np.str_] # Strings will be converted to arrays of single unicode chars, so that we can have a constant itemsize self.columns_to_np_types = { col: dtype if col not in self.string_columns else np.dtype("U1") for col, dtype in columns_to_np_types.items() } self.output_signature = output_signature self.shuffle = 
shuffle self.batch_size = batch_size self.drop_remainder = drop_remainder self.num_workers = num_workers # Because strings are converted to characters, we need to add one extra dimension to the shape self.columns_to_ranks = { col: int(spec.shape.rank) if col not in self.string_columns else int(spec.shape.rank) + 1 for col, spec in output_signature.items() } def __iter__(self): # Make sure we only spawn workers if they have work to do num_workers = min(self.num_workers, int(ceil(len(self.dataset) / self.batch_size))) # Do the shuffling in iter so that it's done at the start of each epoch per_worker_batches, final_batch, final_batch_worker = self.distribute_batches( self.dataset, self.batch_size, self.drop_remainder, num_workers, self.shuffle ) ctx = get_context("spawn") names = [] shape_arrays = [] workers = [] array_ready_events = [ctx.Event() for _ in range(num_workers)] array_loaded_events = [ctx.Event() for _ in range(num_workers)] base_args = { "dataset": self.dataset, "cols_to_retain": self.cols_to_retain, "collate_fn": self.collate_fn, "collate_fn_args": self.collate_fn_args, "columns_to_np_types": self.columns_to_np_types, "columns_to_ranks": self.columns_to_ranks, "string_columns": self.string_columns, } with SharedMemoryContext() as shm_ctx: for i in range(num_workers): worker_random_id = str(uuid4()) worker_name = f"dw_{i}_{worker_random_id}"[:10] names.append(worker_name) worker_shape_arrays = { col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=True) for col, rank in self.columns_to_ranks.items() } shape_arrays.append(worker_shape_arrays) worker_indices = per_worker_batches[i] if i == final_batch_worker and final_batch is not None: final_batch_arg = final_batch else: final_batch_arg = None worker_kwargs = { "worker_name": worker_name, "indices": worker_indices, "extra_batch": final_batch_arg, "array_ready_event": array_ready_events[i], "array_loaded_event": array_loaded_events[i], **base_args, } worker = ctx.Process(target=self.worker_loop, kwargs=worker_kwargs, daemon=True) worker.start() workers.append(worker) end_signal_received = False while not end_signal_received: for i in range(num_workers): if not array_ready_events[i].wait(timeout=60): raise TimeoutError("Data loading worker timed out!") array_ready_events[i].clear() array_shapes = shape_arrays[i] if any(np.any(shape < 0) for shape in array_shapes.values()): # Child processes send negative array shapes to indicate # that no more data is going to be sent end_signal_received = True break # Matt: Because array shapes are variable we recreate the shared memory each iteration. # I suspect repeatedly opening lots of shared memory is the bottleneck for the parent process. # A future optimization, at the cost of some code complexity, could be to reuse shared memory # between iterations, but this would require knowing in advance the maximum size, or having # a system to only create a new memory block when a new maximum size is seen. # Another potential optimization would be to figure out which memory copies are necessary, # or whether we can yield objects straight out of shared memory. 
with SharedMemoryContext() as batch_shm_ctx: # This memory context only lasts long enough to copy everything out of the batch arrays = { col: batch_shm_ctx.get_array( f"{names[i]}_{col}", shape=shape, dtype=self.columns_to_np_types[col], create=False, ) for col, shape in array_shapes.items() } # Copy everything out of shm because the memory # will be unlinked by the child process at some point arrays = {col: np.copy(arr) for col, arr in arrays.items()} # Now we convert any unicode char arrays to strings for string_col in self.string_columns: arrays[string_col] = ( arrays[string_col].view(f"U{arrays[string_col].shape[-1]}").squeeze(-1) ) yield arrays array_loaded_events[i].set() # Now we just do some cleanup # Shared memory is cleaned up by the context manager, so we just make sure workers finish for worker in workers: worker.join() def __call__(self): return self @staticmethod def worker_loop( dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, columns_to_ranks, string_columns, indices, extra_batch, worker_name, array_ready_event, array_loaded_event, ): os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" if config.TF_AVAILABLE: import tensorflow as tf else: raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") tf.config.set_visible_devices([], "GPU") # Make sure workers don't try to allocate GPU memory def send_batch_to_parent(indices): batch = np_get_batch( indices=indices, dataset=dataset, cols_to_retain=cols_to_retain, collate_fn=collate_fn, collate_fn_args=collate_fn_args, columns_to_np_types=columns_to_np_types, return_dict=True, ) # Now begins the fun part where we start shovelling shared memory at the parent process out_arrays = {} with SharedMemoryContext() as batch_shm_ctx: # The batch shared memory context exists only as long as it takes for the parent process # to read everything, after which it cleans everything up again for col, cast_dtype in columns_to_np_types.items(): # Everything has to be np.array for this to work, even if the collate_fn is giving us tf.Tensor array = batch[col] if col in string_columns: # We can't send unicode arrays over shared memory, so we convert to single chars ("U1") # which have a fixed width of 4 bytes. The parent process will convert these back to strings. array = array.view("U1").reshape(array.shape + (-1,)) shape_arrays[col][:] = array.shape out_arrays[col] = batch_shm_ctx.get_array( f"{worker_name}_{col}", shape=array.shape, dtype=cast_dtype, create=True ) out_arrays[col][:] = array array_ready_event.set() array_loaded_event.wait() array_loaded_event.clear() with SharedMemoryContext() as shm_ctx: shape_arrays = { col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=False) for col, rank in columns_to_ranks.items() } for batch in indices: send_batch_to_parent(batch) if extra_batch is not None: send_batch_to_parent(extra_batch) # Now we send a batsignal to the parent process that we're done for col, array in shape_arrays.items(): array[:] = -1 array_ready_event.set() @staticmethod def distribute_batches(dataset, batch_size, drop_remainder, num_workers, shuffle): indices = np.arange(len(dataset)) if shuffle: np.random.shuffle(indices) num_samples = len(indices) # We distribute the batches so that reading from the workers in round-robin order yields the exact # order specified in indices. This is only important when shuffle is False, but we do it regardless. 
incomplete_batch_cutoff = num_samples - (num_samples % batch_size) indices, last_incomplete_batch = np.split(indices, [incomplete_batch_cutoff]) if drop_remainder or len(last_incomplete_batch) == 0: last_incomplete_batch = None indices = indices.reshape(-1, batch_size) num_batches = len(indices) final_batches_cutoff = num_batches - (num_batches % num_workers) indices, final_batches = np.split(indices, [final_batches_cutoff]) indices = indices.reshape(-1, num_workers, batch_size) per_worker_indices = np.split(indices, indices.shape[1], axis=1) per_worker_indices = [np.squeeze(worker_indices, 1) for worker_indices in per_worker_indices] # Distribute the final batches to the first workers for i in range(len(final_batches)): # len(final_batches) can be zero, and is always less than num_workers per_worker_indices[i] = np.concatenate([per_worker_indices[i], final_batches[i].reshape(1, -1)], axis=0) # Add the last incomplete batch to the next worker, which might be the first worker if last_incomplete_batch is not None: incomplete_batch_worker_idx = len(final_batches) else: incomplete_batch_worker_idx = None return per_worker_indices, last_incomplete_batch, incomplete_batch_worker_idx def multiprocess_dataset_to_tf( dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, output_signature, shuffle, batch_size, drop_remainder, num_workers, ): """Create a tf.data.Dataset from the underlying Dataset. This is a multi-process method - the single-process equivalent is dataset_to_tf. Args: dataset (`Dataset`): Dataset to wrap with tf.data.Dataset. cols_to_retain (`List[str]`): Dataset column(s) to load in the tf.data.Dataset. It is acceptable to include column names that are created by the `collate_fn` and that do not exist in the original dataset. collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate lists of samples into a batch. collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the `collate_fn`. Can be empty. columns_to_np_types (`Dict[str, np.dtype]`): A `dict` mapping column names to numpy dtypes. output_signature (`Dict[str, tf.TensorSpec]`): A `dict` mapping column names to `tf.TensorSpec` objects. shuffle(`bool`): Shuffle the dataset order when loading. Recommended True for training, False for validation/evaluation. batch_size (`int`, default `None`): Size of batches to load from the dataset. Defaults to `None`, which implies that the dataset won't be batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`. drop_remainder(`bool`, default `None`): Drop the last incomplete batch when loading. If not provided, defaults to the same setting as shuffle. num_workers (`int`): Number of workers to use for loading the dataset. Should be >= 1. 
Returns: `tf.data.Dataset` """ if config.TF_AVAILABLE: import tensorflow as tf else: raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") data_generator = NumpyMultiprocessingGenerator( dataset=dataset, cols_to_retain=cols_to_retain, collate_fn=collate_fn, collate_fn_args=collate_fn_args, columns_to_np_types=columns_to_np_types, output_signature=output_signature, shuffle=shuffle, batch_size=batch_size, drop_remainder=drop_remainder, num_workers=num_workers, ) tf_dataset = tf.data.Dataset.from_generator(data_generator, output_signature=output_signature) if drop_remainder: dataset_length = int(len(dataset) // batch_size) else: dataset_length = int(ceil(len(dataset) / batch_size)) return tf_dataset.apply(tf.data.experimental.assert_cardinality(dataset_length))
datasets/src/datasets/utils/tf_utils.py/0
{ "file_path": "datasets/src/datasets/utils/tf_utils.py", "repo_id": "datasets", "token_count": 10951 }
import posixpath from pathlib import Path from unittest.mock import patch import pytest from fsspec.implementations.local import AbstractFileSystem, LocalFileSystem, stringify_path from fsspec.registry import _registry as _fsspec_registry class MockFileSystem(AbstractFileSystem): protocol = "mock" def __init__(self, *args, local_root_dir, **kwargs): super().__init__() self._fs = LocalFileSystem(*args, **kwargs) self.local_root_dir = Path(local_root_dir).resolve().as_posix() + "/" def mkdir(self, path, *args, **kwargs): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) return self._fs.mkdir(path, *args, **kwargs) def makedirs(self, path, *args, **kwargs): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) return self._fs.makedirs(path, *args, **kwargs) def rmdir(self, path): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) return self._fs.rmdir(path) def ls(self, path, detail=True, *args, **kwargs): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) out = self._fs.ls(path, detail=detail, *args, **kwargs) if detail: return [{**info, "name": info["name"][len(self.local_root_dir) :]} for info in out] else: return [name[len(self.local_root_dir) :] for name in out] def info(self, path, *args, **kwargs): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) out = dict(self._fs.info(path, *args, **kwargs)) out["name"] = out["name"][len(self.local_root_dir) :] return out def cp_file(self, path1, path2, *args, **kwargs): path1 = posixpath.join(self.local_root_dir, self._strip_protocol(path1)) path2 = posixpath.join(self.local_root_dir, self._strip_protocol(path2)) return self._fs.cp_file(path1, path2, *args, **kwargs) def rm_file(self, path, *args, **kwargs): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) return self._fs.rm_file(path, *args, **kwargs) def rm(self, path, *args, **kwargs): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) return self._fs.rm(path, *args, **kwargs) def _open(self, path, *args, **kwargs): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) return self._fs._open(path, *args, **kwargs) def created(self, path): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) return self._fs.created(path) def modified(self, path): path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) return self._fs.modified(path) @classmethod def _strip_protocol(cls, path): path = stringify_path(path) if path.startswith("mock://"): path = path[7:] return path class TmpDirFileSystem(MockFileSystem): protocol = "tmp" tmp_dir = None def __init__(self, *args, **kwargs): assert self.tmp_dir is not None, "TmpDirFileSystem.tmp_dir is not set" super().__init__(*args, **kwargs, local_root_dir=self.tmp_dir, auto_mkdir=True) @classmethod def _strip_protocol(cls, path): path = stringify_path(path) if path.startswith("tmp://"): path = path[6:] return path @pytest.fixture def mock_fsspec(): _fsspec_registry["mock"] = MockFileSystem _fsspec_registry["tmp"] = TmpDirFileSystem yield del _fsspec_registry["mock"] del _fsspec_registry["tmp"] @pytest.fixture def mockfs(tmp_path_factory, mock_fsspec): local_fs_dir = tmp_path_factory.mktemp("mockfs") return MockFileSystem(local_root_dir=local_fs_dir, auto_mkdir=True) @pytest.fixture def tmpfs(tmp_path_factory, mock_fsspec): tmp_fs_dir = tmp_path_factory.mktemp("tmpfs") with patch.object(TmpDirFileSystem, "tmp_dir", tmp_fs_dir): yield TmpDirFileSystem() 
TmpDirFileSystem.clear_instance_cache()
datasets/tests/fixtures/fsspec.py/0
{ "file_path": "datasets/tests/fixtures/fsspec.py", "repo_id": "datasets", "token_count": 1757 }
import importlib import shutil import textwrap import pytest from datasets import ClassLabel, DownloadManager, Features, Value from datasets.builder import InvalidConfigName from datasets.data_files import DataFilesDict, DataFilesList, get_data_patterns from datasets.download.streaming_download_manager import StreamingDownloadManager from datasets.packaged_modules.folder_based_builder.folder_based_builder import ( FolderBasedBuilder, FolderBasedBuilderConfig, ) remote_files = [ "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/hallo.txt", "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/hello.txt", "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/class1/bonjour.txt", "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/class1/bonjour2.txt", ] class DummyFolderBasedBuilder(FolderBasedBuilder): BASE_FEATURE = dict BASE_COLUMN_NAME = "base" BUILDER_CONFIG_CLASS = FolderBasedBuilderConfig EXTENSIONS = [".txt"] @pytest.fixture def cache_dir(tmp_path): return str(tmp_path / "autofolder_cache_dir") @pytest.fixture def auto_text_file(text_file): return str(text_file) @pytest.fixture def data_files_with_labels_no_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "data_files_with_labels_no_metadata" data_dir.mkdir(parents=True, exist_ok=True) subdir_class_0 = data_dir / "class0" subdir_class_0.mkdir(parents=True, exist_ok=True) subdir_class_1 = data_dir / "class1" subdir_class_1.mkdir(parents=True, exist_ok=True) filename = subdir_class_0 / "file0.txt" shutil.copyfile(auto_text_file, filename) filename2 = subdir_class_1 / "file1.txt" shutil.copyfile(auto_text_file, filename2) data_files_with_labels_no_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) return data_files_with_labels_no_metadata @pytest.fixture def data_files_with_different_levels_no_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "data_files_with_different_levels" data_dir.mkdir(parents=True, exist_ok=True) subdir_class_0 = data_dir / "class0" subdir_class_0.mkdir(parents=True, exist_ok=True) subdir_class_1 = data_dir / "subdir" / "class1" subdir_class_1.mkdir(parents=True, exist_ok=True) filename = subdir_class_0 / "file0.txt" shutil.copyfile(auto_text_file, filename) filename2 = subdir_class_1 / "file1.txt" shutil.copyfile(auto_text_file, filename2) data_files_with_different_levels = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) return data_files_with_different_levels @pytest.fixture def data_files_with_one_label_no_metadata(tmp_path, auto_text_file): # only one label found = all files in a single dir/in a root dir data_dir = tmp_path / "data_files_with_one_label" data_dir.mkdir(parents=True, exist_ok=True) filename = data_dir / "file0.txt" shutil.copyfile(auto_text_file, filename) filename2 = data_dir / "file1.txt" shutil.copyfile(auto_text_file, filename2) data_files_with_one_label = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) return data_files_with_one_label @pytest.fixture def files_with_labels_and_duplicated_label_key_in_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "files_with_labels_and_label_key_in_metadata" data_dir.mkdir(parents=True, exist_ok=True) subdir_class_0 = data_dir / "class0" subdir_class_0.mkdir(parents=True, exist_ok=True) subdir_class_1 = data_dir / "class1" subdir_class_1.mkdir(parents=True, exist_ok=True) filename 
= subdir_class_0 / "file_class0.txt" shutil.copyfile(auto_text_file, filename) filename2 = subdir_class_1 / "file_class1.txt" shutil.copyfile(auto_text_file, filename2) metadata_filename = tmp_path / data_dir / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "class0/file_class0.txt", "additional_feature": "First dummy file", "label": "CLASS_0"} {"file_name": "class1/file_class1.txt", "additional_feature": "Second dummy file", "label": "CLASS_1"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) return str(filename), str(filename2), str(metadata_filename) @pytest.fixture def file_with_metadata(tmp_path, text_file): filename = tmp_path / "file.txt" shutil.copyfile(text_file, filename) metadata_filename = tmp_path / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) return str(filename), str(metadata_filename) @pytest.fixture() def files_with_metadata_that_misses_one_sample(tmp_path, auto_text_file): filename = tmp_path / "file.txt" shutil.copyfile(auto_text_file, filename) filename2 = tmp_path / "file2.txt" shutil.copyfile(auto_text_file, filename2) metadata_filename = tmp_path / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) return str(filename), str(filename2), str(metadata_filename) @pytest.fixture def data_files_with_one_split_and_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "autofolder_data_dir_with_metadata_one_split" data_dir.mkdir(parents=True, exist_ok=True) subdir = data_dir / "subdir" subdir.mkdir(parents=True, exist_ok=True) filename = data_dir / "file.txt" shutil.copyfile(auto_text_file, filename) filename2 = data_dir / "file2.txt" shutil.copyfile(auto_text_file, filename2) filename3 = subdir / "file3.txt" # in subdir shutil.copyfile(auto_text_file, filename3) metadata_filename = data_dir / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} {"file_name": "file2.txt", "additional_feature": "Second dummy file"} {"file_name": "./subdir/file3.txt", "additional_feature": "Third dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) data_files_with_one_split_and_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) assert len(data_files_with_one_split_and_metadata) == 1 assert len(data_files_with_one_split_and_metadata["train"]) == 4 return data_files_with_one_split_and_metadata @pytest.fixture def data_files_with_two_splits_and_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "autofolder_data_dir_with_metadata_two_splits" data_dir.mkdir(parents=True, exist_ok=True) train_dir = data_dir / "train" train_dir.mkdir(parents=True, exist_ok=True) test_dir = data_dir / "test" test_dir.mkdir(parents=True, exist_ok=True) filename = train_dir / "file.txt" # train shutil.copyfile(auto_text_file, filename) filename2 = train_dir / "file2.txt" # train shutil.copyfile(auto_text_file, filename2) filename3 = test_dir / "file3.txt" # test shutil.copyfile(auto_text_file, filename3) train_metadata_filename = train_dir / "metadata.jsonl" train_metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Train dummy file"} {"file_name": "file2.txt", "additional_feature": "Second train dummy 
file"} """ ) with open(train_metadata_filename, "w", encoding="utf-8") as f: f.write(train_metadata) test_metadata_filename = test_dir / "metadata.jsonl" test_metadata = textwrap.dedent( """\ {"file_name": "file3.txt", "additional_feature": "Test dummy file"} """ ) with open(test_metadata_filename, "w", encoding="utf-8") as f: f.write(test_metadata) data_files_with_two_splits_and_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) assert len(data_files_with_two_splits_and_metadata) == 2 assert len(data_files_with_two_splits_and_metadata["train"]) == 3 assert len(data_files_with_two_splits_and_metadata["test"]) == 2 return data_files_with_two_splits_and_metadata @pytest.fixture def data_files_with_zip_archives(tmp_path, auto_text_file): data_dir = tmp_path / "autofolder_data_dir_with_zip_archives" data_dir.mkdir(parents=True, exist_ok=True) archive_dir = data_dir / "archive" archive_dir.mkdir(parents=True, exist_ok=True) subdir = archive_dir / "subdir" subdir.mkdir(parents=True, exist_ok=True) filename = archive_dir / "file.txt" shutil.copyfile(auto_text_file, filename) filename2 = subdir / "file2.txt" # in subdir shutil.copyfile(auto_text_file, filename2) metadata_filename = archive_dir / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} {"file_name": "subdir/file2.txt", "additional_feature": "Second dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) shutil.make_archive(archive_dir, "zip", archive_dir) shutil.rmtree(str(archive_dir)) data_files_with_zip_archives = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) assert len(data_files_with_zip_archives) == 1 assert len(data_files_with_zip_archives["train"]) == 1 return data_files_with_zip_archives def test_config_raises_when_invalid_name() -> None: with pytest.raises(InvalidConfigName, match="Bad characters"): _ = FolderBasedBuilderConfig(name="name-with-*-invalid-character") @pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])]) def test_config_raises_when_invalid_data_files(data_files) -> None: with pytest.raises(ValueError, match="Expected a DataFilesDict"): _ = FolderBasedBuilderConfig(name="name", data_files=data_files) def test_inferring_labels_from_data_dirs(data_files_with_labels_no_metadata, cache_dir): autofolder = DummyFolderBasedBuilder( data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, drop_labels=False ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs assert autofolder.info.features == Features({"base": {}, "label": ClassLabel(names=["class0", "class1"])}) generator = autofolder._generate_examples(**gen_kwargs) assert all(example["label"] in {"class0", "class1"} for _, example in generator) def test_default_folder_builder_not_usable(data_files_with_labels_no_metadata, cache_dir): # builder would try to access non-existing attributes of a default `BuilderConfig` class # as a custom one is not provided with pytest.raises(AttributeError): _ = FolderBasedBuilder( data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, ) # test that AutoFolder is extended for streaming when it's child class is instantiated: # see line 115 in src/datasets/streaming.py def test_streaming_patched(): _ = DummyFolderBasedBuilder() module = importlib.import_module(FolderBasedBuilder.__module__) assert hasattr(module, "_patched_for_streaming") assert 
module._patched_for_streaming @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_duplicated_label_key( files_with_labels_and_duplicated_label_key_in_metadata, drop_metadata, drop_labels, cache_dir, caplog ): class0_file, class1_file, metadata_file = files_with_labels_and_duplicated_label_key_in_metadata autofolder = DummyFolderBasedBuilder( data_files=[class0_file, class1_file, metadata_file], cache_dir=cache_dir, drop_metadata=drop_metadata, drop_labels=drop_labels, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) if drop_labels is False: # infer labels from directories even if metadata files are found warning_in_logs = any("ignoring metadata columns" in record.msg.lower() for record in caplog.records) assert warning_in_logs if drop_metadata is not True else not warning_in_logs assert autofolder.info.features["label"] == ClassLabel(names=["class0", "class1"]) assert all(example["label"] in ["class0", "class1"] for _, example in generator) else: if drop_metadata is not True: # labels are from metadata assert autofolder.info.features["label"] == Value("string") assert all(example["label"] in ["CLASS_0", "CLASS_1"] for _, example in generator) else: # drop both labels and metadata assert autofolder.info.features == Features({"base": {}}) assert all(example.keys() == {"base"} for _, example in generator) @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_drop_labels( data_files_with_labels_no_metadata, auto_text_file, drop_metadata, drop_labels, cache_dir ): autofolder = DummyFolderBasedBuilder( data_files=data_files_with_labels_no_metadata, drop_metadata=drop_metadata, drop_labels=drop_labels, cache_dir=cache_dir, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs # removing labels explicitly requires drop_labels=True assert gen_kwargs["add_labels"] is not bool(drop_labels) assert gen_kwargs["add_metadata"] is False generator = autofolder._generate_examples(**gen_kwargs) if not drop_labels: assert all( example.keys() == {"base", "label"} and all(val is not None for val in example.values()) for _, example in generator ) else: assert all( example.keys() == {"base"} and all(val is not None for val in example.values()) for _, example in generator ) @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_drop_metadata(file_with_metadata, drop_metadata, drop_labels, cache_dir): file, metadata_file = file_with_metadata autofolder = DummyFolderBasedBuilder( data_files=[file, metadata_file], drop_metadata=drop_metadata, drop_labels=drop_labels, cache_dir=cache_dir, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs # since the dataset has metadata, removing the metadata explicitly requires drop_metadata=True assert gen_kwargs["add_metadata"] is not bool(drop_metadata) # since the dataset has metadata, adding the labels explicitly requires drop_labels=False assert gen_kwargs["add_labels"] is (drop_labels is False) generator = autofolder._generate_examples(**gen_kwargs) expected_columns = {"base"} if gen_kwargs["add_metadata"]: expected_columns.add("additional_feature") if gen_kwargs["add_labels"]: expected_columns.add("label") result = [example for _, 
example in generator] assert len(result) == 1 example = result[0] assert example.keys() == expected_columns for column in expected_columns: assert example[column] is not None @pytest.mark.parametrize("remote", [True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_data_files_with_different_levels_no_metadata( data_files_with_different_levels_no_metadata, drop_labels, remote, cache_dir ): data_files = remote_files if remote else data_files_with_different_levels_no_metadata autofolder = DummyFolderBasedBuilder( data_files=data_files, cache_dir=cache_dir, drop_labels=drop_labels, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) if drop_labels is not False: # with None (default) we should drop labels if files are on different levels in dir structure assert "label" not in autofolder.info.features assert all(example.keys() == {"base"} for _, example in generator) else: assert "label" in autofolder.info.features assert isinstance(autofolder.info.features["label"], ClassLabel) assert all(example.keys() == {"base", "label"} for _, example in generator) @pytest.mark.parametrize("remote", [False, True]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_data_files_with_one_label_no_metadata(data_files_with_one_label_no_metadata, drop_labels, remote, cache_dir): data_files = remote_files[:2] if remote else data_files_with_one_label_no_metadata autofolder = DummyFolderBasedBuilder( data_files=data_files, cache_dir=cache_dir, drop_labels=drop_labels, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) if drop_labels is not False: # with None (default) we should drop labels if only one label is found (=if there is a single dir) assert "label" not in autofolder.info.features assert all(example.keys() == {"base"} for _, example in generator) else: assert "label" in autofolder.info.features assert isinstance(autofolder.info.features["label"], ClassLabel) assert all(example.keys() == {"base", "label"} for _, example in generator) @pytest.mark.parametrize("drop_metadata", [None, True, False]) def test_data_files_with_metadata_that_misses_one_sample( files_with_metadata_that_misses_one_sample, drop_metadata, cache_dir ): file, file2, metadata_file = files_with_metadata_that_misses_one_sample if not drop_metadata: features = Features({"base": None, "additional_feature": Value("string")}) else: features = Features({"base": None}) autofolder = DummyFolderBasedBuilder( data_files=[file, file2, metadata_file], drop_metadata=drop_metadata, features=features, cache_dir=cache_dir, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) if not drop_metadata: with pytest.raises(ValueError): list(generator) else: assert all( example.keys() == {"base"} and all(val is not None for val in example.values()) for _, example in generator ) @pytest.mark.parametrize("streaming", [False, True]) @pytest.mark.parametrize("n_splits", [1, 2]) def test_data_files_with_metadata_and_splits( streaming, cache_dir, n_splits, data_files_with_one_split_and_metadata, data_files_with_two_splits_and_metadata ): data_files = data_files_with_one_split_and_metadata if n_splits == 1 else data_files_with_two_splits_and_metadata autofolder = DummyFolderBasedBuilder( data_files=data_files, cache_dir=cache_dir, ) download_manager = 
StreamingDownloadManager() if streaming else DownloadManager() generated_splits = autofolder._split_generators(download_manager) for (split, files), generated_split in zip(data_files.items(), generated_splits): assert split == generated_split.name expected_num_of_examples = len(files) - 1 generated_examples = list(autofolder._generate_examples(**generated_split.gen_kwargs)) assert len(generated_examples) == expected_num_of_examples assert len({example["base"] for _, example in generated_examples}) == expected_num_of_examples assert len({example["additional_feature"] for _, example in generated_examples}) == expected_num_of_examples assert all(example["additional_feature"] is not None for _, example in generated_examples) @pytest.mark.parametrize("streaming", [False, True]) def test_data_files_with_metadata_and_archives(streaming, cache_dir, data_files_with_zip_archives): autofolder = DummyFolderBasedBuilder(data_files=data_files_with_zip_archives, cache_dir=cache_dir) download_manager = StreamingDownloadManager() if streaming else DownloadManager() generated_splits = autofolder._split_generators(download_manager) for (split, files), generated_split in zip(data_files_with_zip_archives.items(), generated_splits): assert split == generated_split.name num_of_archives = len(files) expected_num_of_examples = 2 * num_of_archives generated_examples = list(autofolder._generate_examples(**generated_split.gen_kwargs)) assert len(generated_examples) == expected_num_of_examples assert len({example["base"] for _, example in generated_examples}) == expected_num_of_examples assert len({example["additional_feature"] for _, example in generated_examples}) == expected_num_of_examples assert all(example["additional_feature"] is not None for _, example in generated_examples) def test_data_files_with_wrong_metadata_file_name(cache_dir, tmp_path, auto_text_file): data_dir = tmp_path / "data_dir_with_bad_metadata" data_dir.mkdir(parents=True, exist_ok=True) shutil.copyfile(auto_text_file, data_dir / "file.txt") metadata_filename = data_dir / "bad_metadata.jsonl" # bad file metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) autofolder = DummyFolderBasedBuilder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) assert all("additional_feature" not in example for _, example in generator) def test_data_files_with_wrong_file_name_column_in_metadata_file(cache_dir, tmp_path, auto_text_file): data_dir = tmp_path / "data_dir_with_bad_metadata" data_dir.mkdir(parents=True, exist_ok=True) shutil.copyfile(auto_text_file, data_dir / "file.txt") metadata_filename = data_dir / "metadata.jsonl" metadata = textwrap.dedent( # with bad column "bad_file_name" instead of "file_name" """\ {"bad_file_name": "file.txt", "additional_feature": "Dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) autofolder = DummyFolderBasedBuilder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) with pytest.raises(ValueError) as exc_info: _ = 
autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs assert "`file_name` must be present" in str(exc_info.value)
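# ---------------------------------------------------------------------------
# Illustrative sketch (not a test): every fixture above builds a directory of
# data files plus a `metadata.jsonl` whose `file_name` column maps each file to
# extra features. Assuming such a directory exists on disk, the public entry
# point is `load_dataset` with one of the packaged folder-based builders;
# "imagefolder" is used here purely as an example of that family.
def _example_load_folder_with_metadata(data_dir: str):
    from datasets import load_dataset

    # Each example carries the decoded file plus the extra columns read from
    # metadata.jsonl (e.g. an "additional_feature" column like in the fixtures).
    return load_dataset("imagefolder", data_dir=data_dir, split="train")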
datasets/tests/packaged_modules/test_folder_based_builder.py/0
{ "file_path": "datasets/tests/packaged_modules/test_folder_based_builder.py", "repo_id": "datasets", "token_count": 9076 }
import os import sys from pathlib import Path import pytest from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node from .utils import execute_subprocess_async, get_torch_dist_unique_port, require_torch def test_split_dataset_by_node_map_style(): full_ds = Dataset.from_dict({"i": range(17)}) full_size = len(full_ds) world_size = 3 datasets_per_rank = [ split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size) ] assert sum(len(ds) for ds in datasets_per_rank) == full_size assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size def test_split_dataset_by_node_iterable(): def gen(): return ({"i": i} for i in range(17)) world_size = 3 full_ds = IterableDataset.from_generator(gen) full_size = len(list(full_ds)) datasets_per_rank = [ split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size) ] assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size @pytest.mark.parametrize("shards_per_node", [1, 2, 3]) def test_split_dataset_by_node_iterable_sharded(shards_per_node): def gen(shards): for shard in shards: yield from ({"i": i, "shard": shard} for i in range(17)) world_size = 3 num_shards = shards_per_node * world_size gen_kwargs = {"shards": [f"shard_{shard_idx}.txt" for shard_idx in range(num_shards)]} full_ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs) full_size = len(list(full_ds)) assert full_ds.num_shards == world_size * shards_per_node datasets_per_rank = [ split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size) ] assert [ds.num_shards for ds in datasets_per_rank] == [shards_per_node] * world_size assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size def test_split_dataset_by_node_iterable_distributed(): def gen(): return ({"i": i} for i in range(100)) world_size = 3 num_workers = 3 full_ds = IterableDataset.from_generator(gen) full_size = len(list(full_ds)) datasets_per_rank = [ split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size) ] datasets_per_rank_per_worker = [ split_dataset_by_node(ds, rank=worker, world_size=num_workers) for ds in datasets_per_rank for worker in range(num_workers) ] assert sum(len(list(ds)) for ds in datasets_per_rank_per_worker) == full_size assert len({tuple(x.values()) for ds in datasets_per_rank_per_worker for x in ds}) == full_size def test_distributed_shuffle_iterable(): def gen(): return ({"i": i} for i in range(17)) world_size = 2 full_ds = IterableDataset.from_generator(gen) full_size = len(list(full_ds)) ds_rank0 = split_dataset_by_node(full_ds, rank=0, world_size=world_size).shuffle(seed=42) assert len(list(ds_rank0)) == 1 + full_size // world_size with pytest.raises(RuntimeError): split_dataset_by_node(full_ds, rank=0, world_size=world_size).shuffle() ds_rank0 = split_dataset_by_node(full_ds.shuffle(seed=42), rank=0, world_size=world_size) assert len(list(ds_rank0)) == 1 + full_size // world_size with pytest.raises(RuntimeError): split_dataset_by_node(full_ds.shuffle(), rank=0, world_size=world_size) @pytest.mark.parametrize("streaming", [False, True]) @require_torch @pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows") @pytest.mark.integration def 
test_torch_distributed_run(streaming): nproc_per_node = 2 master_port = get_torch_dist_unique_port() test_script = Path(__file__).resolve().parent / "distributed_scripts" / "run_torch_distributed.py" distributed_args = f""" -m torch.distributed.run --nproc_per_node={nproc_per_node} --master_port={master_port} {test_script} """.split() args = f""" --streaming={streaming} """.split() cmd = [sys.executable] + distributed_args + args execute_subprocess_async(cmd, env=os.environ.copy()) @pytest.mark.parametrize( "nproc_per_node, num_workers", [ (2, 2), # each node has 2 shards and each worker has 1 shards (3, 2), # each node uses all the shards but skips examples, and each worker has 2 shards ], ) @require_torch @pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows") @pytest.mark.integration def test_torch_distributed_run_streaming_with_num_workers(nproc_per_node, num_workers): streaming = True master_port = get_torch_dist_unique_port() test_script = Path(__file__).resolve().parent / "distributed_scripts" / "run_torch_distributed.py" distributed_args = f""" -m torch.distributed.run --nproc_per_node={nproc_per_node} --master_port={master_port} {test_script} """.split() args = f""" --streaming={streaming} --num_workers={num_workers} """.split() cmd = [sys.executable] + distributed_args + args execute_subprocess_async(cmd, env=os.environ.copy())
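# ---------------------------------------------------------------------------
# Illustrative sketch (not a test): `split_dataset_by_node`, exercised above,
# gives each rank a disjoint share of the dataset so that the union over all
# ranks covers every example exactly once.
def _example_split_for_rank(rank: int, world_size: int):
    from datasets import Dataset
    from datasets.distributed import split_dataset_by_node

    ds = Dataset.from_dict({"i": list(range(100))})
    # In a real torch.distributed job, `rank` and `world_size` would come from
    # the process group rather than being passed in by hand.
    return split_dataset_by_node(ds, rank=rank, world_size=world_size)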
datasets/tests/test_distributed.py/0
{ "file_path": "datasets/tests/test_distributed.py", "repo_id": "datasets", "token_count": 2244 }
import re import sys import tempfile import unittest from pathlib import Path import pytest import yaml from huggingface_hub import DatasetCard, DatasetCardData from datasets.config import METADATA_CONFIGS_FIELD from datasets.features import Features, Value from datasets.info import DatasetInfo from datasets.utils.metadata import MetadataConfigs def _dedent(string: str) -> str: indent_level = min(re.search("^ +", t).end() if t.startswith(" ") else 0 for t in string.splitlines()) return "\n".join([line[indent_level:] for line in string.splitlines() if indent_level < len(line)]) README_YAML = """\ --- language: - zh - en task_ids: - sentiment-classification --- # Begin of markdown Some cool dataset card """ README_EMPTY_YAML = """\ --- --- # Begin of markdown Some cool dataset card """ README_NO_YAML = """\ # Begin of markdown Some cool dataset card """ README_METADATA_CONFIG_INCORRECT_FORMAT = f"""\ --- {METADATA_CONFIGS_FIELD}: data_dir: v1 drop_labels: true --- """ README_METADATA_SINGLE_CONFIG = f"""\ --- {METADATA_CONFIGS_FIELD}: - config_name: custom data_dir: v1 drop_labels: true --- """ README_METADATA_TWO_CONFIGS_WITH_DEFAULT_FLAG = f"""\ --- {METADATA_CONFIGS_FIELD}: - config_name: v1 data_dir: v1 drop_labels: true - config_name: v2 data_dir: v2 drop_labels: false default: true --- """ README_METADATA_TWO_CONFIGS_WITH_DEFAULT_NAME = f"""\ --- {METADATA_CONFIGS_FIELD}: - config_name: custom data_dir: custom drop_labels: true - config_name: default data_dir: data drop_labels: false --- """ README_METADATA_WITH_FEATURES = f"""\ --- {METADATA_CONFIGS_FIELD}: - config_name: default features: - name: id dtype: int64 - name: name dtype: string - name: score dtype: float64 --- """ EXPECTED_METADATA_SINGLE_CONFIG = {"custom": {"data_dir": "v1", "drop_labels": True}} EXPECTED_METADATA_TWO_CONFIGS_DEFAULT_FLAG = { "v1": {"data_dir": "v1", "drop_labels": True}, "v2": {"data_dir": "v2", "drop_labels": False, "default": True}, } EXPECTED_METADATA_TWO_CONFIGS_DEFAULT_NAME = { "custom": {"data_dir": "custom", "drop_labels": True}, "default": {"data_dir": "data", "drop_labels": False}, } EXPECTED_METADATA_WITH_FEATURES = { "default": { "features": Features( {"id": Value(dtype="int64"), "name": Value(dtype="string"), "score": Value(dtype="float64")} ) } } @pytest.fixture def data_dir_with_two_subdirs(tmp_path): data_dir = tmp_path / "data_dir_with_two_configs_in_metadata" cats_data_dir = data_dir / "cats" cats_data_dir.mkdir(parents=True) dogs_data_dir = data_dir / "dogs" dogs_data_dir.mkdir(parents=True) with open(cats_data_dir / "cat.jpg", "wb") as f: f.write(b"this_is_a_cat_image_bytes") with open(dogs_data_dir / "dog.jpg", "wb") as f: f.write(b"this_is_a_dog_image_bytes") return str(data_dir) class TestMetadataUtils(unittest.TestCase): def test_metadata_dict_from_readme(self): with tempfile.TemporaryDirectory() as tmp_dir: path = Path(tmp_dir) / "README.md" with open(path, "w+") as readme_file: readme_file.write(README_YAML) dataset_card_data = DatasetCard.load(path).data self.assertDictEqual( dataset_card_data.to_dict(), {"language": ["zh", "en"], "task_ids": ["sentiment-classification"]} ) with open(path, "w+") as readme_file: readme_file.write(README_EMPTY_YAML) if ( sys.platform != "win32" ): # there is a bug on windows, see https://github.com/huggingface/huggingface_hub/issues/1546 dataset_card_data = DatasetCard.load(path).data self.assertDictEqual(dataset_card_data.to_dict(), {}) with open(path, "w+") as readme_file: readme_file.write(README_NO_YAML) dataset_card_data = 
DatasetCard.load(path).data self.assertEqual(dataset_card_data.to_dict(), {}) def test_from_yaml_string(self): valid_yaml_string = _dedent( """\ annotations_creators: - found language_creators: - found language: - en license: - unknown multilinguality: - monolingual pretty_name: Test Dataset size_categories: - 10K<n<100K source_datasets: - extended|other-yahoo-webscope-l6 task_categories: - question-answering task_ids: - open-domain-qa """ ) assert DatasetCardData(**yaml.safe_load(valid_yaml_string)).to_dict() valid_yaml_with_optional_keys = _dedent( """\ annotations_creators: - found language_creators: - found language: - en license: - unknown multilinguality: - monolingual pretty_name: Test Dataset size_categories: - 10K<n<100K source_datasets: - extended|other-yahoo-webscope-l6 task_categories: - text-classification task_ids: - multi-class-classification paperswithcode_id: - squad configs: - en train-eval-index: - config: en task: text-classification task_id: multi_class_classification splits: train_split: train eval_split: test col_mapping: text: text label: target metrics: - type: accuracy name: Accuracy extra_gated_prompt: | By clicking on “Access repository” below, you also agree to ImageNet Terms of Access: [RESEARCHER_FULLNAME] (the "Researcher") has requested permission to use the ImageNet database (the "Database") at Princeton University and Stanford University. In exchange for such permission, Researcher hereby agrees to the following terms and conditions: 1. Researcher shall use the Database only for non-commercial research and educational purposes. extra_gated_fields: Company: text Country: text I agree to use this model for non-commerical use ONLY: checkbox """ ) assert DatasetCardData(**yaml.safe_load(valid_yaml_with_optional_keys)).to_dict() @pytest.mark.parametrize( "readme_content, expected_metadata_configs_dict, expected_default_config_name", [ (README_METADATA_SINGLE_CONFIG, EXPECTED_METADATA_SINGLE_CONFIG, "custom"), (README_METADATA_TWO_CONFIGS_WITH_DEFAULT_FLAG, EXPECTED_METADATA_TWO_CONFIGS_DEFAULT_FLAG, "v2"), (README_METADATA_TWO_CONFIGS_WITH_DEFAULT_NAME, EXPECTED_METADATA_TWO_CONFIGS_DEFAULT_NAME, "default"), (README_METADATA_WITH_FEATURES, EXPECTED_METADATA_WITH_FEATURES, "default"), ], ) def test_metadata_configs_dataset_card_data( readme_content, expected_metadata_configs_dict, expected_default_config_name ): with tempfile.TemporaryDirectory() as tmp_dir: path = Path(tmp_dir) / "README.md" with open(path, "w+") as readme_file: readme_file.write(readme_content) dataset_card_data = DatasetCard.load(path).data metadata_configs_dict = MetadataConfigs.from_dataset_card_data(dataset_card_data) assert metadata_configs_dict == expected_metadata_configs_dict assert metadata_configs_dict.get_default_config_name() == expected_default_config_name def test_metadata_configs_incorrect_yaml(): with tempfile.TemporaryDirectory() as tmp_dir: path = Path(tmp_dir) / "README.md" with open(path, "w+") as readme_file: readme_file.write(README_METADATA_CONFIG_INCORRECT_FORMAT) dataset_card_data = DatasetCard.load(path).data with pytest.raises(ValueError): _ = MetadataConfigs.from_dataset_card_data(dataset_card_data) def test_split_order_in_metadata_configs_from_exported_parquet_files_and_dataset_infos(): exported_parquet_files = [ { "dataset": "beans", "config": "default", "split": "test", "url": "https://huggingface.co/datasets/beans/resolve/refs%2Fconvert%2Fparquet/default/test/0000.parquet", "filename": "0000.parquet", "size": 17707203, }, { "dataset": "beans", 
"config": "default", "split": "train", "url": "https://huggingface.co/datasets/beans/resolve/refs%2Fconvert%2Fparquet/default/train/0000.parquet", "filename": "0000.parquet", "size": 143780164, }, { "dataset": "beans", "config": "default", "split": "validation", "url": "https://huggingface.co/datasets/beans/resolve/refs%2Fconvert%2Fparquet/default/validation/0000.parquet", "filename": "0000.parquet", "size": 18500862, }, ] dataset_infos = { "default": DatasetInfo( dataset_name="beans", config_name="default", version="0.0.0", splits={ "train": { "name": "train", "num_bytes": 143996486, "num_examples": 1034, "shard_lengths": None, "dataset_name": "beans", }, "validation": { "name": "validation", "num_bytes": 18525985, "num_examples": 133, "shard_lengths": None, "dataset_name": "beans", }, "test": { "name": "test", "num_bytes": 17730506, "num_examples": 128, "shard_lengths": None, "dataset_name": "beans", }, }, download_checksums={ "https://huggingface.co/datasets/beans/resolve/main/data/train.zip": { "num_bytes": 143812152, "checksum": None, }, "https://huggingface.co/datasets/beans/resolve/main/data/validation.zip": { "num_bytes": 18504213, "checksum": None, }, "https://huggingface.co/datasets/beans/resolve/main/data/test.zip": { "num_bytes": 17708541, "checksum": None, }, }, download_size=180024906, post_processing_size=None, dataset_size=180252977, size_in_bytes=360277883, ) } metadata_configs = MetadataConfigs._from_exported_parquet_files_and_dataset_infos( "123", exported_parquet_files, dataset_infos ) split_names = [data_file["split"] for data_file in metadata_configs["default"]["data_files"]] assert split_names == ["train", "validation", "test"]
datasets/tests/test_metadata_util.py/0
{ "file_path": "datasets/tests/test_metadata_util.py", "repo_id": "datasets", "token_count": 5718 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Attention Processor An attention processor is a class for applying different types of attention mechanisms. ## AttnProcessor [[autodoc]] models.attention_processor.AttnProcessor [[autodoc]] models.attention_processor.AttnProcessor2_0 [[autodoc]] models.attention_processor.AttnAddedKVProcessor [[autodoc]] models.attention_processor.AttnAddedKVProcessor2_0 [[autodoc]] models.attention_processor.AttnProcessorNPU [[autodoc]] models.attention_processor.FusedAttnProcessor2_0 ## Allegro [[autodoc]] models.attention_processor.AllegroAttnProcessor2_0 ## AuraFlow [[autodoc]] models.attention_processor.AuraFlowAttnProcessor2_0 [[autodoc]] models.attention_processor.FusedAuraFlowAttnProcessor2_0 ## CogVideoX [[autodoc]] models.attention_processor.CogVideoXAttnProcessor2_0 [[autodoc]] models.attention_processor.FusedCogVideoXAttnProcessor2_0 ## CrossFrameAttnProcessor [[autodoc]] pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor ## Custom Diffusion [[autodoc]] models.attention_processor.CustomDiffusionAttnProcessor [[autodoc]] models.attention_processor.CustomDiffusionAttnProcessor2_0 [[autodoc]] models.attention_processor.CustomDiffusionXFormersAttnProcessor ## Flux [[autodoc]] models.attention_processor.FluxAttnProcessor2_0 [[autodoc]] models.attention_processor.FusedFluxAttnProcessor2_0 [[autodoc]] models.attention_processor.FluxSingleAttnProcessor2_0 ## Hunyuan [[autodoc]] models.attention_processor.HunyuanAttnProcessor2_0 [[autodoc]] models.attention_processor.FusedHunyuanAttnProcessor2_0 [[autodoc]] models.attention_processor.PAGHunyuanAttnProcessor2_0 [[autodoc]] models.attention_processor.PAGCFGHunyuanAttnProcessor2_0 ## IdentitySelfAttnProcessor2_0 [[autodoc]] models.attention_processor.PAGIdentitySelfAttnProcessor2_0 [[autodoc]] models.attention_processor.PAGCFGIdentitySelfAttnProcessor2_0 ## IP-Adapter [[autodoc]] models.attention_processor.IPAdapterAttnProcessor [[autodoc]] models.attention_processor.IPAdapterAttnProcessor2_0 [[autodoc]] models.attention_processor.SD3IPAdapterJointAttnProcessor2_0 ## JointAttnProcessor2_0 [[autodoc]] models.attention_processor.JointAttnProcessor2_0 [[autodoc]] models.attention_processor.PAGJointAttnProcessor2_0 [[autodoc]] models.attention_processor.PAGCFGJointAttnProcessor2_0 [[autodoc]] models.attention_processor.FusedJointAttnProcessor2_0 ## LoRA [[autodoc]] models.attention_processor.LoRAAttnProcessor [[autodoc]] models.attention_processor.LoRAAttnProcessor2_0 [[autodoc]] models.attention_processor.LoRAAttnAddedKVProcessor [[autodoc]] models.attention_processor.LoRAXFormersAttnProcessor ## Lumina-T2X [[autodoc]] models.attention_processor.LuminaAttnProcessor2_0 ## Mochi [[autodoc]] models.attention_processor.MochiAttnProcessor2_0 [[autodoc]] models.attention_processor.MochiVaeAttnProcessor2_0 ## Sana [[autodoc]] models.attention_processor.SanaLinearAttnProcessor2_0 [[autodoc]] models.attention_processor.SanaMultiscaleAttnProcessor2_0 
[[autodoc]] models.attention_processor.PAGCFGSanaLinearAttnProcessor2_0 [[autodoc]] models.attention_processor.PAGIdentitySanaLinearAttnProcessor2_0 ## Stable Audio [[autodoc]] models.attention_processor.StableAudioAttnProcessor2_0 ## SlicedAttnProcessor [[autodoc]] models.attention_processor.SlicedAttnProcessor [[autodoc]] models.attention_processor.SlicedAttnAddedKVProcessor ## XFormersAttnProcessor [[autodoc]] models.attention_processor.XFormersAttnProcessor [[autodoc]] models.attention_processor.XFormersAttnAddedKVProcessor ## XLAFlashAttnProcessor2_0 [[autodoc]] models.attention_processor.XLAFlashAttnProcessor2_0
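As a rough usage sketch (not part of the API reference above), attention processors are typically swapped on a model with `set_attn_processor`; the checkpoint below is only an example, and any UNet-based model with attention layers works the same way. ```py import torch from diffusers import UNet2DConditionModel from diffusers.models.attention_processor import AttnProcessor2_0  # Example checkpoint; the "unet" subfolder holds the UNet whose attention layers we inspect. unet = UNet2DConditionModel.from_pretrained(     "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="unet", torch_dtype=torch.float16 )  # See which processor class each attention layer currently uses. print({type(processor).__name__ for processor in unet.attn_processors.values()})  # Swap every attention layer to the PyTorch scaled dot-product attention processor. unet.set_attn_processor(AttnProcessor2_0()) ```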
diffusers/docs/source/en/api/attnprocessor.md/0
{ "file_path": "diffusers/docs/source/en/api/attnprocessor.md", "repo_id": "diffusers", "token_count": 1472 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # ControlNetUnion ControlNetUnionModel is an implementation of ControlNet for Stable Diffusion XL. The ControlNet model was introduced in [ControlNetPlus](https://github.com/xinsir6/ControlNetPlus) by xinsir6. It supports multiple conditioning inputs without increasing computation. *We design a new architecture that can support 10+ control types in condition text-to-image generation and can generate high resolution images visually comparable with midjourney. The network is based on the original ControlNet architecture, we propose two new modules to: 1 Extend the original ControlNet to support different image conditions using the same network parameter. 2 Support multiple conditions input without increasing computation offload, which is especially important for designers who want to edit image in detail, different conditions use the same condition encoder, without adding extra computations or parameters.* ## StableDiffusionXLControlNetUnionPipeline [[autodoc]] StableDiffusionXLControlNetUnionPipeline - all - __call__ ## StableDiffusionXLControlNetUnionImg2ImgPipeline [[autodoc]] StableDiffusionXLControlNetUnionImg2ImgPipeline - all - __call__ ## StableDiffusionXLControlNetUnionInpaintPipeline [[autodoc]] StableDiffusionXLControlNetUnionInpaintPipeline - all - __call__
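A rough usage sketch is shown below rather than a verified recipe; the checkpoint ids and the `control_mode` index are assumptions, so check the pipeline docstrings above for the exact call signature. ```py import torch from diffusers import ControlNetUnionModel, StableDiffusionXLControlNetUnionPipeline from diffusers.utils import load_image  # Assumed checkpoint id for the union ControlNet weights. controlnet = ControlNetUnionModel.from_pretrained(     "xinsir/controlnet-union-sdxl-1.0", torch_dtype=torch.float16 ) pipe = StableDiffusionXLControlNetUnionPipeline.from_pretrained(     "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16 ).to("cuda")  # One conditioning image plus the index of its control type; the mapping from # index to control type (canny, depth, pose, ...) is defined by the checkpoint. conditioning = load_image("depth_map.png")  # hypothetical local file image = pipe(     "a futuristic living room, photorealistic",     control_image=[conditioning],     control_mode=[1],  # assumed index for depth conditioning ).images[0] image.save("output.png") ```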
diffusers/docs/source/en/api/pipelines/controlnet_union.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/controlnet_union.md", "repo_id": "diffusers", "token_count": 462 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Habana Gaudi 🤗 Diffusers is compatible with Habana Gaudi through 🤗 [Optimum](https://huggingface.co/docs/optimum/habana/usage_guides/stable_diffusion). Follow the [installation](https://docs.habana.ai/en/latest/Installation_Guide/index.html) guide to install the SynapseAI and Gaudi drivers, and then install Optimum Habana: ```bash python -m pip install --upgrade-strategy eager optimum[habana] ``` To generate images with Stable Diffusion 1 and 2 on Gaudi, you need to instantiate two instances: - [`~optimum.habana.diffusers.GaudiStableDiffusionPipeline`], a pipeline for text-to-image generation. - [`~optimum.habana.diffusers.GaudiDDIMScheduler`], a Gaudi-optimized scheduler. When you initialize the pipeline, you have to specify `use_habana=True` to deploy it on HPUs and to get the fastest possible generation, you should enable **HPU graphs** with `use_hpu_graphs=True`. Finally, specify a [`~optimum.habana.GaudiConfig`] which can be downloaded from the [Habana](https://huggingface.co/Habana) organization on the Hub. ```python from optimum.habana import GaudiConfig from optimum.habana.diffusers import GaudiDDIMScheduler, GaudiStableDiffusionPipeline model_name = "stabilityai/stable-diffusion-2-base" scheduler = GaudiDDIMScheduler.from_pretrained(model_name, subfolder="scheduler") pipeline = GaudiStableDiffusionPipeline.from_pretrained( model_name, scheduler=scheduler, use_habana=True, use_hpu_graphs=True, gaudi_config="Habana/stable-diffusion-2", ) ``` Now you can call the pipeline to generate images by batches from one or several prompts: ```python outputs = pipeline( prompt=[ "High quality photo of an astronaut riding a horse in space", "Face of a yellow cat, high resolution, sitting on a park bench", ], num_images_per_prompt=10, batch_size=4, ) ``` For more information, check out 🤗 Optimum Habana's [documentation](https://huggingface.co/docs/optimum/habana/usage_guides/stable_diffusion) and the [example](https://github.com/huggingface/optimum-habana/tree/main/examples/stable-diffusion) provided in the official GitHub repository. ## Benchmark We benchmarked Habana's first-generation Gaudi and Gaudi2 with the [Habana/stable-diffusion](https://huggingface.co/Habana/stable-diffusion) and [Habana/stable-diffusion-2](https://huggingface.co/Habana/stable-diffusion-2) Gaudi configurations (mixed precision bf16/fp32) to demonstrate their performance. 
For [Stable Diffusion v1.5](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) on 512x512 images: | | Latency (batch size = 1) | Throughput | | ---------------------- |:------------------------:|:---------------------------:| | first-generation Gaudi | 3.80s | 0.308 images/s (batch size = 8) | | Gaudi2 | 1.33s | 1.081 images/s (batch size = 8) | For [Stable Diffusion v2.1](https://huggingface.co/stabilityai/stable-diffusion-2-1) on 768x768 images: | | Latency (batch size = 1) | Throughput | | ---------------------- |:------------------------:|:-------------------------------:| | first-generation Gaudi | 10.2s | 0.108 images/s (batch size = 4) | | Gaudi2 | 3.17s | 0.379 images/s (batch size = 8) |
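If you want to sanity-check numbers like these on your own device, a simple (unofficial) approach is to time the pipeline call from the example above directly; with HPU graphs enabled, the first call typically also pays for graph compilation, so run a warm-up call first. The prompt and batch sizes below are arbitrary choices for an informal measurement, not the official benchmark methodology. ```python import time  # Reuses the `pipeline` object built earlier in this guide. prompt = "High quality photo of an astronaut riding a horse in space"  # Warm-up call (graph compilation/caching typically makes the first call slower). pipeline(prompt=[prompt], num_images_per_prompt=2, batch_size=2)  num_images = 16 start = time.perf_counter() pipeline(prompt=[prompt], num_images_per_prompt=num_images, batch_size=8) elapsed = time.perf_counter() - start print(f"~{num_images / elapsed:.3f} images/s") ```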
diffusers/docs/source/en/optimization/habana.md/0
{ "file_path": "diffusers/docs/source/en/optimization/habana.md", "repo_id": "diffusers", "token_count": 1405 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> [[open-in-colab]] # Quicktour Diffusion models are trained to denoise random Gaussian noise step-by-step to generate a sample of interest, such as an image or audio. This has sparked a tremendous amount of interest in generative AI, and you have probably seen examples of diffusion generated images on the internet. 🧨 Diffusers is a library aimed at making diffusion models widely accessible to everyone. Whether you're a developer or an everyday user, this quicktour will introduce you to 🧨 Diffusers and help you get up and generating quickly! There are three main components of the library to know about: * The [`DiffusionPipeline`] is a high-level end-to-end class designed to rapidly generate samples from pretrained diffusion models for inference. * Popular pretrained [model](./api/models) architectures and modules that can be used as building blocks for creating diffusion systems. * Many different [schedulers](./api/schedulers/overview) - algorithms that control how noise is added for training, and how to generate denoised images during inference. The quicktour will show you how to use the [`DiffusionPipeline`] for inference, and then walk you through how to combine a model and scheduler to replicate what's happening inside the [`DiffusionPipeline`]. <Tip> The quicktour is a simplified version of the introductory 🧨 Diffusers [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb) to help you get started quickly. If you want to learn more about 🧨 Diffusers' goal, design philosophy, and additional details about its core API, check out the notebook! </Tip> Before you begin, make sure you have all the necessary libraries installed: ```py # uncomment to install the necessary libraries in Colab #!pip install --upgrade diffusers accelerate transformers ``` - [🤗 Accelerate](https://huggingface.co/docs/accelerate/index) speeds up model loading for inference and training. - [🤗 Transformers](https://huggingface.co/docs/transformers/index) is required to run the most popular diffusion models, such as [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview). ## DiffusionPipeline The [`DiffusionPipeline`] is the easiest way to use a pretrained diffusion system for inference. It is an end-to-end system containing the model and the scheduler. You can use the [`DiffusionPipeline`] out-of-the-box for many tasks. Take a look at the table below for some supported tasks, and for a complete list of supported tasks, check out the [🧨 Diffusers Summary](./api/pipelines/overview#diffusers-summary) table. 
| **Task** | **Description** | **Pipeline** |------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------| | Unconditional Image Generation | generate an image from Gaussian noise | [unconditional_image_generation](./using-diffusers/unconditional_image_generation) | | Text-Guided Image Generation | generate an image given a text prompt | [conditional_image_generation](./using-diffusers/conditional_image_generation) | | Text-Guided Image-to-Image Translation | adapt an image guided by a text prompt | [img2img](./using-diffusers/img2img) | | Text-Guided Image-Inpainting | fill the masked part of an image given the image, the mask and a text prompt | [inpaint](./using-diffusers/inpaint) | | Text-Guided Depth-to-Image Translation | adapt parts of an image guided by a text prompt while preserving structure via depth estimation | [depth2img](./using-diffusers/depth2img) | Start by creating an instance of a [`DiffusionPipeline`] and specify which pipeline checkpoint you would like to download. You can use the [`DiffusionPipeline`] for any [checkpoint](https://huggingface.co/models?library=diffusers&sort=downloads) stored on the Hugging Face Hub. In this quicktour, you'll load the [`stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) checkpoint for text-to-image generation. <Tip warning={true}> For [Stable Diffusion](https://huggingface.co/CompVis/stable-diffusion) models, please carefully read the [license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) first before running the model. 🧨 Diffusers implements a [`safety_checker`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) to prevent offensive or harmful content, but the model's improved image generation capabilities can still produce potentially harmful content. </Tip> Load the model with the [`~DiffusionPipeline.from_pretrained`] method: ```python >>> from diffusers import DiffusionPipeline >>> pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) ``` The [`DiffusionPipeline`] downloads and caches all modeling, tokenization, and scheduling components. You'll see that the Stable Diffusion pipeline is composed of the [`UNet2DConditionModel`] and [`PNDMScheduler`] among other things: ```py >>> pipeline StableDiffusionPipeline { "_class_name": "StableDiffusionPipeline", "_diffusers_version": "0.21.4", ..., "scheduler": [ "diffusers", "PNDMScheduler" ], ..., "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ``` We strongly recommend running the pipeline on a GPU because the model consists of roughly 1.4 billion parameters. You can move the generator object to a GPU, just like you would in PyTorch: ```python >>> pipeline.to("cuda") ``` Now you can pass a text prompt to the `pipeline` to generate an image, and then access the denoised image. By default, the image output is wrapped in a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) object. 
```python >>> image = pipeline("An image of a squirrel in Picasso style").images[0] >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/image_of_squirrel_painting.png"/> </div> Save the image by calling `save`: ```python >>> image.save("image_of_squirrel_painting.png") ``` ### Local pipeline You can also use the pipeline locally. The only difference is you need to download the weights first: ```bash !git lfs install !git clone https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5 ``` Then load the saved weights into the pipeline: ```python >>> pipeline = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", use_safetensors=True) ``` Now, you can run the pipeline as you would in the section above. ### Swapping schedulers Different schedulers come with different denoising speeds and quality trade-offs. The best way to find out which one works best for you is to try them out! One of the main features of 🧨 Diffusers is to allow you to easily switch between schedulers. For example, to replace the default [`PNDMScheduler`] with the [`EulerDiscreteScheduler`], load it with the [`~diffusers.ConfigMixin.from_config`] method: ```py >>> from diffusers import EulerDiscreteScheduler >>> pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) >>> pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config) ``` Try generating an image with the new scheduler and see if you notice a difference! In the next section, you'll take a closer look at the components - the model and scheduler - that make up the [`DiffusionPipeline`] and learn how to use these components to generate an image of a cat. ## Models Most models take a noisy sample, and at each timestep it predicts the *noise residual* (other models learn to predict the previous sample directly or the velocity or [`v-prediction`](https://github.com/huggingface/diffusers/blob/5e5ce13e2f89ac45a0066cb3f369462a3cf1d9ef/src/diffusers/schedulers/scheduling_ddim.py#L110)), the difference between a less noisy image and the input image. You can mix and match models to create other diffusion systems. Models are initiated with the [`~ModelMixin.from_pretrained`] method which also locally caches the model weights so it is faster the next time you load the model. For the quicktour, you'll load the [`UNet2DModel`], a basic unconditional image generation model with a checkpoint trained on cat images: ```py >>> from diffusers import UNet2DModel >>> repo_id = "google/ddpm-cat-256" >>> model = UNet2DModel.from_pretrained(repo_id, use_safetensors=True) ``` To access the model parameters, call `model.config`: ```py >>> model.config ``` The model configuration is a 🧊 frozen 🧊 dictionary, which means those parameters can't be changed after the model is created. This is intentional and ensures that the parameters used to define the model architecture at the start remain the same, while other parameters can still be adjusted during inference. Some of the most important parameters are: * `sample_size`: the height and width dimension of the input sample. * `in_channels`: the number of input channels of the input sample. * `down_block_types` and `up_block_types`: the type of down- and upsampling blocks used to create the UNet architecture. 
* `block_out_channels`: the number of output channels of the downsampling blocks; also used in reverse order for the number of input channels of the upsampling blocks. * `layers_per_block`: the number of ResNet blocks present in each UNet block. To use the model for inference, create the image shape with random Gaussian noise. It should have a `batch` axis because the model can receive multiple random noises, a `channel` axis corresponding to the number of input channels, and a `sample_size` axis for the height and width of the image: ```py >>> import torch >>> torch.manual_seed(0) >>> noisy_sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) >>> noisy_sample.shape torch.Size([1, 3, 256, 256]) ``` For inference, pass the noisy image and a `timestep` to the model. The `timestep` indicates how noisy the input image is, with more noise at the beginning and less at the end. This helps the model determine its position in the diffusion process, whether it is closer to the start or the end. Use the `sample` method to get the model output: ```py >>> with torch.no_grad(): ... noisy_residual = model(sample=noisy_sample, timestep=2).sample ``` To generate actual examples though, you'll need a scheduler to guide the denoising process. In the next section, you'll learn how to couple a model with a scheduler. ## Schedulers Schedulers manage going from a noisy sample to a less noisy sample given the model output - in this case, it is the `noisy_residual`. <Tip> 🧨 Diffusers is a toolbox for building diffusion systems. While the [`DiffusionPipeline`] is a convenient way to get started with a pre-built diffusion system, you can also choose your own model and scheduler components separately to build a custom diffusion system. </Tip> For the quicktour, you'll instantiate the [`DDPMScheduler`] with its [`~diffusers.ConfigMixin.from_config`] method: ```py >>> from diffusers import DDPMScheduler >>> scheduler = DDPMScheduler.from_pretrained(repo_id) >>> scheduler DDPMScheduler { "_class_name": "DDPMScheduler", "_diffusers_version": "0.21.4", "beta_end": 0.02, "beta_schedule": "linear", "beta_start": 0.0001, "clip_sample": true, "clip_sample_range": 1.0, "dynamic_thresholding_ratio": 0.995, "num_train_timesteps": 1000, "prediction_type": "epsilon", "sample_max_value": 1.0, "steps_offset": 0, "thresholding": false, "timestep_spacing": "leading", "trained_betas": null, "variance_type": "fixed_small" } ``` <Tip> 💡 Unlike a model, a scheduler does not have trainable weights and is parameter-free! </Tip> Some of the most important parameters are: * `num_train_timesteps`: the length of the denoising process or, in other words, the number of timesteps required to process random Gaussian noise into a data sample. * `beta_schedule`: the type of noise schedule to use for inference and training. * `beta_start` and `beta_end`: the start and end noise values for the noise schedule. To predict a slightly less noisy image, pass the following to the scheduler's [`~diffusers.DDPMScheduler.step`] method: model output, `timestep`, and current `sample`. ```py >>> less_noisy_sample = scheduler.step(model_output=noisy_residual, timestep=2, sample=noisy_sample).prev_sample >>> less_noisy_sample.shape torch.Size([1, 3, 256, 256]) ``` The `less_noisy_sample` can be passed to the next `timestep` where it'll get even less noisy! Let's bring it all together now and visualize the entire denoising process. 
First, create a function that postprocesses and displays the denoised image as a `PIL.Image`: ```py >>> import PIL.Image >>> import numpy as np >>> def display_sample(sample, i): ... image_processed = sample.cpu().permute(0, 2, 3, 1) ... image_processed = (image_processed + 1.0) * 127.5 ... image_processed = image_processed.numpy().astype(np.uint8) ... image_pil = PIL.Image.fromarray(image_processed[0]) ... display(f"Image at step {i}") ... display(image_pil) ``` To speed up the denoising process, move the input and model to a GPU: ```py >>> model.to("cuda") >>> noisy_sample = noisy_sample.to("cuda") ``` Now create a denoising loop that predicts the residual of the less noisy sample, and computes the less noisy sample with the scheduler: ```py >>> import tqdm >>> sample = noisy_sample >>> for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)): ... # 1. predict noise residual ... with torch.no_grad(): ... residual = model(sample, t).sample ... # 2. compute less noisy image and set x_t -> x_t-1 ... sample = scheduler.step(residual, t, sample).prev_sample ... # 3. optionally look at image ... if (i + 1) % 50 == 0: ... display_sample(sample, i + 1) ``` Sit back and watch as a cat is generated from nothing but noise! 😻 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/diffusion-quicktour.png"/> </div> ## Next steps Hopefully, you generated some cool images with 🧨 Diffusers in this quicktour! For your next steps, you can: * Train or finetune a model to generate your own images in the [training](./tutorials/basic_training) tutorial. * See example official and community [training or finetuning scripts](https://github.com/huggingface/diffusers/tree/main/examples#-diffusers-examples) for a variety of use cases. * Learn more about loading, accessing, changing, and comparing schedulers in the [Using different Schedulers](./using-diffusers/schedulers) guide. * Explore prompt engineering, speed and memory optimizations, and tips and tricks for generating higher-quality images with the [Stable Diffusion](./stable_diffusion) guide. * Dive deeper into speeding up 🧨 Diffusers with guides on [optimized PyTorch on a GPU](./optimization/fp16), and inference guides for running [Stable Diffusion on Apple Silicon (M1/M2)](./optimization/mps) and [ONNX Runtime](./optimization/onnx).
diffusers/docs/source/en/quicktour.md/0
{ "file_path": "diffusers/docs/source/en/quicktour.md", "repo_id": "diffusers", "token_count": 4860 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # T2I-Adapter [T2I-Adapter](https://hf.co/papers/2302.08453) is a lightweight adapter model that provides an additional conditioning input image (line art, canny, sketch, depth, pose) to better control image generation. It is similar to a ControlNet, but it is a lot smaller (~77M parameters and ~300MB file size) because its only inserts weights into the UNet instead of copying and training it. The T2I-Adapter is only available for training with the Stable Diffusion XL (SDXL) model. This guide will explore the [train_t2i_adapter_sdxl.py](https://github.com/huggingface/diffusers/blob/main/examples/t2i_adapter/train_t2i_adapter_sdxl.py) training script to help you become familiar with it, and how you can adapt it for your own use-case. Before running the script, make sure you install the library from source: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Then navigate to the example folder containing the training script and install the required dependencies for the script you're using: ```bash cd examples/t2i_adapter pip install -r requirements.txt ``` <Tip> 🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more. </Tip> Initialize an 🤗 Accelerate environment: ```bash accelerate config ``` To setup a default 🤗 Accelerate environment without choosing any configurations: ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell, like a notebook, you can use: ```py from accelerate.utils import write_basic_config write_basic_config() ``` Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script. <Tip> The following sections highlight parts of the training script that are important for understanding how to modify it, but it doesn't cover every aspect of the script in detail. If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/t2i_adapter/train_t2i_adapter_sdxl.py) and let us know if you have any questions or concerns. </Tip> ## Script parameters The training script provides many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/t2i_adapter/train_t2i_adapter_sdxl.py#L233) function. It provides default values for each parameter, such as the training batch size and learning rate, but you can also set your own values in the training command if you'd like. 
For example, to activate gradient accumulation, add the `--gradient_accumulation_steps` parameter to the training command: ```bash accelerate launch train_t2i_adapter_sdxl.py \ ----gradient_accumulation_steps=4 ``` Many of the basic and important parameters are described in the [Text-to-image](text2image#script-parameters) training guide, so this guide just focuses on the relevant T2I-Adapter parameters: - `--pretrained_vae_model_name_or_path`: path to a pretrained VAE; the SDXL VAE is known to suffer from numerical instability, so this parameter allows you to specify a better [VAE](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix) - `--crops_coords_top_left_h` and `--crops_coords_top_left_w`: height and width coordinates to include in SDXL's crop coordinate embeddings - `--conditioning_image_column`: the column of the conditioning images in the dataset - `--proportion_empty_prompts`: the proportion of image prompts to replace with empty strings ## Training script As with the script parameters, a walkthrough of the training script is provided in the [Text-to-image](text2image#training-script) training guide. Instead, this guide takes a look at the T2I-Adapter relevant parts of the script. The training script begins by preparing the dataset. This incudes [tokenizing](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/t2i_adapter/train_t2i_adapter_sdxl.py#L674) the prompt and [applying transforms](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/t2i_adapter/train_t2i_adapter_sdxl.py#L714) to the images and conditioning images. ```py conditioning_image_transforms = transforms.Compose( [ transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(args.resolution), transforms.ToTensor(), ] ) ``` Within the [`main()`](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/t2i_adapter/train_t2i_adapter_sdxl.py#L770) function, the T2I-Adapter is either loaded from a pretrained adapter or it is randomly initialized: ```py if args.adapter_model_name_or_path: logger.info("Loading existing adapter weights.") t2iadapter = T2IAdapter.from_pretrained(args.adapter_model_name_or_path) else: logger.info("Initializing t2iadapter weights.") t2iadapter = T2IAdapter( in_channels=3, channels=(320, 640, 1280, 1280), num_res_blocks=2, downscale_factor=16, adapter_type="full_adapter_xl", ) ``` The [optimizer](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/t2i_adapter/train_t2i_adapter_sdxl.py#L952) is initialized for the T2I-Adapter parameters: ```py params_to_optimize = t2iadapter.parameters() optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) ``` Lastly, in the [training loop](https://github.com/huggingface/diffusers/blob/aab6de22c33cc01fb7bc81c0807d6109e2c998c9/examples/t2i_adapter/train_t2i_adapter_sdxl.py#L1086), the adapter conditioning image and the text embeddings are passed to the UNet to predict the noise residual: ```py t2iadapter_image = batch["conditioning_pixel_values"].to(dtype=weight_dtype) down_block_additional_residuals = t2iadapter(t2iadapter_image) down_block_additional_residuals = [ sample.to(dtype=weight_dtype) for sample in down_block_additional_residuals ] model_pred = unet( inp_noisy_latents, timesteps, 
    encoder_hidden_states=batch["prompt_ids"],
    added_cond_kwargs=batch["unet_added_conditions"],
    down_block_additional_residuals=down_block_additional_residuals,
).sample
```

If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process.

## Launch the script

Now you're ready to launch the training script! 🚀

For this example training, you'll use the [fusing/fill50k](https://huggingface.co/datasets/fusing/fill50k) dataset. You can also create and use your own dataset if you want (see the [Create a dataset for training](create_dataset) guide).

Set the environment variable `MODEL_DIR` to a model id on the Hub or a path to a local model and `OUTPUT_DIR` to where you want to save the model.

Download the following images to condition your training with:

```bash
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png
```

<Tip>

To monitor training progress with Weights & Biases, add the `--report_to=wandb` parameter to the training command. You'll also need to add the `--validation_image`, `--validation_prompt`, and `--validation_steps` parameters to the training command to keep track of results. This can be really useful for debugging the model and viewing intermediate results.

</Tip>

```bash
export MODEL_DIR="stabilityai/stable-diffusion-xl-base-1.0"
export OUTPUT_DIR="path to save model"

accelerate launch train_t2i_adapter_sdxl.py \
  --pretrained_model_name_or_path=$MODEL_DIR \
  --output_dir=$OUTPUT_DIR \
  --dataset_name=fusing/fill50k \
  --mixed_precision="fp16" \
  --resolution=1024 \
  --learning_rate=1e-5 \
  --max_train_steps=15000 \
  --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
  --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
  --validation_steps=100 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --report_to="wandb" \
  --seed=42 \
  --push_to_hub
```

Once training is complete, you can use your T2I-Adapter for inference:

```py
from diffusers import StableDiffusionXLAdapterPipeline, T2IAdapter, EulerAncestralDiscreteScheduler
from diffusers.utils import load_image
import torch

adapter = T2IAdapter.from_pretrained("path/to/adapter", torch_dtype=torch.float16)
pipeline = StableDiffusionXLAdapterPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", adapter=adapter, torch_dtype=torch.float16
)

pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(pipeline.scheduler.config)
pipeline.enable_xformers_memory_efficient_attention()
pipeline.enable_model_cpu_offload()

control_image = load_image("./conditioning_image_1.png")
prompt = "pale golden rod circle with old lace background"

generator = torch.manual_seed(0)
image = pipeline(
    prompt,
    image=control_image,
    generator=generator
).images[0]
image.save("./output.png")
```

## Next steps

Congratulations on training a T2I-Adapter model! 🎉 To learn more:

- Read the [Efficient Controllable Generation for SDXL with T2I-Adapters](https://huggingface.co/blog/t2i-sdxl-adapters) blog post to learn more details about the experimental results from the T2I-Adapter team.
diffusers/docs/source/en/training/t2i_adapters.md/0
{ "file_path": "diffusers/docs/source/en/training/t2i_adapters.md", "repo_id": "diffusers", "token_count": 3502 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # ControlNet ControlNet is a type of model for controlling image diffusion models by conditioning the model with an additional input image. There are many types of conditioning inputs (canny edge, user sketching, human pose, depth, and more) you can use to control a diffusion model. This is hugely useful because it affords you greater control over image generation, making it easier to generate specific images without experimenting with different text prompts or denoising values as much. <Tip> Check out Section 3.5 of the [ControlNet](https://huggingface.co/papers/2302.05543) paper v1 for a list of ControlNet implementations on various conditioning inputs. You can find the official Stable Diffusion ControlNet conditioned models on [lllyasviel](https://huggingface.co/lllyasviel)'s Hub profile, and more [community-trained](https://huggingface.co/models?other=stable-diffusion&other=controlnet) ones on the Hub. For Stable Diffusion XL (SDXL) ControlNet models, you can find them on the 🤗 [Diffusers](https://huggingface.co/diffusers) Hub organization, or you can browse [community-trained](https://huggingface.co/models?other=stable-diffusion-xl&other=controlnet) ones on the Hub. </Tip> A ControlNet model has two sets of weights (or blocks) connected by a zero-convolution layer: - a *locked copy* keeps everything a large pretrained diffusion model has learned - a *trainable copy* is trained on the additional conditioning input Since the locked copy preserves the pretrained model, training and implementing a ControlNet on a new conditioning input is as fast as finetuning any other model because you aren't training the model from scratch. This guide will show you how to use ControlNet for text-to-image, image-to-image, inpainting, and more! There are many types of ControlNet conditioning inputs to choose from, but in this guide we'll only focus on several of them. Feel free to experiment with other conditioning inputs! Before you begin, make sure you have the following libraries installed: ```py # uncomment to install the necessary libraries in Colab #!pip install -q diffusers transformers accelerate opencv-python ``` ## Text-to-image For text-to-image, you normally pass a text prompt to the model. But with ControlNet, you can specify an additional conditioning input. Let's condition the model with a canny image, a white outline of an image on a black background. This way, the ControlNet can use the canny image as a control to guide the model to generate an image with the same outline. 
Load an image and use the [opencv-python](https://github.com/opencv/opencv-python) library to extract the canny image: ```py from diffusers.utils import load_image, make_image_grid from PIL import Image import cv2 import numpy as np original_image = load_image( "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png" ) image = np.array(original_image) low_threshold = 100 high_threshold = 200 image = cv2.Canny(image, low_threshold, high_threshold) image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) canny_image = Image.fromarray(image) ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/vermeer_canny_edged.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">canny image</figcaption> </div> </div> Next, load a ControlNet model conditioned on canny edge detection and pass it to the [`StableDiffusionControlNetPipeline`]. Use the faster [`UniPCMultistepScheduler`] and enable model offloading to speed up inference and reduce memory usage. ```py from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler import torch controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16, use_safetensors=True) pipe = StableDiffusionControlNetPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, use_safetensors=True ) pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() ``` Now pass your prompt and canny image to the pipeline: ```py output = pipe( "the mona lisa", image=canny_image ).images[0] make_image_grid([original_image, canny_image, output], rows=1, cols=3) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet-text2img.png"/> </div> ## Image-to-image For image-to-image, you'd typically pass an initial image and a prompt to the pipeline to generate a new image. With ControlNet, you can pass an additional conditioning input to guide the model. Let's condition the model with a depth map, an image which contains spatial information. This way, the ControlNet can use the depth map as a control to guide the model to generate an image that preserves spatial information. You'll use the [`StableDiffusionControlNetImg2ImgPipeline`] for this task, which is different from the [`StableDiffusionControlNetPipeline`] because it allows you to pass an initial image as the starting point for the image generation process. 
Load an image and use the `depth-estimation` [`~transformers.Pipeline`] from 🤗 Transformers to extract the depth map of an image: ```py import torch import numpy as np from transformers import pipeline from diffusers.utils import load_image, make_image_grid image = load_image( "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet-img2img.jpg" ) def get_depth_map(image, depth_estimator): image = depth_estimator(image)["depth"] image = np.array(image) image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) detected_map = torch.from_numpy(image).float() / 255.0 depth_map = detected_map.permute(2, 0, 1) return depth_map depth_estimator = pipeline("depth-estimation") depth_map = get_depth_map(image, depth_estimator).unsqueeze(0).half().to("cuda") ``` Next, load a ControlNet model conditioned on depth maps and pass it to the [`StableDiffusionControlNetImg2ImgPipeline`]. Use the faster [`UniPCMultistepScheduler`] and enable model offloading to speed up inference and reduce memory usage. ```py from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, UniPCMultistepScheduler import torch controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11f1p_sd15_depth", torch_dtype=torch.float16, use_safetensors=True) pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, use_safetensors=True ) pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() ``` Now pass your prompt, initial image, and depth map to the pipeline: ```py output = pipe( "lego batman and robin", image=image, control_image=depth_map, ).images[0] make_image_grid([image, output], rows=1, cols=2) ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet-img2img.jpg"/> <figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet-img2img-2.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption> </div> </div> ## Inpainting For inpainting, you need an initial image, a mask image, and a prompt describing what to replace the mask with. ControlNet models allow you to add another control image to condition a model with. Let’s condition the model with an inpainting mask. This way, the ControlNet can use the inpainting mask as a control to guide the model to generate an image within the mask area. Load an initial image and a mask image: ```py from diffusers.utils import load_image, make_image_grid init_image = load_image( "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet-inpaint.jpg" ) init_image = init_image.resize((512, 512)) mask_image = load_image( "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet-inpaint-mask.jpg" ) mask_image = mask_image.resize((512, 512)) make_image_grid([init_image, mask_image], rows=1, cols=2) ``` Create a function to prepare the control image from the initial and mask images. This'll create a tensor to mark the pixels in `init_image` as masked if the corresponding pixel in `mask_image` is over a certain threshold. 
```py import numpy as np import torch def make_inpaint_condition(image, image_mask): image = np.array(image.convert("RGB")).astype(np.float32) / 255.0 image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0 assert image.shape[0:1] == image_mask.shape[0:1] image[image_mask > 0.5] = -1.0 # set as masked pixel image = np.expand_dims(image, 0).transpose(0, 3, 1, 2) image = torch.from_numpy(image) return image control_image = make_inpaint_condition(init_image, mask_image) ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet-inpaint.jpg"/> <figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet-inpaint-mask.jpg"/> <figcaption class="mt-2 text-center text-sm text-gray-500">mask image</figcaption> </div> </div> Load a ControlNet model conditioned on inpainting and pass it to the [`StableDiffusionControlNetInpaintPipeline`]. Use the faster [`UniPCMultistepScheduler`] and enable model offloading to speed up inference and reduce memory usage. ```py from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, UniPCMultistepScheduler controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16, use_safetensors=True) pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, use_safetensors=True ) pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() ``` Now pass your prompt, initial image, mask image, and control image to the pipeline: ```py output = pipe( "corgi face with large ears, detailed, pixar, animated, disney", num_inference_steps=20, eta=1.0, image=init_image, mask_image=mask_image, control_image=control_image, ).images[0] make_image_grid([init_image, mask_image, output], rows=1, cols=3) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet-inpaint-result.png"/> </div> ## Guess mode [Guess mode](https://github.com/lllyasviel/ControlNet/discussions/188) does not require supplying a prompt to a ControlNet at all! This forces the ControlNet encoder to do its best to "guess" the contents of the input control map (depth map, pose estimation, canny edge, etc.). Guess mode adjusts the scale of the output residuals from a ControlNet by a fixed ratio depending on the block depth. The shallowest `DownBlock` corresponds to 0.1, and as the blocks get deeper, the scale increases exponentially such that the scale of the `MidBlock` output becomes 1.0. <Tip> Guess mode does not have any impact on prompt conditioning and you can still provide a prompt if you want. </Tip> Set `guess_mode=True` in the pipeline, and it is [recommended](https://github.com/lllyasviel/ControlNet#guess-mode--non-prompt-mode) to set the `guidance_scale` value between 3.0 and 5.0. 
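To make that scaling concrete, here is a small sketch (an illustration, not the library's internal code) of how exponentially spaced scales from 0.1 to 1.0 could be computed. The count of 13 residuals (12 down block residuals plus the mid block residual) is an assumption based on the Stable Diffusion ControlNet architecture:

```py
import torch

num_residuals = 13  # assumption: 12 down block residuals + 1 mid block residual
# exponentially spaced values from 0.1 (shallowest residual) to 1.0 (MidBlock residual)
scales = torch.logspace(-1, 0, num_residuals)
print(scales[0].item(), scales[-1].item())  # roughly 0.1 ... 1.0
```

The example below puts guess mode together with a canny conditioning image.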
```py from diffusers import StableDiffusionControlNetPipeline, ControlNetModel from diffusers.utils import load_image, make_image_grid import numpy as np import torch from PIL import Image import cv2 controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", use_safetensors=True) pipe = StableDiffusionControlNetPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, use_safetensors=True).to("cuda") original_image = load_image("https://huggingface.co/takuma104/controlnet_dev/resolve/main/bird_512x512.png") image = np.array(original_image) low_threshold = 100 high_threshold = 200 image = cv2.Canny(image, low_threshold, high_threshold) image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) canny_image = Image.fromarray(image) image = pipe("", image=canny_image, guess_mode=True, guidance_scale=3.0).images[0] make_image_grid([original_image, canny_image, image], rows=1, cols=3) ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/takuma104/controlnet_dev/resolve/main/gen_compare_guess_mode/output_images/diffusers/output_bird_canny_0.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">regular mode with prompt</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/takuma104/controlnet_dev/resolve/main/gen_compare_guess_mode/output_images/diffusers/output_bird_canny_0_gm.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">guess mode without prompt</figcaption> </div> </div> ## ControlNet with Stable Diffusion XL There aren't too many ControlNet models compatible with Stable Diffusion XL (SDXL) at the moment, but we've trained two full-sized ControlNet models for SDXL conditioned on canny edge detection and depth maps. We're also experimenting with creating smaller versions of these SDXL-compatible ControlNet models so it is easier to run on resource-constrained hardware. You can find these checkpoints on the [🤗 Diffusers Hub organization](https://huggingface.co/diffusers)! Let's use a SDXL ControlNet conditioned on canny images to generate an image. 
Start by loading an image and prepare the canny image: ```py from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL from diffusers.utils import load_image, make_image_grid from PIL import Image import cv2 import numpy as np import torch original_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png" ) image = np.array(original_image) low_threshold = 100 high_threshold = 200 image = cv2.Canny(image, low_threshold, high_threshold) image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) canny_image = Image.fromarray(image) make_image_grid([original_image, canny_image], rows=1, cols=2) ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/hf-logo-canny.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">canny image</figcaption> </div> </div> Load a SDXL ControlNet model conditioned on canny edge detection and pass it to the [`StableDiffusionXLControlNetPipeline`]. You can also enable model offloading to reduce memory usage. ```py controlnet = ControlNetModel.from_pretrained( "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16, use_safetensors=True ) vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, use_safetensors=True) pipe = StableDiffusionXLControlNetPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16, use_safetensors=True ) pipe.enable_model_cpu_offload() ``` Now pass your prompt (and optionally a negative prompt if you're using one) and canny image to the pipeline: <Tip> The [`controlnet_conditioning_scale`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/controlnet#diffusers.StableDiffusionControlNetPipeline.__call__.controlnet_conditioning_scale) parameter determines how much weight to assign to the conditioning inputs. A value of 0.5 is recommended for good generalization, but feel free to experiment with this number! 
</Tip> ```py prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting" negative_prompt = 'low quality, bad quality, sketches' image = pipe( prompt, negative_prompt=negative_prompt, image=canny_image, controlnet_conditioning_scale=0.5, ).images[0] make_image_grid([original_image, canny_image, image], rows=1, cols=3) ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/diffusers/controlnet-canny-sdxl-1.0/resolve/main/out_hug_lab_7.png"/> </div> You can use [`StableDiffusionXLControlNetPipeline`] in guess mode as well by setting the parameter to `True`: ```py from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL from diffusers.utils import load_image, make_image_grid import numpy as np import torch import cv2 from PIL import Image prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting" negative_prompt = "low quality, bad quality, sketches" original_image = load_image( "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png" ) controlnet = ControlNetModel.from_pretrained( "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16, use_safetensors=True ) vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, use_safetensors=True) pipe = StableDiffusionXLControlNetPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16, use_safetensors=True ) pipe.enable_model_cpu_offload() image = np.array(original_image) image = cv2.Canny(image, 100, 200) image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) canny_image = Image.fromarray(image) image = pipe( prompt, negative_prompt=negative_prompt, controlnet_conditioning_scale=0.5, image=canny_image, guess_mode=True, ).images[0] make_image_grid([original_image, canny_image, image], rows=1, cols=3) ``` <Tip> You can use a refiner model with `StableDiffusionXLControlNetPipeline` to improve image quality, just like you can with a regular `StableDiffusionXLPipeline`. See the [Refine image quality](./sdxl#refine-image-quality) section to learn how to use the refiner model. Make sure to use `StableDiffusionXLControlNetPipeline` and pass `image` and `controlnet_conditioning_scale`. ```py base = StableDiffusionXLControlNetPipeline(...) image = base( prompt=prompt, controlnet_conditioning_scale=0.5, image=canny_image, num_inference_steps=40, denoising_end=0.8, output_type="latent", ).images # rest exactly as with StableDiffusionXLPipeline ``` </Tip> ## MultiControlNet <Tip> Replace the SDXL model with a model like [stable-diffusion-v1-5/stable-diffusion-v1-5](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) to use multiple conditioning inputs with Stable Diffusion models. </Tip> You can compose multiple ControlNet conditionings from different image inputs to create a *MultiControlNet*. To get better results, it is often helpful to: 1. mask conditionings such that they don't overlap (for example, mask the area of a canny image where the pose conditioning is located) 2. 
experiment with the [`controlnet_conditioning_scale`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/controlnet#diffusers.StableDiffusionControlNetPipeline.__call__.controlnet_conditioning_scale) parameter to determine how much weight to assign to each conditioning input In this example, you'll combine a canny image and a human pose estimation image to generate a new image. Prepare the canny image conditioning: ```py from diffusers.utils import load_image, make_image_grid from PIL import Image import numpy as np import cv2 original_image = load_image( "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/landscape.png" ) image = np.array(original_image) low_threshold = 100 high_threshold = 200 image = cv2.Canny(image, low_threshold, high_threshold) # zero out middle columns of image where pose will be overlaid zero_start = image.shape[1] // 4 zero_end = zero_start + image.shape[1] // 2 image[:, zero_start:zero_end] = 0 image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) canny_image = Image.fromarray(image) make_image_grid([original_image, canny_image], rows=1, cols=2) ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/landscape.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/controlnet/landscape_canny_masked.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">canny image</figcaption> </div> </div> For human pose estimation, install [controlnet_aux](https://github.com/patrickvonplaten/controlnet_aux): ```py # uncomment to install the necessary library in Colab #!pip install -q controlnet-aux ``` Prepare the human pose estimation conditioning: ```py from controlnet_aux import OpenposeDetector openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet") original_image = load_image( "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/person.png" ) openpose_image = openpose(original_image) make_image_grid([original_image, openpose_image], rows=1, cols=2) ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/person.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/controlnet/person_pose.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">human pose image</figcaption> </div> </div> Load a list of ControlNet models that correspond to each conditioning, and pass them to the [`StableDiffusionXLControlNetPipeline`]. Use the faster [`UniPCMultistepScheduler`] and enable model offloading to reduce memory usage. 
```py from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL, UniPCMultistepScheduler import torch controlnets = [ ControlNetModel.from_pretrained( "thibaud/controlnet-openpose-sdxl-1.0", torch_dtype=torch.float16 ), ControlNetModel.from_pretrained( "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16, use_safetensors=True ), ] vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, use_safetensors=True) pipe = StableDiffusionXLControlNetPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnets, vae=vae, torch_dtype=torch.float16, use_safetensors=True ) pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() ``` Now you can pass your prompt (an optional negative prompt if you're using one), canny image, and pose image to the pipeline: ```py prompt = "a giant standing in a fantasy landscape, best quality" negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality" generator = torch.manual_seed(1) images = [openpose_image.resize((1024, 1024)), canny_image.resize((1024, 1024))] images = pipe( prompt, image=images, num_inference_steps=25, generator=generator, negative_prompt=negative_prompt, num_images_per_prompt=3, controlnet_conditioning_scale=[1.0, 0.8], ).images make_image_grid([original_image, canny_image, openpose_image, images[0].resize((512, 512)), images[1].resize((512, 512)), images[2].resize((512, 512))], rows=2, cols=3) ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/multicontrolnet.png"/> </div>
diffusers/docs/source/en/using-diffusers/controlnet.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/controlnet.md", "repo_id": "diffusers", "token_count": 8675 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Model files and layouts [[open-in-colab]] Diffusion models are saved in various file types and organized in different layouts. Diffusers stores model weights as safetensors files in *Diffusers-multifolder* layout and it also supports loading files (like safetensors and ckpt files) from a *single-file* layout which is commonly used in the diffusion ecosystem. Each layout has its own benefits and use cases, and this guide will show you how to load the different files and layouts, and how to convert them. ## Files PyTorch model weights are typically saved with Python's [pickle](https://docs.python.org/3/library/pickle.html) utility as ckpt or bin files. However, pickle is not secure and pickled files may contain malicious code that can be executed. This vulnerability is a serious concern given the popularity of model sharing. To address this security issue, the [Safetensors](https://hf.co/docs/safetensors) library was developed as a secure alternative to pickle, which saves models as safetensors files. ### safetensors > [!TIP] > Learn more about the design decisions and why safetensor files are preferred for saving and loading model weights in the [Safetensors audited as really safe and becoming the default](https://blog.eleuther.ai/safetensors-security-audit/) blog post. [Safetensors](https://hf.co/docs/safetensors) is a safe and fast file format for securely storing and loading tensors. Safetensors restricts the header size to limit certain types of attacks, supports lazy loading (useful for distributed setups), and has generally faster loading speeds. Make sure you have the [Safetensors](https://hf.co/docs/safetensors) library installed. ```py !pip install safetensors ``` Safetensors stores weights in a safetensors file. Diffusers loads safetensors files by default if they're available and the Safetensors library is installed. There are two ways safetensors files can be organized: 1. Diffusers-multifolder layout: there may be several separate safetensors files, one for each pipeline component (text encoder, UNet, VAE), organized in subfolders (check out the [stable-diffusion-v1-5/stable-diffusion-v1-5](https://hf.co/stable-diffusion-v1-5/stable-diffusion-v1-5/tree/main) repository as an example) 2. single-file layout: all the model weights may be saved in a single file (check out the [WarriorMama777/OrangeMixs](https://hf.co/WarriorMama777/OrangeMixs/tree/main/Models/AbyssOrangeMix) repository as an example) <hfoptions id="safetensors"> <hfoption id="multifolder"> Use the [`~DiffusionPipeline.from_pretrained`] method to load a model with safetensors files stored in multiple folders. 
```py from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True ) ``` </hfoption> <hfoption id="single file"> Use the [`~loaders.FromSingleFileMixin.from_single_file`] method to load a model with all the weights stored in a single safetensors file. ```py from diffusers import StableDiffusionPipeline pipeline = StableDiffusionPipeline.from_single_file( "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors" ) ``` </hfoption> </hfoptions> #### LoRA files [LoRA](https://hf.co/docs/peft/conceptual_guides/adapter#low-rank-adaptation-lora) is a lightweight adapter that is fast and easy to train, making them especially popular for generating images in a certain way or style. These adapters are commonly stored in a safetensors file, and are widely popular on model sharing platforms like [civitai](https://civitai.com/). LoRAs are loaded into a base model with the [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] method. ```py from diffusers import StableDiffusionXLPipeline import torch # base model pipeline = StableDiffusionXLPipeline.from_pretrained( "Lykon/dreamshaper-xl-1-0", torch_dtype=torch.float16, variant="fp16" ).to("cuda") # download LoRA weights !wget https://civitai.com/api/download/models/168776 -O blueprintify.safetensors # load LoRA weights pipeline.load_lora_weights(".", weight_name="blueprintify.safetensors") prompt = "bl3uprint, a highly detailed blueprint of the empire state building, explaining how to build all parts, many txt, blueprint grid backdrop" negative_prompt = "lowres, cropped, worst quality, low quality, normal quality, artifacts, signature, watermark, username, blurry, more than one bridge, bad architecture" image = pipeline( prompt=prompt, negative_prompt=negative_prompt, generator=torch.manual_seed(0), ).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/blueprint-lora.png"/> </div> ### ckpt > [!WARNING] > Pickled files may be unsafe because they can be exploited to execute malicious code. It is recommended to use safetensors files instead where possible, or convert the weights to safetensors files. PyTorch's [torch.save](https://pytorch.org/docs/stable/generated/torch.save.html) function uses Python's [pickle](https://docs.python.org/3/library/pickle.html) utility to serialize and save models. These files are saved as a ckpt file and they contain the entire model's weights. Use the [`~loaders.FromSingleFileMixin.from_single_file`] method to directly load a ckpt file. ```py from diffusers import StableDiffusionPipeline pipeline = StableDiffusionPipeline.from_single_file( "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned.ckpt" ) ``` ## Storage layout There are two ways model files are organized, either in a Diffusers-multifolder layout or in a single-file layout. The Diffusers-multifolder layout is the default, and each component file (text encoder, UNet, VAE) is stored in a separate subfolder. Diffusers also supports loading models from a single-file layout where all the components are bundled together. ### Diffusers-multifolder The Diffusers-multifolder layout is the default storage layout for Diffusers. Each component's (text encoder, UNet, VAE) weights are stored in a separate subfolder. 
The weights can be stored as safetensors or ckpt files.

<div class="flex flex-row gap-4">
  <div class="flex-1">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/multifolder-layout.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">multifolder layout</figcaption>
  </div>
  <div class="flex-1">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/multifolder-unet.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">UNet subfolder</figcaption>
  </div>
</div>

To load from Diffusers-multifolder layout, use the [`~DiffusionPipeline.from_pretrained`] method.

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
).to("cuda")
```

Benefits of using the Diffusers-multifolder layout include:

1. Faster to load each component file individually or in parallel.
2. Reduced memory usage because you only load the components you need. For example, models like [SDXL Turbo](https://hf.co/stabilityai/sdxl-turbo), [SDXL Lightning](https://hf.co/ByteDance/SDXL-Lightning), and [Hyper-SD](https://hf.co/ByteDance/Hyper-SD) have the same components except for the UNet. You can reuse their shared components with the [`~DiffusionPipeline.from_pipe`] method without consuming any additional memory (take a look at the [Reuse a pipeline](./loading#reuse-a-pipeline) guide) and only load the UNet. This way, you don't need to download redundant components and unnecessarily use more memory.

```py
import torch
from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler

# download one model
sdxl_pipeline = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
).to("cuda")

# switch UNet for another model
unet = UNet2DConditionModel.from_pretrained(
    "stabilityai/sdxl-turbo",
    subfolder="unet",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True
)
# reuse all the same components in new model except for the UNet
turbo_pipeline = StableDiffusionXLPipeline.from_pipe(
    sdxl_pipeline, unet=unet,
).to("cuda")
turbo_pipeline.scheduler = EulerDiscreteScheduler.from_config(
    turbo_pipeline.scheduler.config,
    timestep_spacing="trailing"
)
image = turbo_pipeline(
    "an astronaut riding a unicorn on mars",
    num_inference_steps=1,
    guidance_scale=0.0,
).images[0]
image
```

3. Reduced storage requirements because if a component, such as the SDXL [VAE](https://hf.co/madebyollin/sdxl-vae-fp16-fix), is shared across multiple models, you only need to download and store a single copy of it instead of downloading and storing it multiple times. For 10 SDXL models, this can save ~3.5GB of storage. The storage savings are even greater for newer models like PixArt Sigma, where the [text encoder](https://hf.co/PixArt-alpha/PixArt-Sigma-XL-2-1024-MS/tree/main/text_encoder) alone is ~19GB!
4. Flexibility to replace a component in the model with a newer or better version.
```py
from diffusers import DiffusionPipeline, AutoencoderKL

vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, use_safetensors=True)
pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    vae=vae,
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
).to("cuda")
```

5. More visibility and information about a model's components, which are stored in a [config.json](https://hf.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/unet/config.json) file in each component subfolder.

### Single-file

The single-file layout stores all the model weights in a single file. All the model components (text encoder, UNet, VAE) weights are kept together instead of separately in subfolders. This can be a safetensors or ckpt file.

<div class="flex justify-center">
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/single-file-layout.png"/>
</div>

To load from a single-file layout, use the [`~loaders.FromSingleFileMixin.from_single_file`] method.

```py
import torch
from diffusers import StableDiffusionXLPipeline

pipeline = StableDiffusionXLPipeline.from_single_file(
    "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
).to("cuda")
```

Benefits of using a single-file layout include:

1. Easy compatibility with diffusion interfaces such as [ComfyUI](https://github.com/comfyanonymous/ComfyUI) or [Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) which commonly use a single-file layout.
2. Easier to manage (download and share) a single file.

### DDUF

> [!WARNING]
> DDUF is an experimental file format and APIs related to it can change in the future.

DDUF (**D**DUF **D**iffusion **U**nified **F**ormat) is a file format designed to make storing, distributing, and using diffusion models much easier. Built on the ZIP file format, DDUF offers a standardized, efficient, and flexible way to package all parts of a diffusion model into a single, easy-to-manage file. It provides a balance between Diffusers multi-folder format and the widely popular single-file format.

Learn more details about DDUF on the Hugging Face Hub [documentation](https://huggingface.co/docs/hub/dduf).

Pass a checkpoint to the `dduf_file` parameter to load it in [`DiffusionPipeline`].

```py
from diffusers import DiffusionPipeline
import torch

pipe = DiffusionPipeline.from_pretrained(
    "DDUF/FLUX.1-dev-DDUF", dduf_file="FLUX.1-dev.dduf", torch_dtype=torch.bfloat16
).to("cuda")
image = pipe(
    "photo a cat holding a sign that says Diffusers", num_inference_steps=50, guidance_scale=3.5
).images[0]
image.save("cat.png")
```

To save a pipeline as a `.dduf` checkpoint, use the [`~huggingface_hub.export_folder_as_dduf`] utility, which takes care of all the necessary file-level validations.

```py
from huggingface_hub import export_folder_as_dduf
from diffusers import DiffusionPipeline
import torch

pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)

save_folder = "flux-dev"
pipe.save_pretrained("flux-dev")
export_folder_as_dduf("flux-dev.dduf", folder_path=save_folder)
```

> [!TIP]
> Packaging and loading quantized checkpoints in the DDUF format is supported as long as they respect the multi-folder structure.
## Convert layout and files

Diffusers provides many scripts and methods to convert storage layouts and file formats to enable broader support across the diffusion ecosystem.

Take a look at the [diffusers/scripts](https://github.com/huggingface/diffusers/tree/main/scripts) collection to find a script that fits your conversion needs.

> [!TIP]
> Scripts that have "`to_diffusers`" appended at the end mean they convert a model to the Diffusers-multifolder layout. Each script has its own specific set of arguments for configuring the conversion, so make sure you check what arguments are available!

For example, to convert a Stable Diffusion XL model stored in Diffusers-multifolder layout to a single-file layout, run the [convert_diffusers_to_original_sdxl.py](https://github.com/huggingface/diffusers/blob/main/scripts/convert_diffusers_to_original_sdxl.py) script. Provide the path to the model to convert, and the path to save the converted model to. You can optionally specify whether you want to save the model as a safetensors file and whether to save the model in half-precision.

```bash
python convert_diffusers_to_original_sdxl.py --model_path path/to/model/to/convert --checkpoint_path path/to/save/model/to --use_safetensors
```

You can also save a model to Diffusers-multifolder layout with the [`~DiffusionPipeline.save_pretrained`] method. This creates a directory for you if it doesn't already exist, and it also saves the files as a safetensors file by default.

```py
from diffusers import StableDiffusionXLPipeline

pipeline = StableDiffusionXLPipeline.from_single_file(
    "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors",
)
# pass the local directory to save the converted multifolder layout to
pipeline.save_pretrained("sdxl-base-1.0")
```

Lastly, there are also Spaces, such as [SD To Diffusers](https://hf.co/spaces/diffusers/sd-to-diffusers) and [SD-XL To Diffusers](https://hf.co/spaces/diffusers/sdxl-to-diffusers), that provide a more user-friendly interface for converting models to Diffusers-multifolder layout. This is the easiest and most convenient option for converting layouts, and it'll open a PR on your model repository with the converted files. However, this option is not as reliable as running a script, and the Space may fail for more complicated models.

## Single-file layout usage

Now that you're familiar with the differences between the Diffusers-multifolder and single-file layout, this section shows you how to load models and pipeline components, customize configuration options for loading, and load local files with the [`~loaders.FromSingleFileMixin.from_single_file`] method.

### Load a pipeline or model

Pass the file path of the pipeline or model to the [`~loaders.FromSingleFileMixin.from_single_file`] method to load it.

<hfoptions id="pipeline-model">
<hfoption id="pipeline">

```py
from diffusers import StableDiffusionXLPipeline

ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0_0.9vae.safetensors"
pipeline = StableDiffusionXLPipeline.from_single_file(ckpt_path)
```

</hfoption>
<hfoption id="model">

```py
from diffusers import StableCascadeUNet

ckpt_path = "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_lite.safetensors"
model = StableCascadeUNet.from_single_file(ckpt_path)
```

</hfoption>
</hfoptions>

Customize components in the pipeline by passing them directly to the [`~loaders.FromSingleFileMixin.from_single_file`] method. For example, you can use a different scheduler in a pipeline.
```py
from diffusers import StableDiffusionXLPipeline, DDIMScheduler

ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0_0.9vae.safetensors"
scheduler = DDIMScheduler()
pipeline = StableDiffusionXLPipeline.from_single_file(ckpt_path, scheduler=scheduler)
```

Or you could use a ControlNet model in the pipeline.

```py
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel

ckpt_path = "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny")
pipeline = StableDiffusionControlNetPipeline.from_single_file(ckpt_path, controlnet=controlnet)
```

### Customize configuration options

Models have a configuration file that defines their attributes like the number of inputs in a UNet. A pipeline's configuration options are available in its class. For example, if you look at the [`StableDiffusionXLInstructPix2PixPipeline`] class, there is an option to scale the image latents with the `is_cosxl_edit` parameter.

These configuration files can be found in the model's Hub repository or another location from which the configuration file originated (for example, a GitHub repository or locally on your device).

<hfoptions id="config-file">
<hfoption id="Hub configuration file">

> [!TIP]
> The [`~loaders.FromSingleFileMixin.from_single_file`] method automatically maps the checkpoint to the appropriate model repository, but there are cases where it is useful to use the `config` parameter. For example, if the model components in the checkpoint are different from the original checkpoint or if a checkpoint doesn't have the necessary metadata to correctly determine the configuration to use for the pipeline.

The [`~loaders.FromSingleFileMixin.from_single_file`] method automatically determines the configuration to use from the configuration file in the model repository. You could also explicitly specify the configuration to use by providing the repository id to the `config` parameter.

```py
from diffusers import StableDiffusionXLPipeline

ckpt_path = "https://huggingface.co/segmind/SSD-1B/blob/main/SSD-1B.safetensors"
repo_id = "segmind/SSD-1B"
pipeline = StableDiffusionXLPipeline.from_single_file(ckpt_path, config=repo_id)
```

The model loads the configuration file for the [UNet](https://huggingface.co/segmind/SSD-1B/blob/main/unet/config.json), [VAE](https://huggingface.co/segmind/SSD-1B/blob/main/vae/config.json), and [text encoder](https://huggingface.co/segmind/SSD-1B/blob/main/text_encoder/config.json) from their respective subfolders in the repository.

</hfoption>
<hfoption id="original configuration file">

The [`~loaders.FromSingleFileMixin.from_single_file`] method can also load the original configuration file of a pipeline that is stored elsewhere. Pass a local path or URL of the original configuration file to the `original_config` parameter.
```py from diffusers import StableDiffusionXLPipeline ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0_0.9vae.safetensors" original_config = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml" pipeline = StableDiffusionXLPipeline.from_single_file(ckpt_path, original_config=original_config) ``` > [!TIP] > Diffusers attempts to infer the pipeline components based on the type signatures of the pipeline class when you use `original_config` with `local_files_only=True`, instead of fetching the configuration files from the model repository on the Hub. This prevents backward breaking changes in code that can't connect to the internet to fetch the necessary configuration files. > > This is not as reliable as providing a path to a local model repository with the `config` parameter, and might lead to errors during pipeline configuration. To avoid errors, run the pipeline with `local_files_only=False` once to download the appropriate pipeline configuration files to the local cache. </hfoption> </hfoptions> While the configuration files specify the pipeline or models default parameters, you can override them by providing the parameters directly to the [`~loaders.FromSingleFileMixin.from_single_file`] method. Any parameter supported by the model or pipeline class can be configured in this way. <hfoptions id="override"> <hfoption id="pipeline"> For example, to scale the image latents in [`StableDiffusionXLInstructPix2PixPipeline`] pass the `is_cosxl_edit` parameter. ```python from diffusers import StableDiffusionXLInstructPix2PixPipeline ckpt_path = "https://huggingface.co/stabilityai/cosxl/blob/main/cosxl_edit.safetensors" pipeline = StableDiffusionXLInstructPix2PixPipeline.from_single_file(ckpt_path, config="diffusers/sdxl-instructpix2pix-768", is_cosxl_edit=True) ``` </hfoption> <hfoption id="model"> For example, to upcast the attention dimensions in a [`UNet2DConditionModel`] pass the `upcast_attention` parameter. ```python from diffusers import UNet2DConditionModel ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0_0.9vae.safetensors" model = UNet2DConditionModel.from_single_file(ckpt_path, upcast_attention=True) ``` </hfoption> </hfoptions> ### Local files In Diffusers>=v0.28.0, the [`~loaders.FromSingleFileMixin.from_single_file`] method attempts to configure a pipeline or model by inferring the model type from the keys in the checkpoint file. The inferred model type is used to determine the appropriate model repository on the Hugging Face Hub to configure the model or pipeline. For example, any single file checkpoint based on the Stable Diffusion XL base model will use the [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) model repository to configure the pipeline. But if you're working in an environment with restricted internet access, you should download the configuration files with the [`~huggingface_hub.snapshot_download`] function, and the model checkpoint with the [`~huggingface_hub.hf_hub_download`] function. By default, these files are downloaded to the Hugging Face Hub [cache directory](https://huggingface.co/docs/huggingface_hub/en/guides/manage-cache), but you can specify a preferred directory to download the files to with the `local_dir` parameter. 
Pass the configuration and checkpoint paths to the [`~loaders.FromSingleFileMixin.from_single_file`] method to load locally.

<hfoptions id="local">
<hfoption id="Hub cache directory">

```python
from diffusers import StableDiffusionXLPipeline
from huggingface_hub import hf_hub_download, snapshot_download

my_local_checkpoint_path = hf_hub_download(
    repo_id="segmind/SSD-1B",
    filename="SSD-1B.safetensors"
)

my_local_config_path = snapshot_download(
    repo_id="segmind/SSD-1B",
    allow_patterns=["*.json", "**/*.json", "*.txt", "**/*.txt"]
)

pipeline = StableDiffusionXLPipeline.from_single_file(my_local_checkpoint_path, config=my_local_config_path, local_files_only=True)
```

</hfoption>
<hfoption id="specific local directory">

```python
from diffusers import StableDiffusionXLPipeline
from huggingface_hub import hf_hub_download, snapshot_download

my_local_checkpoint_path = hf_hub_download(
    repo_id="segmind/SSD-1B",
    filename="SSD-1B.safetensors",
    local_dir="my_local_checkpoints"
)

my_local_config_path = snapshot_download(
    repo_id="segmind/SSD-1B",
    allow_patterns=["*.json", "**/*.json", "*.txt", "**/*.txt"],
    local_dir="my_local_config"
)

pipeline = StableDiffusionXLPipeline.from_single_file(my_local_checkpoint_path, config=my_local_config_path, local_files_only=True)
```

</hfoption>
</hfoptions>

#### Local files without symlink

> [!TIP]
> In huggingface_hub>=v0.23.0, the `local_dir_use_symlinks` argument isn't necessary for the [`~huggingface_hub.hf_hub_download`] and [`~huggingface_hub.snapshot_download`] functions.

The [`~loaders.FromSingleFileMixin.from_single_file`] method relies on the [huggingface_hub](https://hf.co/docs/huggingface_hub/index) caching mechanism to fetch and store checkpoints and configuration files for models and pipelines. If you're working with a file system that does not support symlinking, you should download the checkpoint file to a local directory first, and disable symlinking with the `local_dir_use_symlinks=False` parameter in the [`~huggingface_hub.hf_hub_download`] and [`~huggingface_hub.snapshot_download`] functions.

```python
from huggingface_hub import hf_hub_download, snapshot_download

my_local_checkpoint_path = hf_hub_download(
    repo_id="segmind/SSD-1B",
    filename="SSD-1B.safetensors",
    local_dir="my_local_checkpoints",
    local_dir_use_symlinks=False
)
print("My local checkpoint: ", my_local_checkpoint_path)

my_local_config_path = snapshot_download(
    repo_id="segmind/SSD-1B",
    allow_patterns=["*.json", "**/*.json", "*.txt", "**/*.txt"],
    local_dir_use_symlinks=False,
)
print("My local config: ", my_local_config_path)
```

Then you can pass the local paths to the `pretrained_model_link_or_path` and `config` parameters.

```python
pipeline = StableDiffusionXLPipeline.from_single_file(my_local_checkpoint_path, config=my_local_config_path, local_files_only=True)
```
diffusers/docs/source/en/using-diffusers/other-formats.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/other-formats.md", "repo_id": "diffusers", "token_count": 8552 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Prompt techniques [[open-in-colab]] Prompts are important because they describe what you want a diffusion model to generate. The best prompts are detailed, specific, and well-structured to help the model realize your vision. But crafting a great prompt takes time and effort and sometimes it may not be enough because language and words can be imprecise. This is where you need to boost your prompt with other techniques, such as prompt enhancing and prompt weighting, to get the results you want. This guide will show you how you can use these prompt techniques to generate high-quality images with lower effort and adjust the weight of certain keywords in a prompt. ## Prompt engineering > [!TIP] > This is not an exhaustive guide on prompt engineering, but it will help you understand the necessary parts of a good prompt. We encourage you to continue experimenting with different prompts and combine them in new ways to see what works best. As you write more prompts, you'll develop an intuition for what works and what doesn't! New diffusion models do a pretty good job of generating high-quality images from a basic prompt, but it is still important to create a well-written prompt to get the best results. Here are a few tips for writing a good prompt: 1. What is the image *medium*? Is it a photo, a painting, a 3D illustration, or something else? 2. What is the image *subject*? Is it a person, animal, object, or scene? 3. What *details* would you like to see in the image? This is where you can get really creative and have a lot of fun experimenting with different words to bring your image to life. For example, what is the lighting like? What is the vibe and aesthetic? What kind of art or illustration style are you looking for? The more specific and precise words you use, the better the model will understand what you want to generate. <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/plain-prompt.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">"A photo of a banana-shaped couch in a living room"</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/detail-prompt.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">"A vibrant yellow banana-shaped couch sits in a cozy living room, its curve cradling a pile of colorful cushions. on the wooden floor, a patterned rug adds a touch of eclectic charm, and a potted plant sits in the corner, reaching towards the sunlight filtering through the windows"</figcaption> </div> </div> ## Prompt enhancing with GPT2 Prompt enhancing is a technique for quickly improving prompt quality without spending too much effort constructing one. 
It uses a model like GPT2 pretrained on Stable Diffusion text prompts to automatically enrich a prompt with additional important keywords to generate high-quality images.

The technique works by curating a list of specific keywords and forcing the model to generate those words to enhance the original prompt. This way, your prompt can be "a cat" and GPT2 can enhance the prompt to "cinematic film still of a cat basking in the sun on a roof in Turkey, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain quality sharp focus beautiful detailed intricate stunning amazing epic".

> [!TIP]
> You should also use an [*offset noise*](https://www.crosslabs.org//blog/diffusion-with-offset-noise) LoRA to improve the contrast in bright and dark images and create better lighting overall. This [LoRA](https://hf.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_offset_example-lora_1.0.safetensors) is available from [stabilityai/stable-diffusion-xl-base-1.0](https://hf.co/stabilityai/stable-diffusion-xl-base-1.0).

Start by defining certain styles and a list of words (you can check out a more comprehensive list of [words](https://hf.co/LykosAI/GPT-Prompt-Expansion-Fooocus-v2/blob/main/positive.txt) and [styles](https://github.com/lllyasviel/Fooocus/tree/main/sdxl_styles) used by Fooocus) to enhance a prompt with.

```py
import torch
from transformers import GenerationConfig, GPT2LMHeadModel, GPT2Tokenizer, LogitsProcessor, LogitsProcessorList

from diffusers import StableDiffusionXLPipeline

styles = {
    "cinematic": "cinematic film still of {prompt}, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain",
    "anime": "anime artwork of {prompt}, anime style, key visual, vibrant, studio anime, highly detailed",
    "photographic": "cinematic photo of {prompt}, 35mm photograph, film, professional, 4k, highly detailed",
    "comic": "comic of {prompt}, graphic illustration, comic art, graphic novel art, vibrant, highly detailed",
    "lineart": "line art drawing {prompt}, professional, sleek, modern, minimalist, graphic, line art, vector graphics",
    "pixelart": "pixel-art {prompt}, low-res, blocky, pixel art style, 8-bit graphics",
}

words = [
    "aesthetic", "astonishing", "beautiful", "breathtaking", "composition", "contrasted", "epic", "moody", "enhanced",
    "exceptional", "fascinating", "flawless", "glamorous", "glorious", "illumination", "impressive", "improved",
    "inspirational", "magnificent", "majestic", "hyperrealistic", "smooth", "sharp", "focus", "stunning", "detailed",
    "intricate", "dramatic", "high", "quality", "perfect", "light", "ultra", "highly", "radiant", "satisfying",
    "soothing", "sophisticated", "stylish", "sublime", "terrific", "touching", "timeless", "wonderful", "unbelievable",
    "elegant", "awesome", "amazing", "dynamic", "trendy",
]
```

You may have noticed in the `words` list that there are certain words that can be paired together to create something more meaningful. For example, the words "high" and "quality" can be combined to create "high quality". Let's pair these words together and remove the words that can't be paired.
```py
word_pairs = ["highly detailed", "high quality", "enhanced quality", "perfect composition", "dynamic light"]

def find_and_order_pairs(s, pairs):
    words = s.split()
    found_pairs = []
    for pair in pairs:
        pair_words = pair.split()
        if pair_words[0] in words and pair_words[1] in words:
            found_pairs.append(pair)
            words.remove(pair_words[0])
            words.remove(pair_words[1])

    for word in words[:]:
        for pair in pairs:
            if word in pair.split():
                words.remove(word)
                break
    ordered_pairs = ", ".join(found_pairs)
    remaining_s = ", ".join(words)
    return ordered_pairs, remaining_s
```

Next, implement a custom [`~transformers.LogitsProcessor`] class that assigns tokens in the `words` list a value of 0 and assigns tokens not in the `words` list a negative value so they aren't picked during generation. This way, generation is biased towards words in the `words` list. After a word from the list is used, it is also assigned a negative value so it isn't picked again.

```py
class CustomLogitsProcessor(LogitsProcessor):
    def __init__(self, bias):
        super().__init__()
        self.bias = bias

    def __call__(self, input_ids, scores):
        if len(input_ids.shape) == 2:
            last_token_id = input_ids[0, -1]
            # once a word has been generated, bias it away so it isn't picked again
            self.bias[last_token_id] = -1e10
        return scores + self.bias

# note: `tokenizer` refers to the GPT2 tokenizer loaded in the next code block
word_ids = [tokenizer.encode(word, add_prefix_space=True)[0] for word in words]
bias = torch.full((tokenizer.vocab_size,), -float("Inf")).to("cuda")
bias[word_ids] = 0
processor = CustomLogitsProcessor(bias)
processor_list = LogitsProcessorList([processor])
```

Combine the prompt and the `cinematic` style prompt defined in the `styles` dictionary earlier.

```py
prompt = "a cat basking in the sun on a roof in Turkey"
style = "cinematic"

prompt = styles[style].format(prompt=prompt)
prompt
"cinematic film still of a cat basking in the sun on a roof in Turkey, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain"
```

Load a GPT2 tokenizer and model from the [Gustavosta/MagicPrompt-Stable-Diffusion](https://huggingface.co/Gustavosta/MagicPrompt-Stable-Diffusion) checkpoint (this specific checkpoint is trained to generate prompts) to enhance the prompt.

```py
tokenizer = GPT2Tokenizer.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
model = GPT2LMHeadModel.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion", torch_dtype=torch.float16).to(
    "cuda"
)
model.eval()

inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
token_count = inputs["input_ids"].shape[1]
max_new_tokens = 50 - token_count

generation_config = GenerationConfig(
    penalty_alpha=0.7,
    top_k=50,
    eos_token_id=model.config.eos_token_id,
    pad_token_id=model.config.eos_token_id,
    do_sample=True,
)

with torch.no_grad():
    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=max_new_tokens,
        generation_config=generation_config,
        logits_processor=processor_list,
    )
```

Then you can combine the input prompt and the generated prompt. Feel free to take a look at what the generated prompt (`generated_part`) is, the word pairs that were found (`pairs`), and the remaining words (`words`). This is all packed together in the `enhanced_prompt`.
```py
output_tokens = [tokenizer.decode(generated_id, skip_special_tokens=True) for generated_id in generated_ids]
input_part, generated_part = output_tokens[0][: len(prompt)], output_tokens[0][len(prompt) :]
pairs, words = find_and_order_pairs(generated_part, word_pairs)
formatted_generated_part = pairs + ", " + words
enhanced_prompt = input_part + ", " + formatted_generated_part
enhanced_prompt
"cinematic film still of a cat basking in the sun on a roof in Turkey, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain quality sharp focus beautiful detailed intricate stunning amazing epic"
```

Finally, load a pipeline and the offset noise LoRA with a *low weight* to generate an image with the enhanced prompt.

```py
pipeline = StableDiffusionXLPipeline.from_pretrained(
    "RunDiffusion/Juggernaut-XL-v9", torch_dtype=torch.float16, variant="fp16"
).to("cuda")

pipeline.load_lora_weights(
    "stabilityai/stable-diffusion-xl-base-1.0",
    weight_name="sd_xl_offset_example-lora_1.0.safetensors",
    adapter_name="offset",
)
pipeline.set_adapters(["offset"], adapter_weights=[0.2])

image = pipeline(
    enhanced_prompt,
    width=1152,
    height=896,
    guidance_scale=7.5,
    num_inference_steps=25,
).images[0]
image
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/non-enhanced-prompt.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">"a cat basking in the sun on a roof in Turkey"</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/enhanced-prompt.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">"cinematic film still of a cat basking in the sun on a roof in Turkey, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain"</figcaption>
  </div>
</div>

## Prompt weighting

Prompt weighting provides a way to emphasize or de-emphasize certain parts of a prompt, allowing for more control over the generated image. A prompt can include several concepts, which get turned into contextualized text embeddings. The embeddings are used by the model to condition its cross-attention layers to generate an image (read the Stable Diffusion [blog post](https://huggingface.co/blog/stable_diffusion) to learn more about how it works).

Prompt weighting works by increasing or decreasing the scale of the text embedding vector that corresponds to its concept in the prompt because you may not necessarily want the model to focus on all concepts equally. The easiest way to prepare the prompt-weighted embeddings is to use [Compel](https://github.com/damian0815/compel), a text prompt-weighting and blending library. Once you have the prompt-weighted embeddings, you can pass them to any pipeline that has a [`prompt_embeds`](https://huggingface.co/docs/diffusers/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline.__call__.prompt_embeds) (and optionally [`negative_prompt_embeds`](https://huggingface.co/docs/diffusers/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline.__call__.negative_prompt_embeds)) parameter, such as [`StableDiffusionPipeline`], [`StableDiffusionControlNetPipeline`], and [`StableDiffusionXLPipeline`].
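To build intuition for what prompt weighting does, the sketch below manually scales the token embeddings that correspond to a concept before passing them to the pipeline. This is only an illustration of the underlying idea, not how Compel is implemented (Compel additionally handles parsing, blending, and normalization, so prefer the Compel workflow shown in the rest of this guide). The 1.5 scale factor and the substring check on the decoded token are arbitrary choices for the example.

```py
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_safetensors=True).to("cuda")

prompt = "a red cat playing with a ball"

# encode the prompt into per-token embeddings with the pipeline's own tokenizer and text encoder
text_inputs = pipe.tokenizer(
    prompt, padding="max_length", max_length=pipe.tokenizer.model_max_length, return_tensors="pt"
)
with torch.no_grad():
    prompt_embeds = pipe.text_encoder(text_inputs.input_ids.to("cuda"))[0]

# naively upweight the concept "ball" by scaling the embeddings at its token positions
for position, token_id in enumerate(text_inputs.input_ids[0].tolist()):
    if "ball" in pipe.tokenizer.decode([token_id]):
        prompt_embeds[0, position] *= 1.5

image = pipe(prompt_embeds=prompt_embeds, num_inference_steps=20).images[0]
image
```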
<Tip> If your favorite pipeline doesn't have a `prompt_embeds` parameter, please open an [issue](https://github.com/huggingface/diffusers/issues/new/choose) so we can add it! </Tip> This guide will show you how to weight and blend your prompts with Compel in 🤗 Diffusers. Before you begin, make sure you have the latest version of Compel installed: ```py # uncomment to install in Colab #!pip install compel --upgrade ``` For this guide, let's generate an image with the prompt `"a red cat playing with a ball"` using the [`StableDiffusionPipeline`]: ```py from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler import torch pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_safetensors=True) pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) pipe.to("cuda") prompt = "a red cat playing with a ball" generator = torch.Generator(device="cpu").manual_seed(33) image = pipe(prompt, generator=generator, num_inference_steps=20).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/forest_0.png"/> </div> ### Weighting You'll notice there is no "ball" in the image! Let's use compel to upweight the concept of "ball" in the prompt. Create a [`Compel`](https://github.com/damian0815/compel/blob/main/doc/compel.md#compel-objects) object, and pass it a tokenizer and text encoder: ```py from compel import Compel compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder) ``` compel uses `+` or `-` to increase or decrease the weight of a word in the prompt. To increase the weight of "ball": <Tip> `+` corresponds to the value `1.1`, `++` corresponds to `1.1^2`, and so on. Similarly, `-` corresponds to `0.9` and `--` corresponds to `0.9^2`. Feel free to experiment with adding more `+` or `-` in your prompt! </Tip> ```py prompt = "a red cat playing with a ball++" ``` Pass the prompt to `compel_proc` to create the new prompt embeddings which are passed to the pipeline: ```py prompt_embeds = compel_proc(prompt) generator = torch.manual_seed(33) image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/forest_1.png"/> </div> To downweight parts of the prompt, use the `-` suffix: ```py prompt = "a red------- cat playing with a ball" prompt_embeds = compel_proc(prompt) generator = torch.manual_seed(33) image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-neg.png"/> </div> You can even up or downweight multiple concepts in the same prompt: ```py prompt = "a red cat++ playing with a ball----" prompt_embeds = compel_proc(prompt) generator = torch.manual_seed(33) image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-pos-neg.png"/> </div> ### Blending You can also create a weighted *blend* of prompts by adding `.blend()` to a list of prompts and passing it some weights. 
Your blend may not always produce the result you expect because it breaks some assumptions about how the text encoder functions, so just have fun and experiment with it!

```py
prompt_embeds = compel_proc('("a red cat playing with a ball", "jungle").blend(0.7, 0.8)')
generator = torch.Generator(device="cuda").manual_seed(33)

image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0]
image
```

<div class="flex justify-center">
  <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-blend.png"/>
</div>

### Conjunction

A conjunction diffuses each prompt independently and combines their results by their weighted sum. Add `.and()` to the end of a list of prompts to create a conjunction:

```py
prompt_embeds = compel_proc('["a red cat", "playing with a", "ball"].and()')
generator = torch.Generator(device="cuda").manual_seed(55)

image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0]
image
```

<div class="flex justify-center">
  <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-conj.png"/>
</div>

### Textual inversion

[Textual inversion](../training/text_inversion) is a technique for learning a specific concept from a few images, which you can then use to generate new images conditioned on that concept.

Create a pipeline and use the [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] function to load the textual inversion embeddings (feel free to browse the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer) for 100+ trained concepts):

```py
import torch
from diffusers import StableDiffusionPipeline
from compel import Compel, DiffusersTextualInversionManager

pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, variant="fp16"
).to("cuda")
pipe.load_textual_inversion("sd-concepts-library/midjourney-style")
```

Compel provides a `DiffusersTextualInversionManager` class to simplify prompt weighting with textual inversion. Instantiate `DiffusersTextualInversionManager` and pass it to the `Compel` class:

```py
textual_inversion_manager = DiffusersTextualInversionManager(pipe)
compel_proc = Compel(
    tokenizer=pipe.tokenizer,
    text_encoder=pipe.text_encoder,
    textual_inversion_manager=textual_inversion_manager,
)
```

Incorporate the concept into the prompt using the `<concept>` syntax:

```py
prompt_embeds = compel_proc('("A red cat++ playing with a ball <midjourney-style>")')

image = pipe(prompt_embeds=prompt_embeds).images[0]
image
```

<div class="flex justify-center">
  <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-text-inversion.png"/>
</div>

### DreamBooth

[DreamBooth](../training/dreambooth) is a technique for generating contextualized images of a subject given just a few images of the subject to train on. It is similar to textual inversion, but DreamBooth trains the full model whereas textual inversion only fine-tunes the text embeddings.
This means you should use [`~DiffusionPipeline.from_pretrained`] to load the DreamBooth model (feel free to browse the [Stable Diffusion Dreambooth Concepts Library](https://huggingface.co/sd-dreambooth-library) for 100+ trained models):

```py
import torch
from diffusers import DiffusionPipeline, UniPCMultistepScheduler
from compel import Compel

pipe = DiffusionPipeline.from_pretrained("sd-dreambooth-library/dndcoverart-v1", torch_dtype=torch.float16).to("cuda")
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
```

Create a `Compel` object with a tokenizer and text encoder, and pass your prompt to it. Depending on the model you use, you'll need to incorporate the model's unique identifier into your prompt. For example, the `dndcoverart-v1` model uses the identifier `dndcoverart`:

```py
compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)
prompt_embeds = compel_proc('("magazine cover of a dndcoverart dragon, high quality, intricate details, larry elmore art style").and()')
image = pipe(prompt_embeds=prompt_embeds).images[0]
image
```

<div class="flex justify-center">
  <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-dreambooth.png"/>
</div>

### Stable Diffusion XL

Stable Diffusion XL (SDXL) has two tokenizers and text encoders, so its usage is a bit different. To address this, you should pass both tokenizers and encoders to the `Compel` class:

```py
from compel import Compel, ReturnedEmbeddingsType
from diffusers import DiffusionPipeline
from diffusers.utils import make_image_grid
import torch

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", variant="fp16", use_safetensors=True, torch_dtype=torch.float16
).to("cuda")

compel = Compel(
    tokenizer=[pipeline.tokenizer, pipeline.tokenizer_2],
    text_encoder=[pipeline.text_encoder, pipeline.text_encoder_2],
    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
    requires_pooled=[False, True],
)
```

This time, let's upweight "ball" by a factor of 1.5 for the first prompt, and downweight "ball" by 0.6 for the second prompt.
The [`StableDiffusionXLPipeline`] also requires [`pooled_prompt_embeds`](https://huggingface.co/docs/diffusers/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLInpaintPipeline.__call__.pooled_prompt_embeds) (and optionally [`negative_pooled_prompt_embeds`](https://huggingface.co/docs/diffusers/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLInpaintPipeline.__call__.negative_pooled_prompt_embeds)) so you should pass those to the pipeline along with the conditioning tensors: ```py # apply weights prompt = ["a red cat playing with a (ball)1.5", "a red cat playing with a (ball)0.6"] conditioning, pooled = compel(prompt) # generate image generator = [torch.Generator().manual_seed(33) for _ in range(len(prompt))] images = pipeline(prompt_embeds=conditioning, pooled_prompt_embeds=pooled, generator=generator, num_inference_steps=30).images make_image_grid(images, rows=1, cols=2) ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/sdxl_ball1.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">"a red cat playing with a (ball)1.5"</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/sdxl_ball2.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">"a red cat playing with a (ball)0.6"</figcaption> </div> </div>
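Compel builds embeddings for a negative prompt the same way it does for a positive prompt. As a final sketch (continuing from the `compel` and `pipeline` objects created above), here is one way you might pair a weighted prompt with a negative prompt; the negative prompt text and the seed are arbitrary choices for illustration. If your positive and negative prompts differ a lot in length, you may also need Compel's utilities for padding the conditioning tensors to the same length.

```py
# a sketch of combining a weighted prompt with a negative prompt in SDXL
prompt = "a red cat playing with a (ball)1.5"
negative_prompt = "blurry, low quality, deformed"

conditioning, pooled = compel(prompt)
negative_conditioning, negative_pooled = compel(negative_prompt)

generator = torch.Generator().manual_seed(33)
image = pipeline(
    prompt_embeds=conditioning,
    pooled_prompt_embeds=pooled,
    negative_prompt_embeds=negative_conditioning,
    negative_pooled_prompt_embeds=negative_pooled,
    generator=generator,
    num_inference_steps=30,
).images[0]
image
```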