Dataset columns: text (string, 96 – 319k chars), id (string, 14 – 178 chars), metadata (dict)
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Mimi model.""" import inspect import os import tempfile import unittest import numpy as np from datasets import Audio, load_dataset from parameterized import parameterized from pytest import mark from transformers import AutoFeatureExtractor, MimiConfig from transformers.testing_utils import ( is_flaky, is_torch_available, require_flash_attn, require_torch, require_torch_gpu, require_torch_sdpa, slow, torch_device, ) from transformers.utils import ( is_torch_bf16_available_on_device, is_torch_fp16_available_on_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, sdpa_kernel if is_torch_available(): import torch from transformers import MimiModel # Copied from transformers.tests.encodec.test_modeling_encodec.prepare_inputs_dict def prepare_inputs_dict( config, input_ids=None, input_values=None, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if input_ids is not None: encoder_dict = {"input_ids": input_ids} else: encoder_dict = {"input_values": input_values} decoder_dict = {"decoder_input_ids": decoder_input_ids} if decoder_input_ids is not None else {} return {**encoder_dict, **decoder_dict} @require_torch class MimiModelTester: def __init__( self, parent, batch_size=5, num_channels=1, is_training=False, intermediate_size=40, hidden_size=32, num_filters=8, num_residual_layers=1, upsampling_ratios=[8, 4], codebook_size=64, vector_quantization_hidden_dimension=64, codebook_dim=64, upsample_groups=32, num_hidden_layers=2, num_attention_heads=2, num_key_value_heads=2, sliding_window=4, use_cache=False, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.is_training = is_training self.intermediate_size = intermediate_size self.hidden_size = hidden_size self.num_filters = num_filters self.num_residual_layers = num_residual_layers self.upsampling_ratios = upsampling_ratios self.codebook_size = codebook_size self.vector_quantization_hidden_dimension = vector_quantization_hidden_dimension self.codebook_dim = codebook_dim self.upsample_groups = upsample_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.sliding_window = sliding_window self.use_cache = use_cache def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.num_channels, self.intermediate_size], scale=1.0) config = self.get_config() inputs_dict = {"input_values": input_values} return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def prepare_config_and_inputs_for_model_class(self, model_class): config, inputs_dict = 
self.prepare_config_and_inputs() inputs_dict["audio_codes"] = ids_tensor([self.batch_size, 1, self.num_channels], self.codebook_size).type( torch.int32 ) return config, inputs_dict def get_config(self): return MimiConfig( audio_channels=self.num_channels, chunk_in_sec=None, hidden_size=self.hidden_size, num_filters=self.num_filters, num_residual_layers=self.num_residual_layers, upsampling_ratios=self.upsampling_ratios, codebook_size=self.codebook_size, vector_quantization_hidden_dimension=self.vector_quantization_hidden_dimension, upsample_groups=self.upsample_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, sliding_window=self.sliding_window, codebook_dim=self.codebook_dim, use_cache=self.use_cache, ) def create_and_check_model_forward(self, config, inputs_dict): model = MimiModel(config=config).to(torch_device).eval() input_values = inputs_dict["input_values"] result = model(input_values) self.parent.assertEqual( result.audio_values.shape, (self.batch_size, self.num_channels, self.intermediate_size) ) @require_torch class MimiModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (MimiModel,) if is_torch_available() else () is_encoder_decoder = True test_pruning = False test_headmasking = False test_resize_embeddings = False test_torchscript = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): # model does support returning hidden states inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if "output_attentions" in inputs_dict: inputs_dict.pop("output_attentions") if "output_hidden_states" in inputs_dict: inputs_dict.pop("output_hidden_states") return inputs_dict def setUp(self): self.model_tester = MimiModelTester(self) self.config_tester = ConfigTester( self, config_class=MimiConfig, hidden_size=37, common_properties=[], has_text_modality=False ) def test_config(self): self.config_tester.run_common_tests() def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_values", "padding_mask", "num_quantizers"] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) @unittest.skip(reason="The MimiModel does not have `inputs_embeds` logics") def test_inputs_embeds(self): pass @unittest.skip(reason="The MimiModel does not have `inputs_embeds` logics") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="The MimiModel does not have the usual `attention` logic") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="The MimiModel does not have the usual `attention` logic") def test_torchscript_output_attentions(self): pass @unittest.skip(reason="The MimiModel does not have the usual `hidden_states` logic") def test_torchscript_output_hidden_state(self): pass # Copied from transformers.tests.encodec.test_modeling_encodec.MimiModelTest._create_and_check_torchscript def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: 
self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) main_input_name = model_class.main_input_name try: main_input = inputs[main_input_name] model(main_input) traced_model = torch.jit.trace(model, main_input) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): if layer_name in loaded_model_state_dict: p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. 
# (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() @unittest.skip(reason="The MimiModel does not have the usual `attention` logic") def test_attention_outputs(self): pass @unittest.skip(reason="The MimiModel does not have the usual `hidden_states` logic") def test_hidden_states_output(self): pass # Copied from transformers.tests.encodec.test_modeling_encodec.MimiModelTest.test_determinism def test_determinism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_determinism(first, second): # outputs are not tensors but list (since each sequence don't have the same frame_length) out_1 = first.cpu().numpy() out_2 = second.cpu().numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): first = model(**self._prepare_for_class(inputs_dict, model_class))[0] second = model(**self._prepare_for_class(inputs_dict, model_class))[0] if isinstance(first, tuple) and isinstance(second, tuple): for tensor1, tensor2 in zip(first, second): check_determinism(tensor1, tensor2) else: check_determinism(first, second) # Copied from transformers.tests.encodec.test_modeling_encodec.MimiModelTest.test_model_outputs_equivalence def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs) self.assertTrue(isinstance(tuple_output, tuple)) self.assertTrue(isinstance(dict_output, dict)) for tuple_value, dict_value in zip(tuple_output, dict_output.values()): self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_value), set_nan_tensor_to_zero(dict_value), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_value - dict_value))}. Tuple has `nan`:" f" {torch.isnan(tuple_value).any()} and `inf`: {torch.isinf(tuple_value)}. Dict has" f" `nan`: {torch.isnan(dict_value).any()} and `inf`: {torch.isinf(dict_value)}." 
), ) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = ["conv", "input_proj", "output_proj"] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # Copied from transformers.tests.encodec.test_modeling_encodec.MimiModelTest.test_identity_shortcut def test_identity_shortcut(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() config.use_conv_shortcut = False self.model_tester.create_and_check_model_forward(config, inputs_dict) # Overwrite to use `audio_values` as the tensors to compare. # TODO: Try to do this in the parent class. @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) @require_torch_sdpa def test_eager_matches_sdpa_inference(self, torch_dtype: str): if torch_dtype == "float16" and torch_device == "cpu": self.skipTest("`replication_pad1d` not implemented for 'Half") if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self.all_model_classes[0]._supports_sdpa: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") if torch_dtype == "float16" and not is_torch_fp16_available_on_device(torch_device): self.skipTest(f"float16 not supported on {torch_device} (on the specific device currently used)") if torch_dtype == "bfloat16" and not is_torch_bf16_available_on_device(torch_device): self.skipTest( f"bfloat16 not supported on {torch_device} (on the specific device currently used, e.g. Nvidia T4 GPU)" ) # Not sure whether it's fine to put torch.XXX in a decorator if torch is not available so hacking it here instead. 
if torch_dtype == "float16": torch_dtype = torch.float16 elif torch_dtype == "bfloat16": torch_dtype = torch.bfloat16 elif torch_dtype == "float32": torch_dtype = torch.float32 atols = { ("cpu", False, torch.float32): 1e-6, ("cpu", False, torch.bfloat16): 1e-2, ("cpu", True, torch.float32): 1e-6, ("cpu", True, torch.bfloat16): 1e-2, ("cuda", False, torch.float32): 1e-6, ("cuda", False, torch.bfloat16): 1e-2, ("cuda", False, torch.float16): 5e-3, ("cuda", True, torch.float32): 1e-6, ("cuda", True, torch.bfloat16): 1e-2, ("cuda", True, torch.float16): 5e-3, } rtols = { ("cpu", False, torch.float32): 1e-4, ("cpu", False, torch.bfloat16): 1e-2, ("cpu", True, torch.float32): 1e-4, ("cpu", True, torch.bfloat16): 1e-2, ("cuda", False, torch.float32): 1e-4, ("cuda", False, torch.bfloat16): 1e-2, ("cuda", False, torch.float16): 5e-3, ("cuda", True, torch.float32): 1e-4, ("cuda", True, torch.bfloat16): 3e-2, ("cuda", True, torch.float16): 5e-3, } def get_mean_reldiff(failcase, x, ref, atol, rtol): return f"{failcase}: mean relative difference: {((x - ref).abs() / (ref.abs() + 1e-12)).mean():.3e}, torch atol = {atol}, torch rtol = {rtol}" for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) # FIXME: we deactivate boolean mask for models using "use_mask_token" in their constructors. # These models support masking only in the case `use_mask_token=True`. Otherwise they cannot consume an input mask. # This means that the class needs to be instantiated much later, after `use_mask` is set, which means a significant refactor of the code. # However masking there is not done at any layers that matters (i.e self-attention), therefore we can safely deactivate it. deactivate_mask = "use_mask_token" in inspect.signature(model_class).parameters is_encoder_decoder = model.config.is_encoder_decoder with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname, torch_dtype=torch_dtype) model_sdpa = model_sdpa.eval().to(torch_device) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") model_eager = model_class.from_pretrained( tmpdirname, torch_dtype=torch_dtype, attn_implementation="eager", ) model_eager = model_eager.eval().to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: raise ValueError("The eager model should not have SDPA attention layers") has_sdpa = False for name, submodule in model_sdpa.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: has_sdpa = True break if not has_sdpa and model_sdpa.config.model_type != "falcon": raise ValueError("The SDPA model should have SDPA attention layers") # We use these for loops instead of parameterized.expand just for the interest of avoiding loading/saving 16 times the model, # but it would be nicer to have an efficient way to use parameterized.expand fail_cases = [] for padding_side in ["left", "right"]: for use_mask in [False, True]: for output_attentions in [True, False]: can_output_attn = "output_attentions" in inspect.signature(model_sdpa.forward).parameters if not (self.has_attentions and can_output_attn) and output_attentions: continue for batch_size in [7]: dummy_input = inputs_dict[model.main_input_name] if 
dummy_input.dtype in [torch.float32, torch.bfloat16, torch.float16]: dummy_input = dummy_input.to(torch_dtype) dummy_input = dummy_input[:batch_size] if dummy_input.shape[0] != batch_size: if dummy_input.dtype in [torch.float32, torch.bfloat16, torch.float16]: extension = torch.rand( batch_size - dummy_input.shape[0], *dummy_input.shape[1:], dtype=torch_dtype, device=torch_device, ) dummy_input = torch.cat((dummy_input, extension), dim=0).to(torch_device) else: extension = torch.randint( high=5, size=(batch_size - dummy_input.shape[0], *dummy_input.shape[1:]), dtype=dummy_input.dtype, device=torch_device, ) dummy_input = torch.cat((dummy_input, extension), dim=0).to(torch_device) if not use_mask: dummy_attention_mask = None else: dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is None: if is_encoder_decoder: seqlen = inputs_dict.get("decoder_input_ids", dummy_input).shape[-1] else: seqlen = dummy_input.shape[-1] dummy_attention_mask = ( torch.ones(batch_size, seqlen).to(torch.int64).to(torch_device) ) dummy_attention_mask = dummy_attention_mask[:batch_size] if dummy_attention_mask.shape[0] != batch_size: extension = torch.ones( batch_size - dummy_attention_mask.shape[0], *dummy_attention_mask.shape[1:], dtype=dummy_attention_mask.dtype, device=torch_device, ) dummy_attention_mask = torch.cat((dummy_attention_mask, extension), dim=0) dummy_attention_mask = dummy_attention_mask.to(torch_device) dummy_attention_mask[:] = 1 if padding_side == "left": dummy_attention_mask[-1, :2] = 0 dummy_attention_mask[-1, 2:] = 1 elif padding_side == "right": dummy_attention_mask[-1, -2:] = 0 dummy_attention_mask[-1, :-2] = 1 for enable_kernels in [False, True]: failcase = f"padding_side={padding_side}, use_mask={use_mask}, batch_size={batch_size}, enable_kernels={enable_kernels}" if is_encoder_decoder: decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[ :batch_size ] if decoder_input_ids.shape[0] != batch_size: extension = torch.ones( batch_size - decoder_input_ids.shape[0], *decoder_input_ids.shape[1:], dtype=decoder_input_ids.dtype, device=torch_device, ) decoder_input_ids = torch.cat((decoder_input_ids, extension), dim=0) decoder_input_ids = decoder_input_ids.to(torch_device) # TODO: never an `attention_mask` arg here? processed_inputs = { model.main_input_name: dummy_input, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": dummy_attention_mask, "output_hidden_states": True, } else: processed_inputs = { model.main_input_name: dummy_input, "output_hidden_states": True, } # Otherwise fails for e.g. 
WhisperEncoderModel if "attention_mask" in inspect.signature(model_eager.forward).parameters: processed_inputs["attention_mask"] = dummy_attention_mask if ( self.has_attentions and "output_attentions" in inspect.signature(model_sdpa.forward).parameters ): processed_inputs["output_attentions"] = output_attentions if not deactivate_mask and ( "bool_masked_pos" in inspect.signature(model_eager.forward).parameters ): dummy_mask = torch.ones((self.model_tester.num_masks,)) # In case of additional token (like class) we define a custom `mask_length` if hasattr(self.model_tester, "mask_length"): mask_length = self.model_tester.mask_length - dummy_mask.size(0) else: mask_length = self.model_tester.seq_length - dummy_mask.size(0) dummy_mask = torch.cat([dummy_mask, torch.zeros(mask_length)]) dummy_bool_masked_pos = dummy_mask.expand(batch_size, -1).bool() processed_inputs["bool_masked_pos"] = dummy_bool_masked_pos.to(torch_device) if "noise" in inspect.signature(model_eager.forward).parameters: np.random.seed(2) num_patches = int( (self.model_tester.image_size // self.model_tester.patch_size) ** 2 ) noise = np.random.uniform(size=(batch_size, num_patches)) processed_inputs["noise"] = torch.from_numpy(noise) # TODO: test gradients as well (& for FA2 as well!) with torch.no_grad(): with sdpa_kernel( enable_flash=enable_kernels, enable_math=True, enable_mem_efficient=enable_kernels, ): prepared_inputs = self._prepare_for_class(processed_inputs, model_class) outputs_eager = model_eager(**prepared_inputs) outputs_sdpa = model_sdpa(**prepared_inputs) # Ignore copy logits_eager = outputs_eager.audio_values # Ignore copy logits_sdpa = outputs_sdpa.audio_values if torch_device in ["cpu", "cuda"]: atol = atols[torch_device, enable_kernels, torch_dtype] rtol = rtols[torch_device, enable_kernels, torch_dtype] elif torch_device == "xpu": # As of PyTorch 2.5 XPU backend supports only torch.nn.attention.SDPBackend.MATH # which is implemented on PyTorch level using aten operators and is # device agnostic with respect to implementation of each aten operator. atol = atols["cuda", False, torch_dtype] rtol = rtols["cuda", False, torch_dtype] else: atol = 1e-7 rtol = 1e-4 # Masked tokens output slightly deviates - we don't mind that. 
if use_mask: _logits_sdpa = torch.zeros_like(input=logits_sdpa) _logits_eager = torch.zeros_like(input=logits_eager) _logits_sdpa[:-1] = logits_sdpa[:-1] _logits_eager[:-1] = logits_eager[:-1] if padding_side == "left": _logits_sdpa[-1:, 2:] = logits_sdpa[-1:, 2:] _logits_eager[-1:, 2:] = logits_eager[-1:, 2:] elif padding_side == "right": _logits_sdpa[-1:, 2:] = logits_sdpa[-1:, :-2] _logits_eager[-1:, 2:] = logits_eager[-1:, :-2] logits_sdpa = _logits_sdpa logits_eager = _logits_eager results = [ torch.allclose(_logits_sdpa, _logits_eager, atol=atol, rtol=rtol) for (_logits_sdpa, _logits_eager) in zip(logits_sdpa, logits_eager) ] # If 80% batch elements have matched results, it's fine if np.mean(results) < 0.8: fail_cases.append( get_mean_reldiff(failcase, logits_sdpa, logits_eager, atol, rtol) ) self.assertTrue(len(fail_cases) == 0, "\n".join(fail_cases)) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow @is_flaky() def test_flash_attn_2_inference_equivalence(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.bfloat16) model.to(torch_device) dummy_input = inputs_dict[model.main_input_name][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) outputs = model(dummy_input) outputs_fa = model_fa(dummy_input) logits = outputs[1] logits_fa = outputs_fa[1] assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) @unittest.skip(reason="The MimiModel does not support right padding") def test_flash_attn_2_inference_equivalence_right_padding(self): pass @unittest.skip(reason="The MimiModel does not have support dynamic compile yet") def test_sdpa_can_compile_dynamic(self): pass # Copied from transformers.tests.encodec.test_modeling_encodec.normalize def normalize(arr): norm = np.linalg.norm(arr) normalized_arr = arr / norm return normalized_arr # Copied from transformers.tests.encodec.test_modeling_encodec.compute_rmse def compute_rmse(arr1, arr2): arr1_normalized = normalize(arr1) arr2_normalized = normalize(arr2) return np.sqrt(((arr1_normalized - arr2_normalized) ** 2).mean()) @slow @require_torch class MimiIntegrationTest(unittest.TestCase): def test_integration_using_cache_decode(self): expected_rmse = { "8": 0.0018785292, "32": 0.0012330565, } librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") model_id = "kyutai/mimi" model = MimiModel.from_pretrained(model_id, use_cache=True).to(torch_device) processor = AutoFeatureExtractor.from_pretrained(model_id) librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate)) audio_sample = librispeech_dummy[-1]["audio"]["array"] inputs = processor( raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt", ).to(torch_device) for num_codebooks, expected_rmse in expected_rmse.items(): with torch.no_grad(): # use max bandwith for best possible reconstruction encoder_outputs = model.encode(inputs["input_values"], num_quantizers=int(num_codebooks)) audio_codes = encoder_outputs[0] decoder_outputs_first_part = model.decode(audio_codes[:, :, : audio_codes.shape[2] // 
2]) decoder_outputs_second_part = model.decode( audio_codes[:, :, audio_codes.shape[2] // 2 :], decoder_past_key_values=decoder_outputs_first_part.decoder_past_key_values, ) audio_output_entire_context = model.decode(audio_codes)[0] audio_output_concat_context = torch.cat( [decoder_outputs_first_part[0], decoder_outputs_second_part[0]], dim=2 ) # make sure audios are more or less equal # the RMSE of two random gaussian noise vectors with ~N(0, 1) is around 1.0 rmse = compute_rmse( audio_output_concat_context.squeeze().cpu().numpy(), audio_output_entire_context.squeeze().cpu().numpy(), ) self.assertTrue(rmse < 1e-3) def test_integration(self): expected_rmses = { "8": 0.0018785292, "32": 0.0012330565, } expected_codesums = { "8": 426176, "32": 1795819, } librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") model_id = "kyutai/mimi" processor = AutoFeatureExtractor.from_pretrained(model_id) librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate)) audio_sample = librispeech_dummy[-1]["audio"]["array"] inputs = processor( raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt", ).to(torch_device) for use_cache in [False, True]: model = MimiModel.from_pretrained(model_id, use_cache=use_cache).to(torch_device) for num_codebooks, expected_rmse in expected_rmses.items(): with torch.no_grad(): # use max bandwith for best possible reconstruction encoder_outputs = model.encode(inputs["input_values"], num_quantizers=int(num_codebooks)) audio_code_sums = encoder_outputs[0].sum().cpu().item() # make sure audio encoded codes are correct # assert relative difference less than a threshold, because `audio_code_sums` varies a bit # depending on torch version self.assertTrue( np.abs(audio_code_sums - expected_codesums[num_codebooks]) <= (3e-3 * audio_code_sums) ) input_values_dec = model.decode(encoder_outputs[0], padding_mask=inputs["padding_mask"])[0] input_values_enc_dec = model( inputs["input_values"], inputs["padding_mask"], num_quantizers=int(num_codebooks) )[1] # make sure forward and decode gives same result torch.testing.assert_close(input_values_dec, input_values_enc_dec) # make sure shape matches self.assertTrue(inputs["input_values"].shape == input_values_enc_dec.shape) arr = inputs["input_values"][0].cpu().numpy() arr_enc_dec = input_values_enc_dec[0].cpu().numpy() # make sure audios are more or less equal # the RMSE of two random gaussian noise vectors with ~N(0, 1) is around 1.0 rmse = compute_rmse(arr, arr_enc_dec) self.assertTrue(np.abs(rmse - expected_rmse) < 1e-5)
transformers/tests/models/mimi/test_modeling_mimi.py/0
{ "file_path": "transformers/tests/models/mimi/test_modeling_mimi.py", "repo_id": "transformers", "token_count": 21775 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Moonshine model.""" import copy import unittest from transformers import MoonshineConfig, is_torch_available from transformers.testing_utils import cleanup, require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, floats_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoProcessor, MoonshineForConditionalGeneration, MoonshineModel, ) from datasets import load_dataset class MoonshineModelTester: def __init__( self, parent, batch_size=3, # need batch_size != num_hidden_layers seq_length=1000, is_training=False, use_labels=False, vocab_size=147, hidden_size=8, intermediate_size=32, num_hidden_layers=2, num_attention_heads=2, num_key_value_heads=2, encoder_hidden_act="gelu", decoder_hidden_act="silu", decoder_start_token_id=85, bos_token_id=98, eos_token_id=98, pad_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.use_labels = use_labels self.vocab_size = vocab_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.encoder_hidden_act = encoder_hidden_act self.decoder_hidden_act = decoder_hidden_act self.decoder_start_token_id = decoder_start_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) decoder_input_ids = torch.tensor(self.batch_size * [[self.decoder_start_token_id]], device=torch_device) decoder_attention_mask = decoder_input_ids.ne(self.pad_token_id) config = self.get_config() return config, input_values, attention_mask, decoder_input_ids, decoder_attention_mask def get_config(self): return MoonshineConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, intermediate_size=self.intermediate_size, encoder_num_hidden_layers=self.num_hidden_layers, decoder_num_hidden_layers=self.num_hidden_layers, encoder_num_attention_heads=self.num_attention_heads, decoder_num_attention_heads=self.num_attention_heads, encoder_num_key_value_heads=self.num_key_value_heads, decoder_num_key_value_heads=self.num_key_value_heads, encoder_hidden_act=self.encoder_hidden_act, decoder_hidden_act=self.decoder_hidden_act, decoder_start_token_id=self.decoder_start_token_id, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, ) def create_and_check_model(self, config, input_values, attention_mask): model = 
MoonshineModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_values, *args): # test does not pass for models making use of `group_norm` # check: https://github.com/pytorch/fairseq/issues/3227 model = MoonshineModel(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_output_attentions(self, config, input_values, attention_mask): model = MoonshineModel(config=config) model.config.layerdrop = 1.0 model.to(torch_device) model.train() outputs = model(input_values, attention_mask=attention_mask, output_attentions=True) self.parent.assertTrue(len(outputs.attentions) > 0) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask, decoder_input_ids, decoder_attention_mask = ( self.prepare_config_and_inputs() ) inputs_dict = { "input_values": input_values, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return config, inputs_dict @require_torch class MoonshineModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MoonshineModel, MoonshineForConditionalGeneration) if is_torch_available() else () pipeline_model_mapping = ( { "automatic-speech-recognition": MoonshineForConditionalGeneration, "feature-extraction": MoonshineModel, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False def setUp(self): self.model_tester = MoonshineModelTester(self) self.config_tester = ConfigTester(self, config_class=MoonshineConfig) def test_config(self): self.config_tester.run_common_tests() def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", 1) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", 1) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() subsampled_encoder_seq_length = model._get_feat_extract_output_lengths(encoder_seq_length) subsampled_encoder_key_length = model._get_feat_extract_output_lengths(encoder_key_length) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if 
config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) # Copied from tests.models.whisper.test_modeling_whisper.WhisperModelTest.test_hidden_states_output def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model._get_feat_extract_output_lengths(seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) 
self.assertEqual(len(hidden_states), expected_num_layers) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", 1) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # Copied from tests.models.whisper.test_modeling_whisper.WhisperModelTest.test_inputs_embeds def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) decoder_input_ids = inputs.pop("decoder_input_ids", None) inputs.pop("decoder_attention_mask", None) wte = model.get_input_embeddings() inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] # Copied from tests.models.whisper.test_modeling_whisper.WhisperModelTest.test_resize_tokens_embeddings def test_resize_tokens_embeddings(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: self.skipTest(reason="test_resize_embeddings is False") for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size # Retrieve the embeddings and clone theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # make sure that decoder_input_ids are resized if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. 
models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # Copied from tests.models.whisper.test_modeling_whisper.WhisperModelTest.test_resize_embeddings_untied def test_resize_embeddings_untied(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: self.skipTest(reason="test_resize_embeddings is False") original_config.tie_word_embeddings = False # if model cannot untied embeddings -> leave test if original_config.tie_word_embeddings: self.skipTest(reason="Model cannot untie embeddings") for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) @require_torch class MoonshineModelIntegrationTests(unittest.TestCase): def setUp(self): self.processor_tiny = AutoProcessor.from_pretrained("UsefulSensors/moonshine-tiny") self.processor_base = AutoProcessor.from_pretrained("UsefulSensors/moonshine-base") def tearDown(self): cleanup(torch_device, gc_collect=True) def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] @slow def test_tiny_logits_single(self): model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny") model.to(torch_device) inputs = self.processor_tiny(self._load_datasamples(1), return_tensors="pt") inputs.to(torch_device) outputs = model.generate(**inputs, max_new_tokens=1, return_dict_in_generate=True, output_logits=True) # fmt: off EXPECTED_LOGITS = torch.tensor([ -9.1106, 4.5542, 6.3892, -6.8139, -7.2456, -7.9074, -7.2839, -7.6043, -8.0384, -7.8351, 
-7.3867, -7.2450, -7.7420, -7.3912, -7.3866, -7.6979, -7.6420, -7.0504, -7.3979, -7.2483, -8.0796, -7.3300, -7.3672, -6.8765, -7.6876, -7.2682, -6.9866, -6.7457, -7.6855, -7.3050, ]) # fmt: on torch.testing.assert_close(outputs.logits[0][0, :30].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4) @slow def test_base_logits_single(self): model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-base") model.to(torch_device) inputs = self.processor_base(self._load_datasamples(1), return_tensors="pt") inputs.to(torch_device) outputs = model.generate(**inputs, max_new_tokens=1, return_dict_in_generate=True, output_logits=True) # fmt: off EXPECTED_LOGITS = torch.tensor([ -6.7336, 1.9482, 5.2448, -8.0277, -7.9167, -7.8956, -7.9649, -7.9348, -8.1312, -8.0616, -8.1070, -7.7696, -7.8809, -7.9450, -8.1013, -7.8177, -7.8598, -7.8257, -7.8729, -7.9657, -7.9310, -8.1024, -7.8699, -7.8231, -8.0752, -7.9764, -7.8127, -8.0536, -7.9492, -7.9290, ]) # fmt: on torch.testing.assert_close(outputs.logits[0][0, :30].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4) @slow def test_tiny_logits_batch(self): model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny") model.to(torch_device) inputs = self.processor_tiny(self._load_datasamples(4), return_tensors="pt", padding=True) inputs.to(torch_device) outputs = model.generate(**inputs, max_new_tokens=1, return_dict_in_generate=True, output_logits=True) # fmt: off EXPECTED_LOGITS = torch.tensor([ [-8.0109, 5.0241, 4.5979, -6.8125, -7.1675, -7.8783, -7.2152, -7.5188, -7.9077, -7.7394], [-4.4399, -1.4422, 6.6710, -6.8929, -7.3751, -7.0969, -6.5257, -7.0257, -7.2585, -7.0008], [-10.0086, 3.2859, 0.7345, -6.5557, -6.8514, -6.5308, -6.4172, -6.9484, -6.6214, -6.6229], [-10.8078, 4.0030, -0.0633, -5.0505, -5.3906, -5.4590, -5.2420, -5.4746, -5.2665, -5.3158] ]) # fmt: on torch.testing.assert_close(outputs.logits[0][:, :10].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4) @slow def test_base_logits_batch(self): model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-base") model.to(torch_device) inputs = self.processor_base(self._load_datasamples(4), return_tensors="pt", padding=True) inputs.to(torch_device) outputs = model.generate(**inputs, max_new_tokens=1, return_dict_in_generate=True, output_logits=True) # fmt: off EXPECTED_LOGITS = torch.tensor([ [-7.7272, 1.4630, 5.2294, -7.7313, -7.6252, -7.6011, -7.6788, -7.6441, -7.8452, -7.7549], [-6.2173, -0.5891, 7.9493, -7.0694, -6.9997, -6.9982, -7.0953, -7.0831, -7.1686, -7.0137], [-7.3184, 3.1192, 3.8937, -5.7206, -5.8428, -5.7609, -5.9996, -5.8212, -5.8615, -5.8719], [-9.5475, 1.0146, 4.1179, -5.9971, -6.0614, -6.0329, -6.2103, -6.0318, -6.0789, -6.0873] ]) # fmt: on torch.testing.assert_close(outputs.logits[0][:, :10].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4) @slow def test_tiny_generation_single(self): model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny") model.to(torch_device) audio_array = self._load_datasamples(1) inputs = self.processor_tiny(audio_array, return_tensors="pt") inputs.to(torch_device) generated_ids = model.generate(**inputs, max_new_tokens=20) transcript = self.processor_tiny.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = "Mr. 
Quilter is the apostle of the middle classes, and we are glad to welcome" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_base_generation_single(self): model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-base") model.to(torch_device) audio_array = self._load_datasamples(1) inputs = self.processor_base(audio_array, return_tensors="pt") inputs.to(torch_device) generated_ids = model.generate(**inputs, max_new_tokens=20) transcript = self.processor_base.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = "Mr. Quilter is the apostle of the middle classes, and we are glad to welcome" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_generation_batch(self): model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny") model.to(torch_device) audio_array = self._load_datasamples(4) inputs = self.processor_tiny(audio_array, return_tensors="pt", padding=True) inputs.to(torch_device) generated_ids = model.generate(**inputs, max_new_tokens=20) transcript = self.processor_tiny.batch_decode(generated_ids, skip_special_tokens=True) # fmt: off EXPECTED_TRANSCRIPT = [ "Mr. Quilter is the apostle of the middle classes, and we are glad to welcome", "Nor is Mr. Quilter's manner less interesting than his matter.", "He tells us that at this festive season of the year, with Christmas and Rose beef lo", "He has grave doubts whether Sir Frederick Layton's work is really Greek after all,", ] # fmt: on self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_base_generation_batch(self): model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-base") model.to(torch_device) audio_array = self._load_datasamples(4) inputs = self.processor_base(audio_array, return_tensors="pt", padding=True) inputs.to(torch_device) generated_ids = model.generate(**inputs, max_new_tokens=20) transcript = self.processor_base.batch_decode(generated_ids, skip_special_tokens=True) # fmt: off EXPECTED_TRANSCRIPT = [ "Mr. Quilter is the apostle of the middle classes, and we are glad to welcome", "Nor is Mr. Quilter's manner less interesting than his matter.", "He tells us that at this festive season of the year, with Christmas and rose beef lo", "He has grave doubts whether Sir Frederick Layton's work is really Greek after all,", ] # fmt: on self.assertListEqual(transcript, EXPECTED_TRANSCRIPT)
transformers/tests/models/moonshine/test_modeling_moonshine.py/0
{ "file_path": "transformers/tests/models/moonshine/test_modeling_moonshine.py", "repo_id": "transformers", "token_count": 12733 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Pop2Piano model.""" import copy import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import Pop2PianoConfig from transformers.feature_extraction_utils import BatchFeature from transformers.testing_utils import ( require_essentia, require_librosa, require_onnx, require_scipy, require_torch, slow, torch_device, ) from transformers.utils import is_essentia_available, is_librosa_available, is_scipy_available, is_torch_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import Pop2PianoForConditionalGeneration @require_torch class Pop2PianoModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, # For common tests is_training=False, use_attention_mask=True, use_labels=True, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = ( ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) if self.use_labels else None ) return self.get_config(), input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels def get_pipeline_config(self): return Pop2PianoConfig( vocab_size=166, # Pop2Piano forces 100 extra tokens d_model=self.hidden_size, 
d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def get_config(self): return Pop2PianoConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def check_prepare_lm_labels_via_shift_left( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = Pop2PianoForConditionalGeneration(config=config) model.to(torch_device) model.eval() # make sure that lm_labels are correctly padded from the right lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id) # add causal pad token mask triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not() lm_labels.masked_fill_(triangular_mask, self.pad_token_id) decoder_input_ids = model._shift_right(lm_labels) for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)): # first item self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id) if i < decoder_input_ids_slice.shape[-1]: if i < decoder_input_ids.shape[-1] - 1: # items before diagonal self.parent.assertListEqual( decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist() ) # pad items after diagonal if i < decoder_input_ids.shape[-1] - 2: self.parent.assertListEqual( decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist() ) else: # all items after square self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist()) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = Pop2PianoForConditionalGeneration(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(decoder_past), config.num_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_with_lm_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = Pop2PianoForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( 
input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_decoder_model_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = Pop2PianoForConditionalGeneration(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = Pop2PianoForConditionalGeneration(config=config).get_decoder() model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_past_large_inputs( self, config, 
input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = Pop2PianoForConditionalGeneration(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_generate_with_past_key_values( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = Pop2PianoForConditionalGeneration(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_model_fp16_forward( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = Pop2PianoForConditionalGeneration(config=config).to(torch_device).half().eval() output = model(input_ids, decoder_input_ids=input_ids, attention_mask=attention_mask)[ "encoder_last_hidden_state" ] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_encoder_decoder_shared_weights( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): for model_class in [Pop2PianoForConditionalGeneration]: torch.manual_seed(0) model = model_class(config=config).to(torch_device).eval() # load state dict copies weights but does not tie them model.encoder.load_state_dict(model.decoder.state_dict(), strict=False) torch.manual_seed(0) tied_config = copy.deepcopy(config) tied_config.tie_encoder_decoder = True tied_model = model_class(config=tied_config).to(torch_device).eval() model_result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that models has less parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() # check that outputs are equal self.parent.assertTrue( 
torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4 ) ) # check that outputs after saving and loading are equal with tempfile.TemporaryDirectory() as tmpdirname: tied_model.save_pretrained(tmpdirname) tied_model = model_class.from_pretrained(tmpdirname) tied_model.to(torch_device) tied_model.eval() # check that models has less parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4, ) ) def check_resize_embeddings_pop2piano_v1_1( self, config, ): prev_vocab_size = config.vocab_size config.tie_word_embeddings = False model = Pop2PianoForConditionalGeneration(config=config).to(torch_device).eval() model.resize_token_embeddings(prev_vocab_size - 10) self.parent.assertEqual(model.get_input_embeddings().weight.shape[0], prev_vocab_size - 10) self.parent.assertEqual(model.get_output_embeddings().weight.shape[0], prev_vocab_size - 10) self.parent.assertEqual(model.config.vocab_size, prev_vocab_size - 10) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "use_cache": False, } return config, inputs_dict @require_torch class Pop2PianoModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Pop2PianoForConditionalGeneration,) if is_torch_available() else () all_generative_model_classes = () pipeline_model_mapping = ( {"automatic-speech-recognition": Pop2PianoForConditionalGeneration} if is_torch_available() else {} ) all_parallelizable_model_classes = () fx_compatible = False test_pruning = False test_resize_embeddings = True test_model_parallel = False is_encoder_decoder = True def setUp(self): self.model_tester = Pop2PianoModelTester(self) self.config_tester = ConfigTester(self, config_class=Pop2PianoConfig, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_shift_right(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # check that gated gelu feed forward and different word embeddings work config = config_and_inputs[0] config.tie_word_embeddings = False config.feed_forward_proj = "gated-gelu" self.model_tester.create_and_check_model(config, *config_and_inputs[1:]) def test_config_and_model_silu_gated(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] config.feed_forward_proj = "gated-silu" self.model_tester.create_and_check_model(*config_and_inputs) def 
test_with_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_lm_head(*config_and_inputs) def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_past_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_decoder_model_past_with_3d_attn_mask(self): ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = self.model_tester.prepare_config_and_inputs() attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length], vocab_size=2, ) decoder_attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.decoder_seq_length], vocab_size=2, ) self.model_tester.create_and_check_decoder_model_attention_mask_past( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_shared_weights(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) def test_v1_1_resize_embeddings(self): config = self.model_tester.prepare_config_and_inputs()[0] self.model_tester.check_resize_embeddings_pop2piano_v1_1(config) @slow def test_model_from_pretrained(self): model_name = "sweetcocoa/pop2piano" model = Pop2PianoForConditionalGeneration.from_pretrained(model_name) self.assertIsNotNone(model) @require_onnx def test_export_to_onnx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() model = Pop2PianoForConditionalGeneration(config_and_inputs[0]).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/Pop2Piano_test.onnx", export_params=True, opset_version=14, input_names=["input_ids", "decoder_input_ids"], ) def test_pass_with_input_features(self): input_features = BatchFeature( { "input_features": torch.rand((75, 100, 512)).type(torch.float32), "beatsteps": torch.randint(size=(1, 955), low=0, high=100).type(torch.float32), "extrapolated_beatstep": torch.randint(size=(1, 900), low=0, high=100).type(torch.float32), } ) model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano") model_opts = model.generate(input_features=input_features["input_features"], return_dict_in_generate=True) self.assertEqual(model_opts.sequences.ndim, 2) def test_pass_with_batched_input_features(self): input_features = BatchFeature( { "input_features": torch.rand((220, 70, 512)).type(torch.float32), "beatsteps": torch.randint(size=(5, 955), low=0, high=100).type(torch.float32), "extrapolated_beatstep": torch.randint(size=(5, 900), low=0, high=100).type(torch.float32), "attention_mask": 
torch.concatenate( [ torch.ones([120, 70], dtype=torch.int32), torch.zeros([1, 70], dtype=torch.int32), torch.ones([50, 70], dtype=torch.int32), torch.zeros([1, 70], dtype=torch.int32), torch.ones([47, 70], dtype=torch.int32), torch.zeros([1, 70], dtype=torch.int32), ], axis=0, ), "attention_mask_beatsteps": torch.ones((5, 955)).type(torch.int32), "attention_mask_extrapolated_beatstep": torch.ones((5, 900)).type(torch.int32), } ) model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano") model_opts = model.generate( input_features=input_features["input_features"], attention_mask=input_features["attention_mask"], return_dict_in_generate=True, ) self.assertEqual(model_opts.sequences.ndim, 2) @require_torch class Pop2PianoModelIntegrationTests(unittest.TestCase): @slow def test_mel_conditioner_integration(self): composer = "composer1" model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano") input_embeds = torch.ones([10, 100, 512]) composer_value = model.generation_config.composer_to_feature_token[composer] composer_value = torch.tensor(composer_value) composer_value = composer_value.repeat(input_embeds.size(0)) outputs = model.mel_conditioner( input_embeds, composer_value, min(model.generation_config.composer_to_feature_token.values()) ) # check shape self.assertEqual(outputs.size(), torch.Size([10, 101, 512])) # check values EXPECTED_OUTPUTS = torch.tensor( [[1.0475305318832397, 0.29052114486694336, -0.47778210043907166], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]] ) torch.testing.assert_close(outputs[0, :3, :3], EXPECTED_OUTPUTS, rtol=1e-4, atol=1e-4) @slow @require_essentia @require_librosa @require_scipy def test_full_model_integration(self): if is_librosa_available() and is_scipy_available() and is_essentia_available() and is_torch_available(): from transformers import Pop2PianoProcessor speech_input1 = np.zeros([1_000_000], dtype=np.float32) sampling_rate = 44_100 processor = Pop2PianoProcessor.from_pretrained("sweetcocoa/pop2piano") input_features = processor.feature_extractor( speech_input1, sampling_rate=sampling_rate, return_tensors="pt" ) model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano") outputs = model.generate( input_features=input_features["input_features"], return_dict_in_generate=True ).sequences # check for shapes self.assertEqual(outputs.size(0), 70) # check for values self.assertEqual(outputs[0, :2].detach().cpu().numpy().tolist(), [0, 1]) # This is the test for a real music from K-Pop genre. 
@slow @require_essentia @require_librosa @require_scipy def test_real_music(self): if is_librosa_available() and is_scipy_available() and is_essentia_available() and is_torch_available(): from transformers import Pop2PianoFeatureExtractor, Pop2PianoTokenizer model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano") model.eval() feature_extractor = Pop2PianoFeatureExtractor.from_pretrained("sweetcocoa/pop2piano") tokenizer = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano") ds = load_dataset("sweetcocoa/pop2piano_ci", split="test") output_fe = feature_extractor( ds["audio"][0]["array"], sampling_rate=ds["audio"][0]["sampling_rate"], return_tensors="pt" ) output_model = model.generate(input_features=output_fe["input_features"], composer="composer1") output_tokenizer = tokenizer.batch_decode(token_ids=output_model, feature_extractor_output=output_fe) pretty_midi_object = output_tokenizer["pretty_midi_objects"][0] # Checking if no of notes are same self.assertEqual(len(pretty_midi_object.instruments[0].notes), 59) predicted_timings = [] for i in pretty_midi_object.instruments[0].notes: predicted_timings.append(i.start) # Checking note start timings(first 6) EXPECTED_START_TIMINGS = [ 0.4876190423965454, 0.7314285635948181, 0.9752380847930908, 1.4396371841430664, 1.6718367338180542, 1.904036283493042, ] np.allclose(EXPECTED_START_TIMINGS, predicted_timings[:6]) # Checking note end timings(last 6) EXPECTED_END_TIMINGS = [ 12.341403007507324, 12.567797183990479, 12.567797183990479, 12.567797183990479, 12.794191360473633, 12.794191360473633, ] np.allclose(EXPECTED_END_TIMINGS, predicted_timings[-6:])
transformers/tests/models/pop2piano/test_modeling_pop2piano.py/0
{ "file_path": "transformers/tests/models/pop2piano/test_modeling_pop2piano.py", "repo_id": "transformers", "token_count": 15020 }
# coding=utf-8 # Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Qwen2.5-VL model.""" import gc import unittest import requests from transformers import ( AutoProcessor, Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration, is_torch_available, is_vision_available, ) from transformers.testing_utils import ( require_flash_attn, require_torch, require_torch_gpu, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, ) if is_torch_available(): import torch else: is_torch_greater_or_equal_than_2_0 = False if is_vision_available(): from PIL import Image class Qwen2_5_VLVisionText2TextModelTester: def __init__( self, parent, batch_size=3, seq_length=7, num_channels=3, ignore_index=-100, image_size=14, bos_token_id=0, eos_token_id=1, pad_token_id=2, vision_start_token_id=3, image_token_id=4, video_token_id=5, hidden_act="silu", hidden_size=32, vocab_size=99, intermediate_size=37, max_position_embeddings=512, max_window_layers=3, model_type="qwen2_5_vl", num_attention_heads=4, num_hidden_layers=4, num_key_value_heads=2, rope_theta=10000, tie_word_embeddings=True, is_training=True, vision_config={ "depth": 2, "in_chans": 3, "hidden_act": "silu", "intermediate_size": 32, "out_hidden_size": 32, "hidden_size": 32, "num_heads": 4, "patch_size": 14, "spatial_patch_size": 14, "spatial_merge_size": 1, "temporal_patch_size": 2, }, rope_scaling={"type": "mrope", "mrope_section": [2, 1, 1]}, ): self.parent = parent self.ignore_index = ignore_index self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.vision_start_token_id = vision_start_token_id self.image_token_id = image_token_id self.video_token_id = video_token_id self.hidden_act = hidden_act self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.max_position_embeddings = max_position_embeddings self.max_window_layers = max_window_layers self.model_type = model_type self.num_attention_heads = num_attention_heads self.num_hidden_layers = num_hidden_layers self.num_key_value_heads = num_key_value_heads self.rope_theta = rope_theta self.tie_word_embeddings = tie_word_embeddings self.vision_config = vision_config self.rope_scaling = rope_scaling self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.is_training = is_training self.vocab_size = vocab_size self.num_image_tokens = 32 self.seq_length = seq_length + self.num_image_tokens def get_config(self): return Qwen2_5_VLConfig( hidden_size=self.hidden_size, intermediate_size=self.intermediate_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, hidden_act=self.hidden_act, max_position_embeddings=self.max_position_embeddings, 
vision_config=self.vision_config, model_type=self.model_type, max_window_layers=self.max_window_layers, rope_scaling=self.rope_scaling, tie_word_embeddings=self.tie_word_embeddings, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, vision_start_token_id=self.vision_start_token_id, image_token_id=self.image_token_id, video_token_id=self.video_token_id, vocab_size=self.vocab_size, ) def prepare_config_and_inputs(self): config = self.get_config() patch_size = config.vision_config.patch_size temporal_patch_size = config.vision_config.temporal_patch_size pixel_values = floats_tensor( [ self.batch_size * (self.image_size**2) // (patch_size**2), self.num_channels * (patch_size**2) * temporal_patch_size, ] ) return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) input_ids[:, -1] = self.pad_token_id input_ids[input_ids == self.video_token_id] = self.pad_token_id input_ids[input_ids == self.image_token_id] = self.pad_token_id input_ids[:, self.num_image_tokens] = self.image_token_id labels = torch.zeros( (self.batch_size, self.seq_length), dtype=torch.long, device=torch_device, ) inputs_dict = { "pixel_values": pixel_values, "image_grid_thw": torch.tensor([[1, 1, 1]] * self.batch_size), "input_ids": input_ids, "attention_mask": attention_mask, "labels": labels, } return config, inputs_dict def create_and_check_qwen2_5_vl_model_fp16_forward( self, config, input_ids, pixel_values, attention_mask, image_grid_thw ): model = Qwen2_5_VLForConditionalGeneration(config=config) model.to(torch_device) model.half() model.eval() logits = model( input_ids=input_ids, attention_mask=attention_mask, image_grid_thw=image_grid_thw, pixel_values=pixel_values.to(torch.bfloat16), return_dict=True, )["logits"] self.parent.assertFalse(torch.isnan(logits).any().item()) def create_and_check_qwen2_5_vl_model_fp16_autocast_forward( self, config, input_ids, pixel_values, attention_mask, image_grid_thw ): config.torch_dtype = torch.float16 model = Qwen2_5_VLForConditionalGeneration(config=config) model.to(torch_device) model.eval() with torch.autocast(device_type="cuda", dtype=torch.float16): logits = model( input_ids=input_ids, attention_mask=attention_mask, image_grid_thw=image_grid_thw, pixel_values=pixel_values.to(torch.bfloat16), return_dict=True, )["logits"] self.parent.assertFalse(torch.isnan(logits).any().item()) @require_torch class Qwen2_5_VLModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): """ Model tester for `Qwen2_5_VLForConditionalGeneration`. 
""" all_model_classes = (Qwen2_5_VLForConditionalGeneration,) if is_torch_available() else () all_generative_model_classes = (Qwen2_5_VLForConditionalGeneration,) if is_torch_available() else () test_pruning = False test_head_masking = False def setUp(self): self.model_tester = Qwen2_5_VLVisionText2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Qwen2_5_VLConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_mismatching_num_image_tokens(self): """ Tests that VLMs through an error with explicit message saying what is wrong when number of images don't match number of image tokens in the text. Also we need to test multi-image cases when one prompr has multiple image tokens. """ config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) _ = model(**input_dict) # successfull forward with no modifications # remove one image but leave the image token in text patch_size = config.vision_config.patch_size one_img_length = (self.model_tester.image_size**2) // (patch_size**2) input_dict["pixel_values"] = input_dict["pixel_values"][-one_img_length:, ...] input_dict["image_grid_thw"] = input_dict["image_grid_thw"][-1:, ...] with self.assertRaises(ValueError): _ = model(**input_dict) # simulate multi-image case by concatenating inputs where each has exactly one image/image-token input_ids = input_dict["input_ids"][:1] pixel_values = input_dict["pixel_values"][:one_img_length] image_grid_thw = input_dict["image_grid_thw"][:1] input_ids = torch.cat([input_ids, input_ids], dim=0) # one image and two image tokens raise an error with self.assertRaises(ValueError): _ = model( input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw, ) # two images and two image tokens don't raise an error pixel_values = torch.cat([pixel_values, pixel_values], dim=0) image_grid_thw = torch.cat([image_grid_thw, image_grid_thw], dim=0) _ = model( input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw, ) @unittest.skip(reason="Feedforward chunking is not yet supported") def test_feed_forward_chunking(self): pass @unittest.skip(reason="CPU offload is not yet supported") def test_cpu_offload(self): pass @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.") def test_disk_offload_bin(self): pass @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.") def test_disk_offload_safetensors(self): pass @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. 
Skip for now.") def test_model_parallelism(self): pass @unittest.skip(reason="Compile not yet supported because in Qwen2_5_VL models") def test_sdpa_can_compile_dynamic(self): pass @unittest.skip(reason="Compile not yet supported because in Qwen2_5_VL models") def test_sdpa_can_dispatch_on_flash(self): pass @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="We cannot configure to output a smaller model.") def test_model_is_small(self): pass @unittest.skip( reason="Qwen2.5-VL can't do low-memory generation because position IDs have extra dimension and split function doesn't work for that" ) def test_beam_search_low_memory(self): pass @unittest.skip( reason="VLMs can't generate from inputs embeds and pixels. This can be tested as part of bacbone LM, no need to run the tes for VLMs" ) def test_generate_from_inputs_embeds_with_static_cache(self): pass @unittest.skip(reason="Can't compile fullgraph due to dynamic control flow in `prepare_inputs_for_generate`") def test_generate_compile_fullgraph(self): pass @require_torch class Qwen2_5_VLIntegrationTest(unittest.TestCase): def setUp(self): self.processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct") self.messages = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "What kind of dog is this?"}, ], } ] url = "https://qianwen-res.oss-accelerate-overseas.aliyuncs.com/Qwen2-VL/demo_small.jpg" self.image = Image.open(requests.get(url, stream=True).raw) def tearDown(self): gc.collect() torch.cuda.empty_cache() @slow def test_small_model_integration_test(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype="auto", device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text], images=[self.image], return_tensors="pt") expected_input_ids = [151644, 8948, 198, 2610, 525, 264, 10950, 17847, 13, 151645, 198, 151644, 872, 198, 151652, 151655, 151655] # fmt: skip assert torch.allclose(expected_input_ids, inputs.input_ids[0].tolist()[:17], atol=3e-3) expected_pixel_slice = torch.tensor( [ [0.8792, 0.8792, 0.9084], [1.1858, 1.1858, 1.2296], [1.2004, 1.2004, 1.2150], [1.4340, 1.4340, 1.4194], [1.3902, 1.4048, 1.4194], [1.5216, 1.5362, 1.5362], ], dtype=torch.float32, device="cpu", ) assert torch.allclose(expected_pixel_slice, inputs.pixel_values[:6, :3], atol=3e-3) # verify generation inputs = inputs.to(torch_device) output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. 
Labradors are known for their friendly and intelligent nature, making them popular pets" self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_batch(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype="auto", device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text, text], images=[self.image, self.image], return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = [ 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices', 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular pets' ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_batch_wo_image(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype="auto", device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) messages2 = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Who are you?"}, ] text2 = self.processor.apply_chat_template(messages2, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text, text2], images=[self.image], padding=True, return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = [ 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular pets', 'system\nYou are a helpful assistant.\nuser\nWho are you?\nassistant\nI am Qwen, a large language model created by Alibaba Cloud. I am designed to assist with various tasks and answer questions to the best of my' ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_batch_different_resolutions(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype="auto", device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) text2 = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) image2 = self.image.resize((224, 224)) inputs = self.processor( text=[text, text2], images=[self.image, image2], padding=True, return_tensors="pt", ).to(torch_device) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = [ "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. 
Labradors are known for their friendly and intelligent nature, making them popular pets", "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular pets", ] self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_flash_attn @require_torch_gpu def test_small_model_integration_test_batch_flashatt2(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2", device_map="auto", ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text, text], images=[self.image, self.image], return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = [ "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular pets", "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular pets", ] self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True)[0], self.processor.batch_decode(output, skip_special_tokens=True)[1], ) @slow @require_flash_attn @require_torch_gpu def test_small_model_integration_test_batch_wo_image_flashatt2(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2", device_map="auto", ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) messages2 = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Who are you?"}, ] text2 = self.processor.apply_chat_template(messages2, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text, text2], images=[self.image], padding=True, return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = [ "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular pets", "system\nYou are a helpful assistant.\nuser\nWho are you?\nassistant\nI am Qwen, a large language model created by Alibaba Cloud. I am designed to answer a wide range of questions and provide information on various topics", ] self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, )
transformers/tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py/0
{ "file_path": "transformers/tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py", "repo_id": "transformers", "token_count": 10316 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the Tensorflow ResNet model.""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class TFResNetModelTester: def __init__( self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.embeddings_size = embeddings_size self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.num_labels = num_labels self.scope = scope self.num_stages = len(hidden_sizes) def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return ResNetConfig( num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, ) def create_and_check_model(self, config, pixel_values, labels): model = TFResNetModel(config=config) result = model(pixel_values) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = TFResNetForImageClassification(config) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as 
ResNet does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False test_onnx = False has_attentions = False def setUp(self): self.model_tester = TFResNetModelTester(self) self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return @unittest.skip(reason="ResNet does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="ResNet does not support input and output embeddings") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages self.assertEqual(len(hidden_states), expected_num_stages + 1) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() layers_type = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: config.layer_type = layer_type inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "microsoft/resnet-50" model = TFResNetModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf 
@require_vision class TFResNetModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("microsoft/resnet-50") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs) # verify the logits expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-11.1069, -9.7877, -8.3777]) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
transformers/tests/models/resnet/test_modeling_tf_resnet.py/0
{ "file_path": "transformers/tests/models/resnet/test_modeling_tf_resnet.py", "repo_id": "transformers", "token_count": 3760 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class TFRoFormerModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = True self.use_labels = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = RoFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, 
type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFRoFormerModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_lm_head( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = TFRoFormerForCausalLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } prediction_scores = model(inputs)["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFRoFormerForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFRoFormerForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFRoFormerForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFRoFormerForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFRoFormerForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) 
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFRoFormerModel, "fill-mask": TFRoFormerForMaskedLM, "question-answering": TFRoFormerForQuestionAnswering, "text-classification": TFRoFormerForSequenceClassification, "text-generation": TFRoFormerForCausalLM, "token-classification": TFRoFormerForTokenClassification, "zero-shot": TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False # TODO: add `prepare_inputs_for_generation` for `TFRoFormerForCausalLM` def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if pipeline_test_case_name == "TextGenerationPipelineTests": return True return False def setUp(self): self.model_tester = TFRoFormerModelTester(self) self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base") self.assertIsNotNone(model) @require_tf class TFRoFormerModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] # TODO Replace vocab size vocab_size = 50000 expected_shape = [1, 6, 
vocab_size] self.assertEqual(output.shape, expected_shape) print(output[:, :3, :3]) # TODO Replace values below with what was printed above. expected_slice = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4) @require_tf class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase): tolerance = 1e-4 def test_basic(self): input_ids = tf.constant([[4, 10]]) emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6) emb = emb1(input_ids.shape) desired_weights = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance) def test_positional_emb_weights_against_roformer(self): desired_weights = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512) emb1([2, 16, 512]) weights = emb1.weight[:3, :5] tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance) @require_tf class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase): tolerance = 1e-4 def test_apply_rotary_position_embeddings(self): # 2,12,16,64 query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100 key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100 embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64) sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :] query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings( sinusoidal_pos, query_layer, key_layer ) desired_query_layer = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) desired_key_layer = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance) tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
transformers/tests/models/roformer/test_modeling_tf_roformer.py/0
{ "file_path": "transformers/tests/models/roformer/test_modeling_tf_roformer.py", "repo_id": "transformers", "token_count": 7875 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch SeamlessM4T model.""" import copy import tempfile import unittest from transformers import SeamlessM4TConfig, is_speech_available, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from transformers.trainer_utils import set_seed from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SeamlessM4TForSpeechToSpeech, SeamlessM4TForSpeechToText, SeamlessM4TForTextToSpeech, SeamlessM4TForTextToText, SeamlessM4TModel, ) if is_speech_available(): from transformers import SeamlessM4TProcessor class SeamlessM4TModelTester: def __init__( self, parent, input_modality="speech", batch_size=2, seq_length=4, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, max_new_tokens=None, num_labels=3, num_choices=4, scope=None, vocab_size=20, t2u_vocab_size=20, hidden_size=6, num_hidden_layers=2, intermediate_size=6, max_position_embeddings=256, encoder_layers=2, decoder_layers=2, encoder_ffn_dim=6, decoder_ffn_dim=6, t2u_encoder_layers=2, t2u_decoder_layers=2, t2u_encoder_ffn_dim=6, t2u_decoder_ffn_dim=6, num_heads=2, vocoder_num_spkrs=5, vocoder_num_langs=5, upsample_initial_channel=32, unit_embed_dim=25, spkr_embed_dim=6, lang_embed_dim=6, num_conv_pos_embeddings=8, unit_hifi_gan_vocab_size=20, t2u_num_langs=0, t2u_max_new_tokens=25, t2u_offset_tgt_lang=0, vocoder_offset=0, ): self.parent = parent self.input_modality = input_modality self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.vocab_size = vocab_size self.t2u_vocab_size = t2u_vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.max_position_embeddings = max_position_embeddings self.encoder_layers = encoder_layers self.decoder_layers = decoder_layers self.encoder_ffn_dim = encoder_ffn_dim self.decoder_ffn_dim = decoder_ffn_dim self.t2u_encoder_layers = t2u_encoder_layers self.t2u_decoder_layers = t2u_decoder_layers self.t2u_encoder_ffn_dim = t2u_encoder_ffn_dim self.t2u_decoder_ffn_dim = t2u_decoder_ffn_dim self.num_heads = num_heads 
self.num_attention_heads = num_heads self.vocoder_num_spkrs = vocoder_num_spkrs self.vocoder_num_langs = vocoder_num_langs self.upsample_initial_channel = upsample_initial_channel self.unit_embed_dim = unit_embed_dim self.spkr_embed_dim = spkr_embed_dim self.num_conv_pos_embeddings = num_conv_pos_embeddings self.lang_embed_dim = lang_embed_dim self.max_new_tokens = max_new_tokens self.unit_hifi_gan_vocab_size = unit_hifi_gan_vocab_size self.t2u_num_langs = t2u_num_langs self.t2u_max_new_tokens = t2u_max_new_tokens self.t2u_offset_tgt_lang = t2u_offset_tgt_lang self.vocoder_offset = vocoder_offset def prepare_config_and_inputs(self): if self.input_modality == "text": inputs = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) else: inputs = ids_tensor([self.batch_size, self.seq_length, 160], self.vocab_size - 1).float() input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1) lm_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = self.get_config() return config, inputs, decoder_input_ids, input_mask, lm_labels def get_config(self): return SeamlessM4TConfig( hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, vocab_size=self.vocab_size, t2u_vocab_size=self.t2u_vocab_size, hidden_size=self.hidden_size, speech_encoder_layers=self.num_heads, speech_encoder_intermediate_size=self.intermediate_size, max_position_embeddings=self.max_position_embeddings, encoder_layers=self.encoder_layers, decoder_layers=self.decoder_layers, encoder_ffn_dim=self.encoder_ffn_dim, decoder_ffn_dim=self.decoder_ffn_dim, t2u_encoder_layers=self.t2u_encoder_layers, t2u_decoder_layers=self.t2u_decoder_layers, t2u_encoder_ffn_dim=self.t2u_encoder_ffn_dim, t2u_decoder_ffn_dim=self.t2u_decoder_ffn_dim, num_attention_heads=self.num_heads, encoder_attention_heads=self.num_heads, decoder_attention_heads=self.num_heads, t2u_encoder_attention_heads=self.num_heads, t2u_decoder_attention_heads=self.num_heads, speech_encoder_attention_heads=self.num_heads, unit_hifigan_vocab_vise=self.t2u_vocab_size, vocoder_num_spkrs=self.vocoder_num_spkrs, vocoder_num_langs=self.vocoder_num_langs, upsample_initial_channel=self.upsample_initial_channel, unit_embed_dim=self.unit_embed_dim, spkr_embed_dim=self.spkr_embed_dim, num_conv_pos_embeddings=self.num_conv_pos_embeddings, lang_embed_dim=self.lang_embed_dim, max_new_tokens=self.max_new_tokens, unit_hifi_gan_vocab_size=self.unit_hifi_gan_vocab_size, t2u_num_langs=self.t2u_num_langs, t2u_max_new_tokens=self.t2u_max_new_tokens, t2u_offset_tgt_lang=self.t2u_offset_tgt_lang, vocoder_offset=self.vocoder_offset, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, decoder_input_ids, input_mask, lm_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, decoder_input_ids, input_mask, lm_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model(self, config, input_ids, decoder_input_ids, input_mask, labels): model = SeamlessM4TModel(config=config) model.to(torch_device) model.eval() if self.input_modality == "text": result = 
model(input_ids=input_ids, attention_mask=input_mask, decoder_input_ids=decoder_input_ids) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) else: result = model(input_features=input_ids, attention_mask=input_mask, decoder_input_ids=decoder_input_ids) result = model(input_features=input_ids, decoder_input_ids=decoder_input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) decoder_output = result.logits decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state if self.input_modality == "text": seq_length = self.seq_length else: # if speech, expected length has been subsampled. seq_length = model._compute_sub_sample_lengths_from_attention_mask(input_mask).max().item() self.parent.assertEqual(encoder_output.size(), (self.batch_size, seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, decoder_input_ids.shape[1], self.vocab_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(decoder_past), config.decoder_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, decoder_input_ids, input_mask, lm_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True model = SeamlessM4TModel(config=config) model.to(torch_device) model.eval() # make sure no pad token in decoder_input_ids decoder_input_ids = torch.clamp(decoder_input_ids, config.pad_token_id + 1) # first forward pass outputs = model( input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=input_mask, use_cache=True ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([decoder_input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( input_ids, decoder_input_ids=next_input_ids, decoder_attention_mask=next_attention_mask, output_hidden_states=True, ) output_from_no_past = output_from_no_past["decoder_hidden_states"][0] output_from_past = model( input_ids, decoder_input_ids=next_tokens, decoder_attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["decoder_hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, input_mask, lm_labels, ) = config_and_inputs input_name = "input_ids" if self.input_modality == "text" else "input_features" inputs_dict = { input_name: input_ids, "attention_mask": 
input_mask, "decoder_input_ids": decoder_input_ids, "labels": lm_labels, } return config, inputs_dict @require_torch class SeamlessM4TModelWithSpeechInputTest(ModelTesterMixin, unittest.TestCase): is_encoder_decoder = True fx_compatible = False test_missing_keys = False test_pruning = False test_model_parallel = False test_resize_embeddings = False test_headmasking = False test_torchscript = False all_model_classes = ( ( SeamlessM4TModel, SeamlessM4TForSpeechToSpeech, SeamlessM4TForSpeechToText, ) if is_torch_available() else () ) all_generative_model_classes = (SeamlessM4TForSpeechToText,) if is_torch_available() else () def setUp(self): self.model_tester = SeamlessM4TModelTester(self, input_modality="speech") self.config_tester = ConfigTester(self, config_class=SeamlessM4TConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "facebook/hf-seamless-m4t-medium" model = SeamlessM4TModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "pos_bias_v", "pos_bias_u", "pointwise_conv1", "pointwise_conv2", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", "adapter", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="SeamlessM4TSpeechEncoder doesn't have an embedding layer") def test_inputs_embeds(self): pass @unittest.skip(reason="SeamlessM4TSpeechEncoder doesn't have an embedding layer") def test_inputs_embeds_matches_input_ids(self): pass @unittest.skip( reason="Expected missing keys serve when using SeamlessM4TForXXX.from_pretrained from a checkpoint saved by SeamlessM4TModel.save_pretrained." ) def test_model_weights_reload_no_missing_tied_weights(self): pass @unittest.skip( reason="SeamlessM4TModel is base class but has actually a bigger architecture than seamlessM4T task-specific models." 
) def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="SeamlessM4TModel can takes input_ids or input_features") def test_forward_signature(self): pass @unittest.skip(reason="SeamlessM4T has no base model") def test_save_load_fast_init_from_base(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip( reason="This architecure has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245" ) def test_load_save_without_tied_weights(self): pass def test_attention_outputs(self): # expected length is subsampled so need to change a bit this test if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) # no more chunk_length test for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), 
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) sub_sampled_length = ( model._compute_sub_sample_lengths_from_attention_mask(inputs_dict["attention_mask"]).max().item() ) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, sub_sampled_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_retain_grad_hidden_states_attentions(self): # When training the model, the first speech encoder layer is sometimes skipped. # Setting the seed to always have the first layer. set_seed(0) super().test_retain_grad_hidden_states_attentions() @require_torch class SeamlessM4TModelWithTextInputTest( ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ): is_encoder_decoder = True fx_compatible = False test_missing_keys = False test_pruning = False test_model_parallel = False test_resize_embeddings = True test_headmasking = False test_torchscript = False all_model_classes = ( ( SeamlessM4TModel, SeamlessM4TForTextToSpeech, SeamlessM4TForTextToText, ) if is_torch_available() else () ) all_generative_model_classes = (SeamlessM4TForTextToText,) if is_torch_available() else () pipeline_model_mapping = ( { "automatic-speech-recognition": SeamlessM4TForSpeechToText, "feature-extraction": SeamlessM4TModel, "summarization": SeamlessM4TForTextToText, "text-to-audio": SeamlessM4TForTextToSpeech, "text2text-generation": SeamlessM4TForTextToText, "translation": SeamlessM4TForTextToText, } if is_torch_available() else {} ) def setUp(self): self.model_tester = SeamlessM4TModelTester(self, input_modality="text") self.config_tester = ConfigTester(self, config_class=SeamlessM4TConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "facebook/hf-seamless-m4t-medium" model = SeamlessM4TModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", 
"project_q.weight", "project_q.bias", "pos_bias_v", "pos_bias_u", "pointwise_conv1", "pointwise_conv2", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", "adapter", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip( reason="Expected missing keys serve when using SeamlessM4TForXXX.from_pretrained from a checkpoint saved by SeamlessM4TModel.save_pretrained." ) def test_model_weights_reload_no_missing_tied_weights(self): pass @unittest.skip(reason="SeamlessM4TModel can take input_ids or input_features") def test_forward_signature(self): pass def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) @unittest.skip( reason="SeamlessM4TModel is base class but has actually a bigger architecture than seamlessM4T task-specific models." ) def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="SeamlessM4T has no base model") def test_save_load_fast_init_from_base(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip( reason="In training model, the first encoder layer is sometimes skipped. Training is not supported yet, so the test is ignored." 
) def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip( reason="This architecure has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245" ) def test_load_save_without_tied_weights(self): pass @require_torch class SeamlessM4TGenerationTest(unittest.TestCase): # test that non-standard generation works # test generation of: SeamlessM4TModel, SeamlessM4TForSpeechToSpeech, SeamlessM4TForSpeechToText, SeamlessM4TForTextToSpeech def setUp(self): self.speech_model_tester = SeamlessM4TModelTester(self, input_modality="speech") self.text_model_tester = SeamlessM4TModelTester(self, input_modality="text") self.tmpdirname = tempfile.mkdtemp() def update_generation(self, model): lang_code_to_id = { "fra": 4, "eng": 4, } generation_config = copy.deepcopy(model.generation_config) generation_config.__setattr__("text_decoder_lang_to_code_id", lang_code_to_id) generation_config.__setattr__("t2u_lang_code_to_id", lang_code_to_id) generation_config.__setattr__("vocoder_lang_code_to_id", lang_code_to_id) generation_config._from_model_config = False model.generation_config = generation_config def prepare_text_input(self): config, inputs, decoder_input_ids, input_mask, lm_labels = self.text_model_tester.prepare_config_and_inputs() input_dict = { "input_ids": inputs, "attention_mask": input_mask, "tgt_lang": "eng", "num_beams": 2, "do_sample": True, } return config, input_dict def prepare_speech_input(self): config, inputs, decoder_input_ids, input_mask, lm_labels = self.speech_model_tester.prepare_config_and_inputs() input_dict = { "input_features": inputs, "attention_mask": input_mask, "tgt_lang": "fra", "num_beams": 2, "do_sample": True, } return config, input_dict def prepare_speech_and_text_input(self): config, inputs, decoder_input_ids, input_mask, lm_labels = self.speech_model_tester.prepare_config_and_inputs() input_speech = { "input_features": inputs, "attention_mask": input_mask, "tgt_lang": "fra", "num_beams": 2, "do_sample": True, } config, inputs, decoder_input_ids, input_mask, lm_labels = self.text_model_tester.prepare_config_and_inputs() input_text = { "input_ids": inputs, "attention_mask": input_mask, "tgt_lang": "eng", "num_beams": 2, "do_sample": True, } return config, input_speech, input_text def factory_generation_speech_test(self, model, inputs): set_seed(0) output = model.generate(**inputs) return output def test_speech_generation(self): config, input_speech, input_text = self.prepare_speech_and_text_input() model = SeamlessM4TModel(config=config) self.update_generation(model) model.save_pretrained(self.tmpdirname) model.to(torch_device) model.eval() output_original_text = self.factory_generation_speech_test(model, input_text) output_original_speech = self.factory_generation_speech_test(model, input_speech) state_dict = model.state_dict() text_model = SeamlessM4TForTextToSpeech.from_pretrained(self.tmpdirname) self.update_generation(text_model) text_model.to(torch_device) text_model.eval() output_text = self.factory_generation_speech_test(model, input_text) speech_model = SeamlessM4TForSpeechToSpeech.from_pretrained(self.tmpdirname) self.update_generation(speech_model) speech_model.to(torch_device) speech_model.eval() for name, tensor in speech_model.state_dict().items(): right_tensor = state_dict.get(name) self.assertEqual(tensor.tolist(), right_tensor.tolist(), f"Tensor {name}") output_speech = self.factory_generation_speech_test(model, input_speech) # test same text output from 
input text self.assertListEqual(output_original_text[0].ravel().tolist(), output_text[0].ravel().tolist()) self.assertListEqual(output_original_text[1].ravel().tolist(), output_text[1].ravel().tolist()) # test same speech output from input text # assertTrue because super long list makes this hang in case of failure self.assertTrue( output_original_speech[0].ravel().tolist() == output_speech[0].ravel().tolist(), "Speech generated was different", ) self.assertTrue( output_original_speech[1].ravel().tolist() == output_speech[1].ravel().tolist(), "Speech generated was different", ) def test_text_generation(self): config, input_speech, input_text = self.prepare_speech_and_text_input() # to return speech input_speech["generate_speech"] = False input_text["generate_speech"] = False model = SeamlessM4TModel(config=config) self.update_generation(model) model.save_pretrained(self.tmpdirname) model.to(torch_device) model.eval() output_original_text = self.factory_generation_speech_test(model, input_text) output_original_speech = self.factory_generation_speech_test(model, input_speech) # other models don't need it input_speech.pop("generate_speech") input_text.pop("generate_speech") state_dict = model.state_dict() text_model = SeamlessM4TForTextToText.from_pretrained(self.tmpdirname) self.update_generation(text_model) text_model.to(torch_device) text_model.eval() for name, tensor in text_model.state_dict().items(): right_tensor = state_dict.get(name) self.assertEqual(tensor.tolist(), right_tensor.tolist()) output_text = self.factory_generation_speech_test(text_model, input_text) speech_model = SeamlessM4TForSpeechToText.from_pretrained(self.tmpdirname) for name, tensor in speech_model.state_dict().items(): right_tensor = state_dict.get(name) self.assertEqual(tensor.tolist(), right_tensor.tolist(), f"Tensor {name}") self.update_generation(speech_model) speech_model.to(torch_device) speech_model.eval() output_speech = self.factory_generation_speech_test(speech_model, input_speech) # test same text output from input text self.assertListEqual(output_original_text[0].ravel().tolist(), output_text.ravel().tolist()) # test same speech output from input text self.assertListEqual(output_original_speech[0].ravel().tolist(), output_speech.ravel().tolist()) def test_generation(self): config, input_speech, input_text = self.prepare_speech_and_text_input() input_speech["num_beams"] = 3 input_speech["do_sample"] = True input_speech["num_return_sequences"] = 3 input_text["num_beams"] = 3 input_text["do_sample"] = True input_text["num_return_sequences"] = 3 for model_class in [SeamlessM4TForSpeechToSpeech, SeamlessM4TForSpeechToText, SeamlessM4TModel]: model = model_class(config=config) self.update_generation(model) model.to(torch_device) model.eval() output = model.generate(**input_speech) output = output[0] if isinstance(output, tuple) else output self.assertEqual(output.shape[0], 3 * input_speech["input_features"].shape[0]) for model_class in [SeamlessM4TForTextToSpeech, SeamlessM4TForTextToText, SeamlessM4TModel]: model = model_class(config=config) self.update_generation(model) model.to(torch_device) model.eval() output = model.generate(**input_text) output = output[0] if isinstance(output, tuple) else output self.assertEqual(output.shape[0], 3 * input_text["input_ids"].shape[0]) @require_torch class SeamlessM4TModelIntegrationTest(unittest.TestCase): repo_id = "facebook/hf-seamless-m4t-medium" def assertListAlmostEqual(self, list1, list2, tol=1e-3): self.assertEqual(len(list1), len(list2)) for a, b in zip(list1, 
list2): self.assertAlmostEqual(a, b, delta=tol) @cached_property def processor(self): return SeamlessM4TProcessor.from_pretrained(self.repo_id) @cached_property def input_text(self): # corresponds to "C'est un test." with seamlessM4T_medium checkpoint input_ids = torch.tensor([[256057, 152, 248116, 354, 159, 7356, 248075, 3]]) # fmt: skip input_ids = input_ids.to(torch_device) attention_mask = torch.ones_like(input_ids).to(torch_device) inputs = { "attention_mask": attention_mask, "input_ids": input_ids, } return inputs @cached_property def input_audio(self): set_seed(0) seq_len = 20000 sampling_rate = 16000 input_features = torch.rand((2, seq_len)) return self.processor(audios=[input_features.tolist()], sampling_rate=sampling_rate, return_tensors="pt").to( torch_device ) def factory_test_task(self, class1, class2, inputs, class1_kwargs, class2_kwargs): model1 = class1.from_pretrained(self.repo_id).to(torch_device) model2 = class2.from_pretrained(self.repo_id).to(torch_device) set_seed(0) output_1 = model1.generate(**inputs, **class1_kwargs) set_seed(0) output_2 = model2.generate(**inputs, **class2_kwargs) for key in output_1: if isinstance(output_1[key], torch.Tensor): if len(output_1[key].shape) == 0: self.assertEqual(output_1[key].item(), output_2[key].item()) else: self.assertListAlmostEqual(output_1[key].squeeze().tolist(), output_2[key].squeeze().tolist()) @slow def test_to_eng_text(self): model = SeamlessM4TModel.from_pretrained(self.repo_id).to(torch_device) # test text - tgt lang: eng expected_text_tokens = [3, 256047, 3291, 248116, 248066, 9, 7356, 248075, 3] # fmt: skip # fmt: off expected_unit_tokens = [ 2,10051,8980,8212,949,1270,4311,1123,5918,2333,5311,3882,2415,5284,1123,612,8816,6370,5386,7334,4345,5645, 9437,5748,1378,9818,4319,7968,7375,2909,9119,5151,8728,5335,3896,4013,8939,8885,6048,9530,3167,5833,1072,693, 431,9867,364,7909,4608,5938,1889,9984,7947,4944,6171,3767,9861,9169,1187,8365,4571,7635,7784,7635,800,2393, 32,5380,5852,8289,2530,2762,1833,2056,3553,4641,3553,5683,370,2288,1344,1518,7534,703,8359,7699,2 ] # fmt: on expected_wav_slice = [-3e-05, -0.0004, -0.00037, -0.00013, -6e-05, 0.00012, -0.00016, 0.00025, 7e-05, -3e-05] # fmt: skip set_seed(0) output = model.generate(**self.input_text, num_beams=1, tgt_lang="eng", return_intermediate_token_ids=True) self.assertListEqual(expected_text_tokens, output.sequences.squeeze().tolist()) # FOR NOW, only first units correspondance self.assertListEqual(expected_unit_tokens[:10], output.unit_sequences.squeeze().tolist()[:10]) self.assertListAlmostEqual(expected_wav_slice, output.waveform.squeeze().tolist()[50:60]) @slow def test_to_swh_text(self): model = SeamlessM4TModel.from_pretrained(self.repo_id).to(torch_device) # test text - tgt lang: swh expected_text_tokens = [3, 256168, 1665, 188589, 7040, 248075, 3] # fmt: skip # fmt: off expected_unit_tokens = [ 2,10071,5729,9995,3089,7546,1204,1721,2532,4340,5623,3496,432,7730,9096,7677,3143,8211,6447,8399,4248,3565, 4529,7700,9308,217,6476,3485,9667,3194,8476,4923,5593,1148,4466,7416,4872,463,4872,253,2348,4640,3450,2133, 6318,2806,817,7613,2698,6563,8712,8344,9286,6878,6387,4281,6387,640,6387,3200,640,8355,640,6708,979,1738,2 ] # fmt: on expected_wav_slice = [1e-05, -7e-05, -4e-05, -4e-05, -6e-05, -9e-05, -0.0001, -2e-05, -7e-05, -2e-05] # fmt: skip set_seed(0) output = model.generate(**self.input_text, num_beams=1, tgt_lang="swh", return_intermediate_token_ids=True) self.assertListEqual(expected_text_tokens, output.sequences.squeeze().tolist()) 
self.assertListEqual(expected_unit_tokens[:10], output.unit_sequences.squeeze().tolist()[:10]) self.assertListAlmostEqual(expected_wav_slice, output.waveform.squeeze().tolist()[50:60]) @slow def test_to_rus_speech(self): model = SeamlessM4TModel.from_pretrained(self.repo_id).to(torch_device) # test audio - tgt lang: rus expected_text_tokens = [3, 256147, 1197, 73565, 3413, 537, 233331, 248075, 3] # fmt: skip # fmt: off expected_unit_tokens = [ 2, 10067, 5729, 4798, 9631, 8378, 4446, 2393, 6901, 5983, 2817, 4629, 8532, 1991, 2931, 8576, 8857, 5936, 4317, 9000, 7740, 7995, 1225, 5980, 6094, 1420, 5373, 8771, 6600, 4487, 7029, 3630, 6740, 4870, 1483, 3003, 5585, 5511, 7465, 3222, 32, 6272, 1950, 3120, 5368, 639, 3713, 5935, 7943, 567, 6129, 6822, 1226, 5063, 9878, 7756, 8825, 1078, 5943, 457, 9282, 9668, 817, 7613, 2698, 6563, 8712, 8704, 9286, 8704, 6387, 4281, 6387, 640, 3200, 6387, 640, 8355, 6708, 979, 1738, 2 ] # fmt: on expected_wav_slice = [0.00013, 0.00012, 0.00014, 3e-05, 0.0, -6e-05, -0.00018, -0.00016, -0.00021, -0.00018] # fmt: skip set_seed(0) output = model.generate(**self.input_audio, num_beams=1, tgt_lang="rus", return_intermediate_token_ids=True) self.assertListEqual(expected_text_tokens, output.sequences.squeeze().tolist()) self.assertListEqual(expected_unit_tokens[:10], output.unit_sequences.squeeze().tolist()[:10]) self.assertListAlmostEqual(expected_wav_slice, output.waveform.squeeze().tolist()[50:60]) @slow def test_text_to_text_model(self): kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True, "generate_speech": False} kwargs2 = { "tgt_lang": "eng", "output_hidden_states": True, "return_dict_in_generate": True, "output_scores": True, } self.factory_test_task(SeamlessM4TModel, SeamlessM4TForTextToText, self.input_text, kwargs1, kwargs2) @slow def test_speech_to_text_model(self): kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True, "generate_speech": False} kwargs2 = { "tgt_lang": "eng", "output_hidden_states": True, "return_dict_in_generate": True, "output_scores": True, } self.factory_test_task(SeamlessM4TModel, SeamlessM4TForSpeechToText, self.input_audio, kwargs1, kwargs2) @slow def test_speech_to_speech_model(self): kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True} self.factory_test_task(SeamlessM4TModel, SeamlessM4TForSpeechToSpeech, self.input_audio, kwargs1, kwargs1) @slow def test_text_to_speech_model(self): kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True} self.factory_test_task(SeamlessM4TModel, SeamlessM4TForTextToSpeech, self.input_text, kwargs1, kwargs1)
transformers/tests/models/seamless_m4t/test_modeling_seamless_m4t.py/0
{ "file_path": "transformers/tests/models/seamless_m4t/test_modeling_seamless_m4t.py", "repo_id": "transformers", "token_count": 21270 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the SpeechT5 processors.""" import json import os import shutil import tempfile import unittest from transformers import is_speech_available, is_torch_available from transformers.models.speecht5 import SpeechT5Tokenizer from transformers.testing_utils import get_tests_dir, require_torch from transformers.utils import FEATURE_EXTRACTOR_NAME if is_speech_available() and is_torch_available(): from transformers import SpeechT5FeatureExtractor, SpeechT5Processor from .test_feature_extraction_speecht5 import floats_list SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model") @require_torch class SpeechT5ProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB) tokenizer.save_pretrained(self.tmpdirname) feature_extractor_map = { "feature_size": 1, "padding_value": 0.0, "sampling_rate": 16000, "do_normalize": False, "num_mel_bins": 80, "hop_length": 16, "win_length": 64, "win_function": "hann_window", "fmin": 80, "fmax": 7600, "mel_floor": 1e-10, "reduction_factor": 2, "return_attention_mask": True, } self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME) with open(self.feature_extraction_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(feature_extractor_map) + "\n") def get_tokenizer(self, **kwargs): return SpeechT5Tokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_feature_extractor(self, **kwargs): return SpeechT5FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) processor.save_pretrained(self.tmpdirname) processor = SpeechT5Processor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, SpeechT5Tokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, SpeechT5FeatureExtractor) def test_save_load_pretrained_additional_features(self): processor = SpeechT5Processor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0) processor = SpeechT5Processor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, SpeechT5Tokenizer) 
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, SpeechT5FeatureExtractor) def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(audio=raw_speech, return_tensors="np") input_processor = processor(audio=raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_feature_extractor_target(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(audio_target=raw_speech, return_tensors="np") input_processor = processor(audio_target=raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_tokenizer_target(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text_target=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_tokenizer_decode(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", )
transformers/tests/models/speecht5/test_processor_speecht5.py/0
{ "file_path": "transformers/tests/models/speecht5/test_processor_speecht5.py", "repo_id": "transformers", "token_count": 2860 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ( ImageProcessingTestMixin, prepare_image_inputs, ) if is_torch_available(): import torch from transformers.models.superpoint.modeling_superpoint import SuperPointKeypointDescriptionOutput if is_vision_available(): from transformers import SuperPointImageProcessor class SuperPointImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_grayscale=True, ): size = size if size is not None else {"height": 480, "width": 640} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_grayscale = do_grayscale def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_grayscale": self.do_grayscale, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) def prepare_keypoint_detection_output(self, pixel_values): max_number_keypoints = 50 batch_size = len(pixel_values) mask = torch.zeros((batch_size, max_number_keypoints)) keypoints = torch.zeros((batch_size, max_number_keypoints, 2)) scores = torch.zeros((batch_size, max_number_keypoints)) descriptors = torch.zeros((batch_size, max_number_keypoints, 16)) for i in range(batch_size): random_number_keypoints = np.random.randint(0, max_number_keypoints) mask[i, :random_number_keypoints] = 1 keypoints[i, :random_number_keypoints] = torch.rand((random_number_keypoints, 2)) scores[i, :random_number_keypoints] = torch.rand((random_number_keypoints,)) descriptors[i, :random_number_keypoints] = torch.rand((random_number_keypoints, 16)) return SuperPointKeypointDescriptionOutput( loss=None, keypoints=keypoints, scores=scores, descriptors=descriptors, mask=mask, hidden_states=None ) @require_torch @require_vision class SuperPointImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = SuperPointImageProcessor if is_vision_available() else None def setUp(self) -> None: super().setUp() self.image_processor_tester = SuperPointImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processing(self): image_processing = 
self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_grayscale")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 480, "width": 640}) image_processor = self.image_processing_class.from_dict( self.image_processor_dict, size={"height": 42, "width": 42} ) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) @unittest.skip(reason="SuperPointImageProcessor is always supposed to return a grayscaled image") def test_call_numpy_4_channels(self): pass def test_input_image_properly_converted_to_grayscale(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs() pre_processed_images = image_processor.preprocess(image_inputs) for image in pre_processed_images["pixel_values"]: self.assertTrue(np.all(image[0, ...] == image[1, ...]) and np.all(image[1, ...] == image[2, ...])) @require_torch def test_post_processing_keypoint_detection(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs() pre_processed_images = image_processor.preprocess(image_inputs, return_tensors="pt") outputs = self.image_processor_tester.prepare_keypoint_detection_output(**pre_processed_images) def check_post_processed_output(post_processed_output, image_size): for post_processed_output, image_size in zip(post_processed_output, image_size): self.assertTrue("keypoints" in post_processed_output) self.assertTrue("descriptors" in post_processed_output) self.assertTrue("scores" in post_processed_output) keypoints = post_processed_output["keypoints"] all_below_image_size = torch.all(keypoints[:, 0] <= image_size[1]) and torch.all( keypoints[:, 1] <= image_size[0] ) all_above_zero = torch.all(keypoints[:, 0] >= 0) and torch.all(keypoints[:, 1] >= 0) self.assertTrue(all_below_image_size) self.assertTrue(all_above_zero) tuple_image_sizes = [(image.size[0], image.size[1]) for image in image_inputs] tuple_post_processed_outputs = image_processor.post_process_keypoint_detection(outputs, tuple_image_sizes) check_post_processed_output(tuple_post_processed_outputs, tuple_image_sizes) tensor_image_sizes = torch.tensor([image.size for image in image_inputs]).flip(1) tensor_post_processed_outputs = image_processor.post_process_keypoint_detection(outputs, tensor_image_sizes) check_post_processed_output(tensor_post_processed_outputs, tensor_image_sizes)
transformers/tests/models/superpoint/test_image_processing_superpoint.py/0
{ "file_path": "transformers/tests/models/superpoint/test_image_processing_superpoint.py", "repo_id": "transformers", "token_count": 2967 }
# coding=utf-8 # Copyright 2021 Google T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import numpy as np import transformers from transformers import is_flax_available from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_sentencepiece, require_tokenizers, slow, ) from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" import jax import jax.numpy as jnp import optax from flax.core.frozen_dict import unfreeze from flax.training.common_utils import onehot from flax.traverse_util import flatten_dict from transformers import FLAX_MODEL_MAPPING, ByT5Tokenizer, T5Config, T5Tokenizer from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.models.t5.modeling_flax_t5 import ( FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model, shift_tokens_right, ) class FlaxT5ModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, # For common tests is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = 
ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) config = T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ): model = FlaxT5Model(config=config) result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.shape, (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size)) def check_use_cache_forward_with_attn_mask( self, model_class_name, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(input_ids) # prevent fully zero'd out attention mask decoder_attention_mask = jnp.ones_like(decoder_attention_mask) decoder_attention_mask_cache = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ], axis=-1, ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, ) outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, ) outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return config, inputs_dict @require_flax class FlaxT5ModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase): all_model_classes = (FlaxT5Model, FlaxT5ForConditionalGeneration) if is_flax_available() else () all_generative_model_classes = (FlaxT5ForConditionalGeneration,) if is_flax_available() else () is_encoder_decoder = True def setUp(self): self.model_tester = FlaxT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def 
test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # check that gated gelu feed forward and different word embeddings work config = config_and_inputs[0] config.tie_word_embeddings = False config.feed_forward_proj = "gated-gelu" self.model_tester.create_and_check_model(config, *config_and_inputs[1:]) def test_use_cache_forward_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(model_class, *config_and_inputs) def test_encode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def encode_jitted(input_ids, attention_mask=None, **kwargs): return model.encode(input_ids=input_ids, attention_mask=attention_mask) with self.subTest("JIT Enabled"): jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = encode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_decode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"]) prepared_inputs_dict = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs): return model.decode( decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, ) with self.subTest("JIT Enabled"): jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = decode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_shift_right(self): decoder_start_token_id = 0 pad_token_id = 1 labels = np.arange(2, 102).reshape(5, 20) labels[:2, 15:] = -100 decoder_input_ids = shift_tokens_right(labels, pad_token_id, decoder_start_token_id) np_decoder_input_ids = np.array(decoder_input_ids) padded_slice = np_decoder_input_ids[:2, (15 + 1) :] self.assertTrue((padded_slice == 1).all()) not_padded_slice = np_decoder_input_ids[2:, 1:] rolled_labels = np.roll(labels[2:], 1)[:, 1:] self.assertTrue((not_padded_slice == rolled_labels).all()) self.assertTrue((np_decoder_input_ids[:, 0] == 0).all()) # overwrite since special base model prefix is used def test_save_load_from_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = 
flatten_dict(unfreeze(model.params)) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname) base_param_from_head = flatten_dict(unfreeze(head_model.params)) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used def test_save_load_to_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params)) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used @is_pt_flax_cross_test def test_save_load_from_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: # save pt model pt_model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname, from_pt=True) base_param_from_head = flatten_dict(unfreeze(head_model.params)) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used @is_pt_flax_cross_test def test_save_load_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used @is_pt_flax_cross_test 
def test_save_load_bf16_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) model.params = model.to_bf16(model.params) base_params_from_head = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") class FlaxT5EncoderOnlyModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, # For common tests is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length # For common tests self.seq_length = self.encoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = 0 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, is_encoder_decoder=False, ) return ( config, input_ids, attention_mask, ) def create_and_check_model( self, config, input_ids, attention_mask, ): model = FlaxT5EncoderModel(config=config) result = model( input_ids=input_ids, attention_mask=attention_mask, ) result = model(input_ids=input_ids) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.shape, (self.batch_size, self.encoder_seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = 
self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_flax class FlaxT5EncoderOnlyModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = (FlaxT5EncoderModel,) if is_flax_available() else () is_encoder_decoder = False def setUp(self): self.model_tester = FlaxT5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # check that gated gelu feed forward and different word embeddings work config = config_and_inputs[0] config.tie_word_embeddings = False config.feed_forward_proj = "gated-gelu" self.model_tester.create_and_check_model(config, *config_and_inputs[1:]) def test_encode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def encode_jitted(input_ids, attention_mask=None, **kwargs): return model(input_ids=input_ids, attention_mask=attention_mask) with self.subTest("JIT Enabled"): jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = encode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) # overwrite since special base model prefix is used def test_save_load_from_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname) base_param_from_head = flatten_dict(unfreeze(head_model.params)) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used def test_save_load_to_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params)) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used 
@is_pt_flax_cross_test def test_save_load_from_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: # save pt model pt_model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname, from_pt=True) base_param_from_head = flatten_dict(unfreeze(head_model.params)) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used @is_pt_flax_cross_test def test_save_load_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite since special base model prefix is used @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) model.params = model.to_bf16(model.params) base_params_from_head = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @require_sentencepiece @require_tokenizers @require_flax class FlaxT5ModelIntegrationTests(unittest.TestCase): @slow def test_small_integration_test(self): """ For comparison run: >>> import t5 # pip install t5==0.7.1 >>> from
t5.data.sentencepiece_vocabulary import SentencePieceVocabulary >>> path_to_mtf_small_t5_checkpoint = '<fill_in>' >>> path_to_mtf_small_spm_model_path = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, batch_size=1, tpu=None) >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100) >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") input_ids = tokenizer("Hello there", return_tensors="np").input_ids labels = tokenizer("Hi I am", return_tensors="np").input_ids decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id) logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean() mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -19.0845 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_v1_1_integration_test(self): """ For comparison run: >>> import t5 # pip install t5==0.7.1 >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary >>> path_to_mtf_small_t5_v1_1_checkpoint = '<fill_in>' >>> path_to_mtf_small_spm_model_path = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1_1_checkpoint, batch_size=1, tpu=None) >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100) >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = FlaxT5ForConditionalGeneration.from_pretrained("google/t5-v1_1-small") tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-small") input_ids = tokenizer("Hello there", return_tensors="np").input_ids labels = tokenizer("Hi I am", return_tensors="np").input_ids decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id) logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean() mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -59.0293 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_byt5_integration_test(self): """ For comparison run: >>> import t5 # pip install t5==0.9.1 >>> path_to_byt5_small_checkpoint = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_byt5_small_checkpoint, batch_size=1, tpu=None) >>> vocab = t5.data.ByteVocabulary() >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = FlaxT5ForConditionalGeneration.from_pretrained("google/byt5-small") tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small") input_ids = tokenizer("Hello there", return_tensors="np").input_ids labels = tokenizer("Hi I am", return_tensors="np").input_ids decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id) logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean() mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -60.7397 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_generation(self): model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") model.config.max_length = 8
model.config.num_beams = 1 model.config.do_sample = False tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") input_ids = tokenizer("summarize: Hello there", return_tensors="np").input_ids sequences = model.generate(input_ids).sequences output_str = tokenizer.batch_decode(sequences, skip_special_tokens=True)[0] self.assertTrue(output_str == "Hello there!") @slow def test_small_generation_bfloat16(self): model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-small", dtype=jnp.bfloat16) model.config.max_length = 8 model.config.num_beams = 1 model.config.do_sample = False tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") input_ids = tokenizer("summarize: Hello there", return_tensors="np").input_ids sequences = model.generate(input_ids).sequences output_str = tokenizer.batch_decode(sequences, skip_special_tokens=True)[0] self.assertTrue(output_str == "Hello there!") @slow def test_summarization(self): model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-base") tok = T5Tokenizer.from_pretrained("google-t5/t5-base") FRANCE_ARTICLE = ( # @noqa "Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings" " Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane." ' Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation."' ' He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s' " comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French" " Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a" " phone at the wreckage site. The two publications described the supposed video, but did not post it on" " their websites. The publications said that they watched the video, which was found by a source close to" " the investigation. \"One can hear cries of 'My God' in several languages,\" Paris Match reported." ' "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the' " cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the" ' screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt,' " editor-in-chief of Bild online. An official with France's accident investigation agency, the BEA, said" " the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman" " in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the" ' reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said,' ' but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be' " sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by" " specialized technicians working hand-in-hand with investigators. But none of the cell phones found so" " far have been sent to the institute, Menichini said. Asked whether staff involved in the search could" ' have leaked a memory card to the media, Menichini answered with a categorical "no." 
Reichelt told "Erin' ' Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match' ' are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is something' " we did not know before. ... Overall we can say many things of the investigation weren't revealed by the" ' investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline' " Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the" " controls of Germanwings Flight 9525, which he's accused of deliberately crashing last week in the" ' French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of' ' severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school' " discovered in an internal investigation, Lufthansa said, included medical documents he submitted in" " connection with resuming his flight training. The announcement indicates that Lufthansa, the parent" " company of Germanwings, knew of Lubitz's battle with depression, allowed him to continue training and" " ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100%" ' fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was' " sharing the information and documents -- including training and medical records -- with public" " prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the" " past week to recover human remains and plane debris scattered across a steep mountainside. He saw the" " crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash" " site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late" " Tuesday that no visible human remains were left at the site but recovery teams would keep searching." " French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all" " the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini said." " Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew" " on board. Check out the latest from our correspondents . The details about Lubitz's correspondence with" " the flight school during his training were among several developments as investigators continued to" " delve into what caused the crash and Lubitz's possible motive for downing the jet. A Lufthansa" " spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his" ' examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in' " Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at" " some point before his aviation career and underwent psychotherapy before he got his pilot's license." " Kumpa emphasized there's no evidence suggesting Lubitz was suicidal or acting aggressively before the" " crash. 
Investigators are looking into whether Lubitz feared his medical condition would cause him to" " lose his pilot's license, a European government official briefed on the investigation told CNN on" ' Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being' " considered. Another source, a law enforcement official briefed on the investigation, also told CNN that" " authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would" " not be allowed to fly because of his medical problems. Lubitz's girlfriend told investigators he had" " seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded" " he had psychological issues, the European government official said. But no matter what details emerge" " about his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact' " that maybe they weren't going to keep doing their job and they're upset about that and so they're" ' suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to' " also take that rage and turn it outward on 149 other people who had nothing to do with the person's" ' problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight' " 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura" " Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine" " Amiel and Anna-Maja Rappard contributed to this report." ) SHORTER_ARTICLE = ( "(CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. 
"Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." ) IRAN_ARTICLE = ( "(CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran" " in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively" " block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger." " Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli" " Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a" " letter to the Iranian leadership warning them away from a deal. The debate that has already begun since" " the announcement of the new framework will likely result in more heat than light. It will not be helped" " by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: ." " The most misleading assertion, despite universal rejection by experts, is that the negotiations'" " objective at the outset was the total elimination of any nuclear program in Iran. That is the position" " of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it" " had been, there would have been no Iranian team at the negotiating table. Rather, the objective has" " always been to structure an agreement or series of agreements so that Iran could not covertly develop a" " nuclear arsenal before the United States and its allies could respond. The new framework has exceeded" " expectations in achieving that goal. 
It would reduce Iran's low-enriched uranium stockpile, cut by" " two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another" " dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite" " sharp accusations by some in the United States and its allies, Iran denies having such a program, and" " U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's" " continued cooperation with International Atomic Energy Agency inspections is further evidence on this" " point, and we'll know even more about Iran's program in the coming months and years because of the deal." " In fact, the inspections provisions that are part of this agreement are designed to protect against any" " covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that" " the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter" " warning that a deal might be killed by Congress or a future president). This of course is not the case." " The talks were between Iran and the five permanent members of the U.N. Security Council (United States," " United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has" " played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement" " reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran" " and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement" " contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the" " case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased" " or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes" " Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear" " sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going" " forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such" " a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the" ' agreement should be a formal treaty requiring the Senate to "advise and consent." But the issue is not' " suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New" " START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement" " with Iran will not be so balanced. The restrictions and obligations in the final framework agreement" " will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove" " most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally" " some insist that any agreement must address Iranian missile programs, human rights violations or support" " for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are" " unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in" " the negotiations would be a poison pill. This agreement should be judged on its merits and on how it" " affects the security of our negotiating partners and allies, including Israel. 
Those judgments should be" " fact-based, not based on questionable assertions or dubious assumptions." ) ARTICLE_SUBWAY = ( "New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." ) expected_summaries = [ 'prosecutor: "so far no videos were used in the crash investigation" two magazines claim to have found a' " cell phone video of the final seconds . \"one can hear cries of 'My God' in several languages,\" one" " magazine says . all 150 on board were killed in the crash .", "the formal accession was marked by a ceremony at The Hague, in the Netherlands . the ICC opened a" " preliminary examination into the situation in the occupied Palestinian territory . as members of the" " court, Palestinians may be subject to counter-charges as well .", "the u.s. and its negotiating partners reached a very strong framework agreement with Iran . aaron miller:" " the debate that has already begun since the announcement of the new framework will likely result in more" " heat than light . he says the new framework would reduce Iran's low-enriched uranium stockpile and cut" " centrifuges . miller: if it had been, there would have been no Iranian team at the table .", "prosecutors say the marriages were part of an immigration scam . 
if convicted, barrientos faces two" ' criminal counts of "offering a false instrument for filing in the first degree" she has been married 10' " times, with nine of her marriages occurring between 1999 and 2002 .", ] dct = tok( ["summarize: " + x for x in [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY]], padding="max_length", truncation=True, return_tensors="np", ) self.assertEqual(512, dct["input_ids"].shape[1]) hypotheses_batch = model.generate( **dct, num_beams=4, length_penalty=2.0, max_length=142, min_length=56, do_sample=False, early_stopping=True, ).sequences decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertListEqual( expected_summaries, decoded, )
transformers/tests/models/t5/test_modeling_flax_t5.py/0
{ "file_path": "transformers/tests/models/t5/test_modeling_flax_t5.py", "repo_id": "transformers", "token_count": 23924 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch TimeSformer model.""" import copy import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) if is_vision_available(): from transformers import VideoMAEImageProcessor class TimesformerModelTester: def __init__( self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_labels=10, initializer_range=0.02, attention_type="divided_space_time", scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.patch_size = patch_size self.num_frames = num_frames self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.attention_type = attention_type self.initializer_range = initializer_range self.scope = scope self.num_labels = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token self.num_patches_per_frame = (image_size // patch_size) ** 2 self.seq_length = (num_frames) * self.num_patches_per_frame + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): config = TimesformerConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, attention_type=self.attention_type, ) config.num_labels = self.num_labels return config def create_and_check_model(self, config, pixel_values, labels): model = TimesformerModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_video_classification(self, config, pixel_values, labels): model = TimesformerForVideoClassification(config) model.to(torch_device) model.eval() result = model(pixel_values) # verify the logits shape expected_shape = torch.Size((self.batch_size, self.num_labels)) self.parent.assertEqual(result.logits.shape, expected_shape) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as TimeSformer does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification} if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = TimesformerModelTester(self) self.config_tester = ConfigTester( self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37 ) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING): inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="TimeSformer does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_video_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "facebook/timesformer-base-finetuned-k400" model = TimesformerModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model has no attentions") else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: seq_len = self.model_tester.seq_length num_frames = self.model_tester.num_frames 
inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(hidden_states), expected_num_layers) seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # We will verify our results on a video of eating spaghetti # Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227] def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" ) video = np.load(file) return list(video) @require_torch @require_vision class TimesformerModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]) if is_vision_available() else None ) @slow def test_inference_for_video_classification(self): model = 
TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to( torch_device ) image_processor = self.default_image_processor video = prepare_video() inputs = image_processor(video[:8], return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 400)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/timesformer/test_modeling_timesformer.py/0
{ "file_path": "transformers/tests/models/timesformer/test_modeling_timesformer.py", "repo_id": "transformers", "token_count": 5966 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch VideoMAE model.""" import copy import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_sdpa, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) if is_vision_available(): from transformers import VideoMAEImageProcessor class VideoMAEModelTester: def __init__( self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, tubelet_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, mask_ratio=0.9, scope=None, attn_implementation="eager", ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.patch_size = patch_size self.tubelet_size = tubelet_size self.num_frames = num_frames self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.mask_ratio = mask_ratio self.scope = scope self.attn_implementation = attn_implementation # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame self.num_patches_per_frame = (image_size // patch_size) ** 2 self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos self.num_masks = int(mask_ratio * self.seq_length) def prepare_config_and_inputs(self): pixel_values = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return VideoMAEConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, 
tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, decoder_hidden_size=self.hidden_size, decoder_intermediate_size=self.intermediate_size, decoder_num_attention_heads=self.num_attention_heads, decoder_num_hidden_layers=self.num_hidden_layers, attn_implementation=self.attn_implementation, ) def create_and_check_model(self, config, pixel_values, labels): model = VideoMAEModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_pretraining(self, config, pixel_values, labels): model = VideoMAEForPreTraining(config) model.to(torch_device) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch mask = torch.ones((self.num_masks,)) mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))]) bool_masked_pos = mask.expand(self.batch_size, -1).bool() result = model(pixel_values, bool_masked_pos) # model only returns predictions for masked patches num_masked_patches = mask.sum().item() decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as VideoMAE does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification} if is_torch_available() else {} ) test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = VideoMAEModelTester(self) self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch mask = torch.ones((self.model_tester.num_masks,)) mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))]) batch_size = inputs_dict["pixel_values"].shape[0] bool_masked_pos = mask.expand(batch_size, -1).bool() inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device) if return_labels: if model_class in [ *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict @unittest.skip("`mse_cpu` not implemented for 'BFloat16'") @require_torch_sdpa def test_eager_matches_sdpa_inference_1_bfloat16(self): pass def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="VideoMAE does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "MCG-NJU/videomae-base" model = VideoMAEModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not have attentions") else: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks seq_len = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): 
outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(hidden_states), expected_num_layers) num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # We will verify our results on a video of eating spaghetti # Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227] def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" ) video = np.load(file) return list(video) @require_torch @require_vision class VideoMAEModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]) if is_vision_available() else None ) @slow def test_inference_for_video_classification(self): model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to( torch_device ) image_processor = self.default_image_processor video = prepare_video() inputs = image_processor(video, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 400)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4) @slow def test_inference_for_pretraining(self): model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device) image_processor = self.default_image_processor video = prepare_video() inputs 
= image_processor(video, return_tensors="pt").to(torch_device) # add boolean mask, indicating which patches to mask local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt") inputs["bool_masked_pos"] = torch.load(local_path) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor( [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device ) self.assertEqual(outputs.logits.shape, expected_shape) torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) # verify the loss (`config.norm_pix_loss` = `True`) expected_loss = torch.tensor([0.5142], device=torch_device) torch.testing.assert_close(outputs.loss, expected_loss, rtol=1e-4, atol=1e-4) # verify the loss (`config.norm_pix_loss` = `False`) model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to( torch_device ) with torch.no_grad(): outputs = model(**inputs) expected_loss = torch.tensor([0.6469], device=torch_device) torch.testing.assert_close(outputs.loss, expected_loss, rtol=1e-4, atol=1e-4)
transformers/tests/models/videomae/test_modeling_videomae.py/0
{ "file_path": "transformers/tests/models/videomae/test_modeling_videomae.py", "repo_id": "transformers", "token_count": 7603 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch VitMatte model.""" import unittest from huggingface_hub import hf_hub_download from transformers import VitMatteConfig from transformers.testing_utils import ( require_timm, require_torch, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import VitDetConfig, VitMatteForImageMatting if is_vision_available(): from PIL import Image from transformers import VitMatteImageProcessor class VitMatteModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=16, num_channels=4, is_training=True, use_labels=False, hidden_size=2, num_hidden_layers=2, num_attention_heads=2, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, scope=None, out_features=["stage1"], fusion_hidden_sizes=[128, 64, 32, 16], ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.out_features = out_features self.fusion_hidden_sizes = fusion_hidden_sizes self.seq_length = (self.image_size // self.patch_size) ** 2 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: raise NotImplementedError("Training is not yet supported") config = self.get_config() return config, pixel_values, labels def get_backbone_config(self): return VitDetConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_size=self.hidden_size, is_training=self.is_training, hidden_act=self.hidden_act, out_features=self.out_features, ) def get_config(self): return VitMatteConfig( backbone_config=self.get_backbone_config(), backbone=None, hidden_size=self.hidden_size, fusion_hidden_sizes=self.fusion_hidden_sizes, ) def create_and_check_model(self, config, pixel_values, labels): model = VitMatteForImageMatting(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.alphas.shape, (self.batch_size, 1, self.image_size, self.image_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = 
{"pixel_values": pixel_values} return config, inputs_dict @require_torch class VitMatteModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as VitMatte does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (VitMatteForImageMatting,) if is_torch_available() else () pipeline_model_mapping = {} fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = VitMatteModelTester(self) self.config_tester = ConfigTester( self, config_class=VitMatteConfig, has_text_modality=False, hidden_size=37, common_properties=["hidden_size"], ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="VitMatte does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Training is not yet supported") def test_training(self): pass @unittest.skip(reason="Training is not yet supported") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="ViTMatte does not support input and output embeddings") def test_model_get_set_embeddings(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "hustvl/vitmatte-small-composition-1k" model = VitMatteForImageMatting.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="ViTMatte does not support retaining gradient on attention logits") def test_retain_grad_hidden_states_attentions(self): pass def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [2, 2], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True print("Hello we're here") check_hidden_states_output(inputs_dict, config, model_class) @require_timm def test_backbone_selection(self): def _validate_backbone_init(): for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() if model.__class__.__name__ == "VitMatteForImageMatting": # Confirm out_indices propogated to backbone self.assertEqual(len(model.backbone.out_indices), 2) config, 
inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_pretrained_backbone = True config.backbone_config = None config.backbone_kwargs = {"out_indices": [-2, -1]} # Force load_backbone path config.is_hybrid = False # Load a timm backbone config.backbone = "resnet18" config.use_timm_backbone = True _validate_backbone_init() # Load a HF backbone config.backbone = "facebook/dinov2-small" config.use_timm_backbone = False _validate_backbone_init() @require_torch class VitMatteModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): processor = VitMatteImageProcessor.from_pretrained("hustvl/vitmatte-small-composition-1k") model = VitMatteForImageMatting.from_pretrained("hustvl/vitmatte-small-composition-1k").to(torch_device) filepath = hf_hub_download( repo_id="hf-internal-testing/image-matting-fixtures", filename="image.png", repo_type="dataset" ) image = Image.open(filepath).convert("RGB") filepath = hf_hub_download( repo_id="hf-internal-testing/image-matting-fixtures", filename="trimap.png", repo_type="dataset" ) trimap = Image.open(filepath).convert("L") # prepare image + trimap for the model inputs = processor(images=image, trimaps=trimap, return_tensors="pt").to(torch_device) with torch.no_grad(): alphas = model(**inputs).alphas expected_shape = torch.Size((1, 1, 640, 960)) self.assertEqual(alphas.shape, expected_shape) expected_slice = torch.tensor( [[0.9977, 0.9987, 0.9990], [0.9980, 0.9998, 0.9998], [0.9983, 0.9998, 0.9998]], device=torch_device ) torch.testing.assert_close(alphas[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/vitmatte/test_modeling_vitmatte.py/0
{ "file_path": "transformers/tests/models/vitmatte/test_modeling_vitmatte.py", "repo_id": "transformers", "token_count": 4468 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Wav2Vec2 model.""" import math import multiprocessing import os import pickle import tempfile import traceback import unittest import numpy as np from datasets import load_dataset from pytest import mark from transformers import Wav2Vec2Config, is_torch_available from transformers.testing_utils import ( CaptureLogger, cleanup, is_flaky, is_pt_flax_cross_test, is_pyctcdecode_available, is_torchaudio_available, require_flash_attn, require_pyctcdecode, require_soundfile, require_torch, require_torch_gpu, require_torchaudio, run_test_in_subprocess, slow, torch_device, ) from transformers.utils import is_torch_fx_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from safetensors.torch import save_file as safe_save_file from transformers import ( Wav2Vec2FeatureExtractor, Wav2Vec2ForAudioFrameClassification, Wav2Vec2ForCTC, Wav2Vec2ForMaskedLM, Wav2Vec2ForPreTraining, Wav2Vec2ForSequenceClassification, Wav2Vec2ForXVector, Wav2Vec2Model, Wav2Vec2Processor, ) from transformers.models.wav2vec2.modeling_wav2vec2 import ( WAV2VEC2_ADAPTER_PT_FILE, WAV2VEC2_ADAPTER_SAFE_FILE, Wav2Vec2GumbelVectorQuantizer, _compute_mask_indices, _sample_negative_indices, ) if is_torchaudio_available(): import torchaudio if is_pyctcdecode_available(): import pyctcdecode.decoder from transformers import Wav2Vec2ProcessorWithLM from transformers.models.wav2vec2_with_lm import processing_wav2vec2_with_lm if is_torch_fx_available(): from transformers.utils.fx import symbolic_trace def _test_wav2vec2_with_lm_invalid_pool(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) ds = load_dataset( "mozilla-foundation/common_voice_11_0", "es", split="test", streaming=True, trust_remote_code=True ) sample = next(iter(ds)) resampled_audio = torchaudio.functional.resample( torch.tensor(sample["audio"]["array"]), 48_000, 16_000 ).numpy() model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm").to( torch_device ) processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, return_tensors="pt").input_values with torch.no_grad(): logits = model(input_values.to(torch_device)).logits # use a spawn pool, which should trigger a warning if different than fork with CaptureLogger(pyctcdecode.decoder.logger) as cl, multiprocessing.get_context("spawn").Pool(1) as pool: transcription = processor.batch_decode(logits.cpu().numpy(), pool).text unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out) unittest.TestCase().assertEqual(transcription[0], "habitan aguas poco profundas y rocosas") # force 
batch_decode to internally create a spawn pool, which should trigger a warning if different than fork multiprocessing.set_start_method("spawn", force=True) with CaptureLogger(processing_wav2vec2_with_lm.logger) as cl: transcription = processor.batch_decode(logits.cpu().numpy()).text unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out) unittest.TestCase().assertEqual(transcription[0], "habitan aguas poco profundas y rocosas") except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class Wav2Vec2ModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, # speech is longer is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, # this is most likely not correctly set yet intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, mask_time_prob=0.5, mask_time_length=2, vocab_size=32, do_stable_layer_norm=False, num_adapter_layers=1, adapter_stride=2, tdnn_dim=(32, 32), tdnn_kernel=(5, 3), tdnn_dilation=(1, 2), xvector_output_dim=32, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.num_adapter_layers = num_adapter_layers self.adapter_stride = adapter_stride self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.scope = scope self.tdnn_dim = tdnn_dim self.tdnn_kernel = tdnn_kernel self.tdnn_dilation = tdnn_dilation self.xvector_output_dim = xvector_output_dim output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length self.adapter_output_seq_length = (self.output_seq_length - 1) // adapter_stride + 1 def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_values, attention_mask def get_config(self): return Wav2Vec2Config( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, 
mask_time_prob=self.mask_time_prob, mask_time_length=self.mask_time_length, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, do_stable_layer_norm=self.do_stable_layer_norm, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, num_adapter_layers=self.num_adapter_layers, adapter_stride=self.adapter_stride, tdnn_dim=self.tdnn_dim, tdnn_kernel=self.tdnn_kernel, tdnn_dilation=self.tdnn_dilation, xvector_output_dim=self.xvector_output_dim, ) def create_and_check_model(self, config, input_values, attention_mask): model = Wav2Vec2Model(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter(self, config, input_values, attention_mask): config.add_adapter = True model = Wav2Vec2Model(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter_for_ctc(self, config, input_values, attention_mask): config.add_adapter = True config.output_hidden_size = 2 * config.hidden_size model = Wav2Vec2ForCTC(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.adapter_output_seq_length, self.vocab_size) ) def create_and_check_model_with_adapter_proj_dim(self, config, input_values, attention_mask): config.add_adapter = True config.output_hidden_size = 8 model = Wav2Vec2Model(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, config.output_hidden_size), ) def create_and_check_model_with_attn_adapter(self, config, input_values, attention_mask): config.adapter_attn_dim = 16 model = Wav2Vec2ForCTC(config=config) self.parent.assertIsNotNone(model._get_adapters()) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.output_seq_length, self.vocab_size)) def create_and_check_batch_inference(self, config, input_values, *args): # test does not pass for models making use of `group_norm` # check: https://github.com/pytorch/fairseq/issues/3227 model = Wav2Vec2Model(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 
1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = Wav2Vec2ForCTC(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_seq_classifier_loss(self, config, input_values, *args): model = Wav2Vec2ForSequenceClassification(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ForCTC(config=config) model.to(torch_device) model.train() # freeze feature encoder model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: # it's important that we make sure that target lengths are at least # one shorter than logit lengths to prevent -inf labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ForSequenceClassification(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss 
self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_xvector_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ForXVector(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = Wav2Vec2ForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with self.parent.assertRaises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class Wav2Vec2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (Wav2Vec2ForCTC, Wav2Vec2Model, Wav2Vec2ForMaskedLM, Wav2Vec2ForSequenceClassification, Wav2Vec2ForPreTraining) if is_torch_available() else () ) pipeline_model_mapping = ( { "audio-classification": Wav2Vec2ForSequenceClassification, "automatic-speech-recognition": Wav2Vec2ForCTC, "feature-extraction": Wav2Vec2Model, "fill-mask": Wav2Vec2ForMaskedLM, } if is_torch_available() else {} ) fx_compatible = True test_pruning = False test_headmasking = False def setUp(self): self.model_tester = Wav2Vec2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter(*config_and_inputs) def test_model_with_adapter_for_ctc(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_for_ctc(*config_and_inputs) def test_model_with_adapter_proj_dim(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_xvector_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_xvector_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) @unittest.skip(reason="Model has no inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Model has input_values instead of input_ids") def test_forward_signature(self): pass @unittest.skip(reason="Model has no token embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Model has no inputs_embeds") def test_model_get_set_embeddings(self): pass @is_pt_flax_cross_test @unittest.skip(reason="Non-robust architecture does not exist in Flax") def test_equivalence_flax_to_pt(self): pass @is_pt_flax_cross_test @unittest.skip(reason="Non-robust architecture does not exist in Flax") def test_equivalence_pt_to_flax(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module,
"weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_mask_feature_prob_ctc(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsNotNone(model) # Wav2Vec2 cannot be torchscripted because of group norm. 
def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): # TODO: fix it self.skipTest(reason="torch 2.1 breaks torch fx tests for wav2vec2/hubert.") if not is_torch_fx_available() or not self.fx_compatible: self.skipTest(reason="torch fx not available or not compatible with this model") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) try: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "pixel_values", "token_type_ids", "visual_feats", "visual_pos", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) if ( isinstance(model, Wav2Vec2ForSequenceClassification) and not hasattr(model.config, "problem_type") or model.config.problem_type is None ): model.config.problem_type = "single_label_classification" traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) except Exception as e: self.fail(f"Couldn't trace module: {e}") def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = len(model_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], traced_output[i]), f"traced {i}th output doesn't match model {i}th output for {model_class}", ) # Test that the model can be serialized and restored properly with tempfile.TemporaryDirectory() as tmp_dir_name: pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") try: with open(pkl_file_name, "wb") as f: pickle.dump(traced_model, f) with open(pkl_file_name, "rb") as f: loaded = pickle.load(f) except Exception as e: self.fail(f"Couldn't serialize / deserialize the traced model: {e}") loaded_output = loaded(**filtered_inputs) loaded_output = flatten_output(loaded_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], loaded_output[i]), f"serialized model {i}th output doesn't match model {i}th output for {model_class}", ) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. 
# (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() @unittest.skip( "Need to investigate why config.do_stable_layer_norm is set to False here when it doesn't seem to be supported" ) def test_flax_from_pt_safetensors(self): return @require_torch class Wav2Vec2RobustModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = ( ( Wav2Vec2ForCTC, Wav2Vec2Model, Wav2Vec2ForMaskedLM, Wav2Vec2ForSequenceClassification, Wav2Vec2ForPreTraining, Wav2Vec2ForAudioFrameClassification, Wav2Vec2ForXVector, ) if is_torch_available() else () ) test_pruning = False test_headmasking = False def setUp(self): self.model_tester = Wav2Vec2ModelTester( self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True ) self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @is_flaky( description="The `codevector_idx` computed with `argmax()` in `Wav2Vec2GumbelVectorQuantizer.forward` is not stable." ) def test_batching_equivalence(self): super().test_batching_equivalence() def test_model_with_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter(*config_and_inputs) def test_model_with_adapter_proj_dim(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs) def test_model_with_attn_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_attn_adapter(*config_and_inputs) def test_batched_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_batch_inference(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_xvector_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_xvector_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) @unittest.skip(reason="Model has no input_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Model has input_values instead of input_ids") def test_forward_signature(self): pass @unittest.skip(reason="Model has no token embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Model has no input_embeds") def test_model_get_set_embeddings(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True 
config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_model_for_pretraining(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = Wav2Vec2ForPreTraining(config).to(torch_device) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) sampled_negative_indices = _sample_negative_indices(features_shape, 10, mask_time_indices) mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) loss = model( inputs_dict["input_values"], attention_mask=inputs_dict["attention_mask"], 
mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices, ).loss # more losses mask_time_indices[:, : mask_time_indices.shape[-1] // 2] = True sampled_negative_indices = _sample_negative_indices(features_shape, 10, mask_time_indices.cpu().numpy()) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) loss_more_masked = model( inputs_dict["input_values"], attention_mask=inputs_dict["attention_mask"], mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices, ).loss # loss_more_masked has to be bigger or equal loss since more masked inputs have to be predicted self.assertTrue(loss.detach().item() <= loss_more_masked.detach().item()) def test_mask_feature_prob_ctc(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_feature_prob_ctc_single_batch(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_time_prob=0.2, mask_feature_prob=0.2, mask_time_length=2, mask_feature_length=2, ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (1, 1498, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass def test_load_and_set_attn_adapter(self): processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) def get_logits(model, input_features): model = model.to(torch_device) batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt", ) with torch.no_grad(): logits = model( 
input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits return logits input_features = [np.random.random(16_000 * s) for s in [1, 3, 2, 6]] model = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2-adapter", target_lang="it") logits = get_logits(model, input_features) model_2 = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2-adapter") model_2.load_adapter("it") logits_2 = get_logits(model_2, input_features) torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3) # test that loading adapter weights with mismatched vocab sizes can be loaded def test_load_target_lang_with_mismatched_size(self): processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) def get_logits(model, input_features): model = model.to(torch_device) batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt", ) with torch.no_grad(): logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits return logits input_features = [np.random.random(16_000 * s) for s in [1, 3, 2, 6]] model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2-adapter", target_lang="fr", ignore_mismatched_sizes=True ) logits = get_logits(model, input_features) model_2 = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2-adapter") model_2.load_adapter("fr") logits_2 = get_logits(model_2, input_features) torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3) def test_load_attn_adapter(self): processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) def get_logits(model, input_features): model = model.to(torch_device) batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt", ) with torch.no_grad(): logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits return logits input_features = [np.random.random(16_000 * s) for s in [1, 3, 2, 6]] model = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2", adapter_attn_dim=16) with tempfile.TemporaryDirectory() as tempdir: model.save_pretrained(tempdir) model = Wav2Vec2ForCTC.from_pretrained(tempdir) logits = get_logits(model, input_features) adapter_weights = model._get_adapters() # save safe weights safe_filepath = os.path.join(tempdir, WAV2VEC2_ADAPTER_SAFE_FILE.format("eng")) safe_save_file(adapter_weights, safe_filepath, metadata={"format": "pt"}) model.load_adapter("eng") model.load_adapter("eng", use_safetensors=True) with self.assertRaises(OSError): model.load_adapter("eng", use_safetensors=False) with self.assertRaises(Exception): model.load_adapter("ita", use_safetensors=True) logits_2 = get_logits(model, input_features) torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3) with tempfile.TemporaryDirectory() as tempdir: model.save_pretrained(tempdir) model = Wav2Vec2ForCTC.from_pretrained(tempdir) logits = get_logits(model, input_features) adapter_weights = model._get_adapters() # save pt weights pt_filepath = os.path.join(tempdir, WAV2VEC2_ADAPTER_PT_FILE.format("eng")) torch.save(adapter_weights, pt_filepath) model.load_adapter("eng") model.load_adapter("eng", use_safetensors=False) with 
self.assertRaises(OSError): model.load_adapter("eng", use_safetensors=True) logits_2 = get_logits(model, input_features) torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3) model = Wav2Vec2ForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2-adapter") logits = get_logits(model, input_features) model.load_adapter("eng") model.load_adapter("eng", use_safetensors=False) model.load_adapter("eng", use_safetensors=True) logits_2 = get_logits(model, input_features) torch.testing.assert_close(logits, logits_2, rtol=1e-3, atol=1e-3) @slow def test_model_from_pretrained(self): model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsNotNone(model) @require_torch class Wav2Vec2UtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) def test_compute_mask_indices_low_prob(self): # with these settings num_masked_spans=0.5, which means probabilistic rounding # ensures that in 5 out of 10 method calls, num_masked_spans=0, and in # the other 5 out of 10, cases num_masked_spans=1 n_trials = 100 batch_size = 4 sequence_length = 100 mask_prob = 0.05 mask_length = 10 count_dimensions_masked = 0 count_dimensions_not_masked = 0 for _ in range(n_trials): mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) num_masks = torch.sum(mask).item() if num_masks > 0: count_dimensions_masked += 1 else: count_dimensions_not_masked += 1 # as we test for at least 10 masked dimension and at least # 10 non-masked dimension, this test could fail with probability: # P(100 coin flips, at most 9 heads) = 1.66e-18 self.assertGreater(count_dimensions_masked, int(n_trials * 0.1)) self.assertGreater(count_dimensions_not_masked, int(n_trials * 0.1)) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) # because of overlap mask don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) def test_compute_mask_indices_attn_mask_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) attention_mask[:2, sequence_length // 2 :] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask ) mask = torch.from_numpy(mask).to(torch_device) for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0) def test_compute_mask_indices_short_audio(self): batch_size = 4 sequence_length = 100 mask_prob = 0.05 mask_length = 10 attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) # force one example to be heavily padded attention_mask[0, 5:] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask, min_masks=2 ) # make 
sure that the non-padded frames of the heavily padded example are never masked self.assertFalse(mask[0][attention_mask[0].to(torch.bool).cpu()].any()) def test_compute_perplexity(self): probs = torch.arange(100, device=torch_device).reshape(2, 5, 10) / 100 ppl = Wav2Vec2GumbelVectorQuantizer._compute_perplexity(probs) self.assertTrue(abs(ppl.item() - 141.4291) < 1e-3) # mask half of the input mask = torch.ones((2,), device=torch_device, dtype=torch.bool) mask[0] = 0 ppl = Wav2Vec2GumbelVectorQuantizer._compute_perplexity(probs, mask) self.assertTrue(abs(ppl.item() - 58.6757) < 1e-3) def test_sample_negatives(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 sequence = torch.div( torch.arange(sequence_length * hidden_size, device=torch_device), hidden_size, rounding_mode="floor" ) features = sequence.view(sequence_length, hidden_size) # each value in the vector consists of the same value features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous() # sample negative indices sampled_negative_indices = _sample_negative_indices((batch_size, sequence_length), num_negatives, None) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)] negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) # make sure no negatively sampled vector is actually a positive one for negative in negatives: self.assertTrue(((negative - features) == 0).sum() == 0.0) # make sure that full vectors are sampled and not values of vectors => this means that `unique()` yields a single value for `hidden_size` dim self.assertEqual(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1)) def test_sample_negatives_with_mask(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 # second half of last input tensor is padded mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) mask[-1, sequence_length // 2 :] = 0 sequence = torch.div( torch.arange(sequence_length * hidden_size, device=torch_device), hidden_size, rounding_mode="floor" ) features = sequence.view(sequence_length, hidden_size) # each value in the vector consists of the same value features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous() # replace masked feature vectors with -100 to test that those are not sampled features = torch.where(mask[:, :, None].expand(features.shape).bool(), features, -100) # sample negative indices sampled_negative_indices = _sample_negative_indices( (batch_size, sequence_length), num_negatives, mask.cpu().numpy() ) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)] negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3) self.assertTrue((negatives >= 0).all().item()) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) # make sure no negatively sampled vector is actually a positive one for negative in negatives: self.assertTrue(((negative - features) == 0).sum() == 0.0) # make sure that full vectors are sampled and not values of vectors => this means that `unique()` yields a single value for `hidden_size` dim self.assertEqual(negatives.unique(dim=-1).shape, (num_negatives, batch_size, 
sequence_length, 1)) @require_torch @require_soundfile @slow class Wav2Vec2ModelIntegrationTest(unittest.TestCase): def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch cleanup(torch_device, gc_collect=True) def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _load_superb(self, task, num_samples): ds = load_dataset("anton-l/superb_dummy", task, split="test", trust_remote_code=True) return ds[:num_samples] def test_inference_ctc_normal(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") model.to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True) input_speech = self._load_datasamples(1) input_values = processor(input_speech, return_tensors="pt").input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = ["a man said to the universe sir i exist"] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_normal_batched(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") model.to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True) input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight lowing cloth that was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_robust_batched(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self").to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", do_lower_case=True) input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around" " him with the thousands of spectators were trivialities not worth thinking about", "his instant panic was followed by a small sharp blow high on his chest", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) @unittest.skipIf(torch_device != "cpu", "cannot make deterministic on GPU") def test_inference_integration(self): model = Wav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-base") 
model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base") input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) np.random.seed(4) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), mask_time_indices=mask_time_indices, ) # compute cosine similarity cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1) # retrieve cosine sim of masked features cosine_sim_masked = cosine_sim[mask_time_indices] # cosine similarity of model is all > 0.5 as model is # pre-trained on contrastive loss # fmt: off expected_cosine_sim_masked = torch.tensor([ 0.8523, 0.5860, 0.6905, 0.5557, 0.7456, 0.5249, 0.6639, 0.7654, 0.7565, 0.8167, 0.8222, 0.7960, 0.8034, 0.8166, 0.8310, 0.8263, 0.8274, 0.8258, 0.8179, 0.8412, 0.8536, 0.5098, 0.4728, 0.6461, 0.4498, 0.6002, 0.5774, 0.6457, 0.7123, 0.5668, 0.6866, 0.4960, 0.6293, 0.7423, 0.7419, 0.7526, 0.7768, 0.4898, 0.5393, 0.8183 ], device=torch_device) # fmt: on torch.testing.assert_close(cosine_sim_masked, expected_cosine_sim_masked, rtol=1e-3, atol=1e-3) def test_inference_pretrained(self): model = Wav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-base") model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) torch.manual_seed(0) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), mask_time_indices=mask_time_indices, ) # compute cosine similarity cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1) # retrieve cosine sim of masked features cosine_sim_masked = cosine_sim[mask_time_indices] # ... 
now compare to randomly initialized model config = Wav2Vec2Config.from_pretrained("facebook/wav2vec2-base") model_rand = Wav2Vec2ForPreTraining(config).to(torch_device).eval() with torch.no_grad(): outputs_rand = model_rand( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), mask_time_indices=mask_time_indices, ) # compute cosine similarity cosine_sim_rand = torch.cosine_similarity( outputs_rand.projected_states, outputs_rand.projected_quantized_states, dim=-1 ) # retrieve cosine sim of masked features cosine_sim_masked_rand = cosine_sim_rand[mask_time_indices] # a pretrained wav2vec2 model has learned to predict the quantized latent states # => the cosine similarity between quantized states and predicted states > 0.5 # a random wav2vec2 model has not learned to predict the quantized latent states # => the cosine similarity between quantized states and predicted states is very likely < 0.1 self.assertTrue(cosine_sim_masked.mean().item() - 5 * cosine_sim_masked_rand.mean().item() > 0) @unittest.skipIf(torch_device != "cpu", "cannot make deterministic on GPU") def test_loss_pretraining(self): model = Wav2Vec2ForPreTraining.from_pretrained( "facebook/wav2vec2-base", attention_dropout=0.0, feat_proj_dropout=0.0, hidden_dropout=0.0, layerdrop=0.0, ) model.to(torch_device).train() feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) torch.manual_seed(0) np.random.seed(0) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) sampled_negative_indices = _sample_negative_indices( mask_time_indices.shape, model.config.num_negatives, mask_time_indices ) mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices, ) # check diversity loss num_codevectors = model.config.num_codevectors_per_group * model.config.num_codevector_groups diversity_loss = (num_codevectors - outputs.codevector_perplexity) / num_codevectors self.assertTrue(abs(diversity_loss.item() - 0.9538) < 1e-3) # check overall loss (contrastive loss + diversity loss) expected_loss = 116.7094 self.assertTrue(abs(outputs.loss.item() - expected_loss) < 1e-3) def test_inference_keyword_spotting(self): model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-ks").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-ks") input_data = self._load_superb("ks", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) predicted_logits, predicted_ids = torch.max(outputs.logits, dim=-1) expected_labels = [7, 6, 10, 9] # s3prl logits for 
the same batch expected_logits = torch.tensor([6.1186, 11.8961, 10.2931, 6.0898], device=torch_device) self.assertListEqual(predicted_ids.tolist(), expected_labels) torch.testing.assert_close(predicted_logits, expected_logits, rtol=1e-2, atol=1e-2) def test_inference_intent_classification(self): model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-ic").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-ic") input_data = self._load_superb("ic", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) predicted_logits_action, predicted_ids_action = torch.max(outputs.logits[:, :6], dim=-1) predicted_logits_object, predicted_ids_object = torch.max(outputs.logits[:, 6:20], dim=-1) predicted_logits_location, predicted_ids_location = torch.max(outputs.logits[:, 20:24], dim=-1) expected_labels_action = [0, 0, 2, 3] expected_logits_action = torch.tensor([0.4568, 11.0848, 1.6621, 9.3841], device=torch_device) expected_labels_object = [3, 10, 3, 4] expected_logits_object = torch.tensor([1.5322, 10.7094, 5.2469, 22.1318], device=torch_device) expected_labels_location = [0, 0, 0, 1] expected_logits_location = torch.tensor([1.5335, 6.5096, 10.5704, 11.0569], device=torch_device) self.assertListEqual(predicted_ids_action.tolist(), expected_labels_action) self.assertListEqual(predicted_ids_object.tolist(), expected_labels_object) self.assertListEqual(predicted_ids_location.tolist(), expected_labels_location) torch.testing.assert_close(predicted_logits_action, expected_logits_action, rtol=1e-2, atol=1e-2) torch.testing.assert_close(predicted_logits_object, expected_logits_object, rtol=1e-2, atol=1e-2) torch.testing.assert_close(predicted_logits_location, expected_logits_location, rtol=1e-2, atol=1e-2) def test_inference_speaker_identification(self): model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-sid").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-sid") input_data = self._load_superb("si", 4) output_logits = [] with torch.no_grad(): for example in input_data["speech"]: input = processor(example, return_tensors="pt", padding=True) output = model(input.input_values.to(torch_device), attention_mask=None) output_logits.append(output.logits[0]) output_logits = torch.stack(output_logits) predicted_logits, predicted_ids = torch.max(output_logits, dim=-1) expected_labels = [251, 1, 1, 3] # s3prl logits for the same batch expected_logits = torch.tensor([37.5627, 71.6362, 64.2419, 31.7778], device=torch_device) self.assertListEqual(predicted_ids.tolist(), expected_labels) torch.testing.assert_close(predicted_logits, expected_logits, rtol=1e-2, atol=1e-2) def test_inference_emotion_recognition(self): model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-er").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-er") input_data = self._load_superb("er", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) predicted_logits, predicted_ids = 
torch.max(outputs.logits, dim=-1) expected_labels = [1, 1, 2, 2] # s3prl logits for the same batch expected_logits = torch.tensor([2.1722, 3.0779, 8.0287, 6.6797], device=torch_device) self.assertListEqual(predicted_ids.tolist(), expected_labels) torch.testing.assert_close(predicted_logits, expected_logits, rtol=1e-2, atol=1e-2) def test_phoneme_recognition(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft").to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "ɐ m æ n s ɛ d t ə ð ə j uː n ɪ v ɚ s s ɚ aɪ ɛ ɡ z ɪ s t", "s w ɛ t k ʌ v ɚ d b ɹ iː ɔ n z b ɑː d i t ɹ ɪ k l ɪ ŋ ɪ n t ə ð ə t aɪ t l oɪ n k l ɑː θ ð æ w ʌ z ð ɪ oʊ" " n l i ɡ ɑːɹ m ə n t h iː w ɔːɹ", "ð ə k aɪ t ɔ n h ɪ z tʃ ɛ s t s t ɪ l d ɹ ɪ p ɪ ŋ b l ʌ d ð ɪ eɪ k ʌ v h ɪ z oʊ v ɚ s t ɹ eɪ n d aɪ z iː" " v ə n ð ə s ɔːɹ ɹ ɪ ŋ ɐ ɹ iː n ɐ ɚ ɹ aʊ n d h ɪ m w ɪ ð ə θ aʊ z ə n d z ʌ v s p ɛ k t eɪ ɾ ɚ z w ɜː t ɹ" " ɪ v ɪ æ l ᵻ ɾ i z n ɑː t w ɜː θ θ ɪ ŋ k ɪ ŋ ɐ b aʊ t", "h ɪ z ɪ n s t ə n t v p æ n ɪ k w ʌ z f ɑː l oʊ d b aɪ ɐ s m ɔː l ʃ ɑːɹ p b l oʊ h aɪ ɔ n h ɪ z tʃ ɛ s t", ] # should correspond to =>: # [ # "a man said to the universe sir i exist", # "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", # "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around him with the thousands of spectators were trivialities not worth thinking about", # "his instant panic was followed by a small sharp blow high on his chest", # ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) @require_pyctcdecode @require_torchaudio def test_wav2vec2_with_lm(self): ds = load_dataset( "mozilla-foundation/common_voice_11_0", "es", split="test", streaming=True, trust_remote_code=True ) sample = next(iter(ds)) resampled_audio = torchaudio.functional.resample( torch.tensor(sample["audio"]["array"]), 48_000, 16_000 ).numpy() model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm").to( torch_device ) processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, return_tensors="pt").input_values with torch.no_grad(): logits = model(input_values.to(torch_device)).logits transcription = processor.batch_decode(logits.cpu().numpy()).text self.assertEqual(transcription[0], "habitan aguas poco profundas y rocosas") @require_pyctcdecode @require_torchaudio def test_wav2vec2_with_lm_pool(self): ds = load_dataset( "mozilla-foundation/common_voice_11_0", "es", split="test", streaming=True, trust_remote_code=True ) sample = next(iter(ds)) resampled_audio = torchaudio.functional.resample( torch.tensor(sample["audio"]["array"]), 48_000, 16_000 ).numpy() model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm").to( torch_device ) processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, 
return_tensors="pt").input_values with torch.no_grad(): logits = model(input_values.to(torch_device)).logits # test user-managed pool with multiprocessing.get_context("fork").Pool(2) as pool: transcription = processor.batch_decode(logits.cpu().numpy(), pool).text self.assertEqual(transcription[0], "habitan aguas poco profundas y rocosas") # user-managed pool + num_processes should trigger a warning with CaptureLogger(processing_wav2vec2_with_lm.logger) as cl, multiprocessing.get_context("fork").Pool( 2 ) as pool: transcription = processor.batch_decode(logits.cpu().numpy(), pool, num_processes=2).text self.assertIn("num_process", cl.out) self.assertIn("it will be ignored", cl.out) self.assertEqual(transcription[0], "habitan aguas poco profundas y rocosas") @require_pyctcdecode @require_torchaudio def test_wav2vec2_with_lm_invalid_pool(self): run_test_in_subprocess(test_case=self, target_func=_test_wav2vec2_with_lm_invalid_pool, inputs=None) def test_inference_diarization(self): model = Wav2Vec2ForAudioFrameClassification.from_pretrained("anton-l/wav2vec2-base-superb-sd").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("anton-l/wav2vec2-base-superb-sd") input_data = self._load_superb("sd", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True, sampling_rate=16_000) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) # labels is a one-hot array of shape (num_frames, num_speakers) labels = (outputs.logits > 0).long() # s3prl logits for the same batch expected_logits = torch.tensor( [ [[-5.2807, -5.1272], [-5.4059, -4.7757], [-5.2764, -4.9621], [-5.0117, -4.5851]], [[-1.7643, -0.5462], [-1.7369, -0.2649], [-1.5066, -0.6200], [-4.5703, -2.4863]], [[-0.8656, -0.4783], [-0.8899, -0.3289], [-0.9267, -0.5781], [-0.7817, -0.4619]], [[-4.8625, -2.5316], [-5.2339, -2.2155], [-4.9835, -2.0344], [-4.4727, -1.8421]], ], device=torch_device, ) self.assertEqual(labels[0, :, 0].sum(), 555) self.assertEqual(labels[0, :, 1].sum(), 299) torch.testing.assert_close(outputs.logits[:, :4], expected_logits, rtol=1e-2, atol=1e-2) def test_inference_speaker_verification(self): model = Wav2Vec2ForXVector.from_pretrained("anton-l/wav2vec2-base-superb-sv").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("anton-l/wav2vec2-base-superb-sv") input_data = self._load_superb("si", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True, sampling_rate=16_000) labels = torch.tensor([5, 1, 1, 3], device=torch_device).T with torch.no_grad(): input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) outputs = model(input_values, attention_mask=attention_mask, labels=labels) embeddings = torch.nn.functional.normalize(outputs.embeddings, dim=-1).cpu() cosine_sim = torch.nn.CosineSimilarity(dim=-1) # id10002 vs id10002 self.assertAlmostEqual(cosine_sim(embeddings[1], embeddings[2]).numpy(), 0.9758, 3) # id10006 vs id10002 self.assertAlmostEqual(cosine_sim(embeddings[0], embeddings[1]).numpy(), 0.7579, 3) # id10002 vs id10004 self.assertAlmostEqual(cosine_sim(embeddings[2], embeddings[3]).numpy(), 0.7594, 3) self.assertAlmostEqual(outputs.loss.item(), 17.7963, 2) @require_torchaudio def test_inference_mms_1b_all(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/mms-1b-all").to(torch_device) processor = 
Wav2Vec2Processor.from_pretrained("facebook/mms-1b-all") LANG_MAP = {"it": "ita", "es": "spa", "fr": "fra", "en": "eng"} def run_model(lang): ds = load_dataset( "mozilla-foundation/common_voice_11_0", lang, split="test", streaming=True, trust_remote_code=True ) sample = next(iter(ds)) wav2vec2_lang = LANG_MAP[lang] model.load_adapter(wav2vec2_lang) processor.tokenizer.set_target_lang(wav2vec2_lang) resampled_audio = torchaudio.functional.resample( torch.tensor(sample["audio"]["array"]), 48_000, 16_000 ).numpy() inputs = processor(resampled_audio, sampling_rate=16_000, return_tensors="pt") input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask).logits ids = torch.argmax(outputs, dim=-1)[0] transcription = processor.decode(ids) return transcription TRANSCRIPTIONS = { "it": "il libro ha suscitato molte polemiche a causa dei suoi contenuti", "es": "habitan aguas poco profundas y rocosas", "fr": "ce dernier est volé tout au long de l'histoire romaine", "en": "joe keton disapproved of films and buster also had reservations about the media", } for lang in LANG_MAP.keys(): assert run_model(lang) == TRANSCRIPTIONS[lang] @require_flash_attn @require_torch_gpu @mark.flash_attn_test def test_inference_ctc_fa2(self): model_fa = Wav2Vec2ForCTC.from_pretrained( "facebook/wav2vec2-base-960h", attn_implementation="flash_attention_2", torch_dtype=torch.bfloat16 ) model_fa.to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True) input_speech = self._load_datasamples(1) input_values = processor(input_speech, return_tensors="pt").input_values.to(torch_device) with torch.no_grad(): logits = model_fa(input_values.to(torch.bfloat16)).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = ["a man said to the universe sir i exist"] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) @require_flash_attn @require_torch_gpu @mark.flash_attn_test def test_inference_ctc_fa2_batched(self): model_fa = Wav2Vec2ForCTC.from_pretrained( "facebook/wav2vec2-base-960h", attn_implementation="flash_attention_2", torch_dtype=torch.bfloat16 ) model_fa.to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True) input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True, return_attention_mask=True) inputs = inputs.to(torch_device) with torch.no_grad(): logits = model_fa(inputs.input_values.to(torch.bfloat16), attention_mask=inputs.attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight lowing cloth that was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
transformers/tests/models/wav2vec2/test_modeling_wav2vec2.py/0
{ "file_path": "transformers/tests/models/wav2vec2/test_modeling_wav2vec2.py", "repo_id": "transformers", "token_count": 39586 }
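The wav2vec2 tests in the record above exercise the private helpers `_compute_mask_indices` and `_sample_negative_indices` in isolation. Below is a minimal sketch of how those helpers are typically wired into a `Wav2Vec2ForPreTraining` forward pass, mirroring the pattern of `test_loss_pretraining`; the dummy one-second waveform is an assumption chosen for brevity, not data used by the tests.

import numpy as np
import torch

from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForPreTraining
from transformers.models.wav2vec2.modeling_wav2vec2 import (
    _compute_mask_indices,
    _sample_negative_indices,
)

model = Wav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-base")
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base")

# dummy one-second 16 kHz waveform (the tests use LibriSpeech samples instead)
raw_audio = [np.random.randn(16_000).astype(np.float32)]
inputs = feature_extractor(raw_audio, sampling_rate=16_000, return_tensors="pt")

# length of the feature-encoder output for this input
batch_size, num_samples = inputs.input_values.shape
seq_len = int(model._get_feat_extract_output_lengths(num_samples))
features_shape = (batch_size, seq_len)

# sample SpecAugment time masks and negative indices the same way the tests do
mask_time_indices = _compute_mask_indices(
    features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2
)
sampled_negative_indices = _sample_negative_indices(
    features_shape, model.config.num_negatives, mask_time_indices
)

with torch.no_grad():
    outputs = model(
        inputs.input_values,
        mask_time_indices=torch.from_numpy(mask_time_indices),
        sampled_negative_indices=torch.from_numpy(sampled_negative_indices),
    )

# contrastive + diversity loss, the quantity checked in test_loss_pretraining
print(outputs.loss)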
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import inspect import tempfile import unittest import transformers from transformers import WhisperConfig, is_flax_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow from transformers.utils import cached_property from transformers.utils.import_utils import is_datasets_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_datasets_available(): import datasets from datasets import load_dataset if is_flax_available(): import jax import numpy as np from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import ( FLAX_MODEL_MAPPING, FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, WhisperFeatureExtractor, WhisperProcessor, ) from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.models.whisper.modeling_flax_whisper import sinusoidal_embedding_init @require_flax class FlaxWhisperModelTester: config_cls = WhisperConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=60, is_training=True, use_labels=False, vocab_size=99, d_model=16, decoder_attention_heads=4, decoder_ffn_dim=16, decoder_layers=2, encoder_attention_heads=4, encoder_ffn_dim=16, encoder_layers=2, input_channels=1, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=70, max_source_positions=30, max_target_positions=40, bos_token_id=98, eos_token_id=98, pad_token_id=0, num_mel_bins=80, decoder_start_token_id=85, num_conv_layers=1, suppress_tokens=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = encoder_layers self.num_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.encoder_attention_heads = encoder_attention_heads self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_seq_length = seq_length // 2 self.decoder_seq_length = 1 self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_mel_bins = num_mel_bins self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.decoder_start_token_id = decoder_start_token_id self.num_conv_layers = num_conv_layers 
self.suppress_tokens = suppress_tokens def prepare_config_and_inputs_for_common(self): input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length], self.vocab_size) decoder_input_ids = np.array(self.batch_size * [[self.decoder_start_token_id]]) config = WhisperConfig( vocab_size=self.vocab_size, num_mel_bins=self.num_mel_bins, decoder_start_token_id=self.decoder_start_token_id, is_encoder_decoder=True, activation_function=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_source_positions=self.max_source_positions, max_target_positions=self.max_target_positions, pad_token_id=self.pad_token_id, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, tie_word_embeddings=True, d_model=self.d_model, decoder_attention_heads=self.decoder_attention_heads, decoder_ffn_dim=self.decoder_ffn_dim, decoder_layers=self.decoder_layers, encoder_attention_heads=self.encoder_attention_heads, encoder_ffn_dim=self.encoder_ffn_dim, encoder_layers=self.encoder_layers, suppress_tokens=self.suppress_tokens, ) inputs_dict = prepare_whisper_inputs_dict(config, input_features, decoder_input_ids) return config, inputs_dict def prepare_whisper_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, ): if decoder_attention_mask is None: decoder_attention_mask = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8), np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8), ], axis=-1, ) return { "input_features": input_ids, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } def partialclass(cls, *args, **kwargs): class NewCls(cls): __init__ = functools.partialmethod(cls.__init__, *args, **kwargs) return NewCls def make_partial_class(full_class, *args, **kwargs): partial_class = partialclass(full_class, *args, **kwargs) partial_class.__name__ = full_class.__name__ partial_class.__module__ = full_class.__module__ return partial_class @require_flax class FlaxWhisperModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = (FlaxWhisperForConditionalGeneration, FlaxWhisperModel) if is_flax_available() else () all_generative_model_classes = (FlaxWhisperForConditionalGeneration,) if is_flax_available() else () is_encoder_decoder = True test_pruning = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = FlaxWhisperModelTester(self) _, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self.init_shape = (1,) + inputs_dict["input_features"].shape[1:] self.all_model_classes = ( make_partial_class(model_class, input_shape=self.init_shape) for model_class in self.all_model_classes ) self.config_tester = ConfigTester(self, config_class=WhisperConfig) def test_config(self): self.config_tester.run_common_tests() # overwrite because of `input_features` def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_features", "decoder_input_ids"] self.assertListEqual(arg_names[:2], expected_arg_names) # overwrite because of `input_features` def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for 
model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(input_features, decoder_input_ids, **kwargs): return model(input_features=input_features, decoder_input_ids=decoder_input_ids, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None): # We override with a slightly higher tol value, as test recently became flaky super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes) # overwrite because of `input_features` @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = make_partial_class(FLAX_MODEL_MAPPING[config.__class__], input_shape=self.init_shape) for model_class in self.all_model_classes: if model_class.__name__ == base_class.__name__: continue model = model_class(config) model.params = model.to_bf16(model.params) base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix])) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite because of `input_features` @is_pt_flax_cross_test def test_save_load_from_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = make_partial_class(FLAX_MODEL_MAPPING[config.__class__], input_shape=self.init_shape) for model_class in self.all_model_classes: if model_class.__name__ == base_class.__name__: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: # save pt model pt_model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname, from_pt=True) base_param_from_head = flatten_dict(unfreeze(head_model.params[head_model.base_model_prefix])) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite because of `input_features` @is_pt_flax_cross_test def test_save_load_to_base_pt(self): config, _ = 
self.model_tester.prepare_config_and_inputs_for_common() base_class = make_partial_class(FLAX_MODEL_MAPPING[config.__class__], input_shape=self.init_shape) for model_class in self.all_model_classes: if model_class.__name__ == base_class.__name__: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix])) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite because of `input_features` def test_save_load_from_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = make_partial_class(FLAX_MODEL_MAPPING[config.__class__], input_shape=self.init_shape) for model_class in self.all_model_classes: if model_class.__name__ == base_class.__name__: continue model = base_class(config) base_params = flatten_dict(unfreeze(model.params)) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname) base_param_from_head = flatten_dict(unfreeze(head_model.params[head_model.base_model_prefix])) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # overwrite because of `input_features` def test_save_load_to_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = make_partial_class(FLAX_MODEL_MAPPING[config.__class__], input_shape=self.init_shape) for model_class in self.all_model_classes: if model_class.__name__ == base_class.__name__: continue model = model_class(config) base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix])) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname) base_params = flatten_dict(unfreeze(base_model.params)) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_encoder_sinusoidal_embed_positions(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) params = model.params if model.base_model_prefix in params: params = model.params[model.base_model_prefix] embeds = params["encoder"]["embed_positions"]["embedding"] sinusoids = sinusoidal_embedding_init(None, embeds.shape) self.assertTrue(jax.numpy.allclose(embeds, sinusoids)) @slow @require_flax class FlaxWhisperModelIntegrationTest(unittest.TestCase): @cached_property def default_processor(self): return WhisperProcessor.from_pretrained("openai/whisper-base") def _load_datasamples(self, num_samples): ds = 
load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_tiny_logits_librispeech(self): model = FlaxWhisperModel.from_pretrained("openai/whisper-tiny", from_pt=True) input_speech = self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="np").input_features logits = model( input_features, decoder_input_ids=np.array([[50258, 50259, 50359]]), output_hidden_states=False, output_attentions=False, return_dict=False, ) # fmt: off EXPECTED_LOGITS = np.array( [ 2.9892, -6.7607, 5.7348, 3.6096, 0.2152, -5.7321, 4.8855, -1.6407, 0.2823, -1.5718, 10.4269, 3.4427, 0.0219, -8.0612, 3.4784, 8.4246, 4.0575, -2.2864, 11.1084, 0.9963, 0.9884, -8.5154, -3.5469, -9.3713, 0.9786, 3.5435, 7.4850, -5.2579, -1.4366, 10.4841 ] ) # fmt: on self.assertTrue(np.allclose(logits[0][0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) def test_small_en_logits_librispeech(self): model = FlaxWhisperModel.from_pretrained("openai/whisper-small.en", from_pt=True) input_speech = self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="np").input_features logits = model( input_features, decoder_input_ids=np.array([model.config.decoder_start_token_id]), output_hidden_states=False, output_attentions=False, return_dict=False, ) logits = logits[0] @ model.params["model"]["decoder"]["embed_tokens"]["embedding"].T # fmt: off EXPECTED_LOGITS = np.array( [ -3.6784, -7.7211, -9.5070, -11.9286, -7.6489, -9.7026, -5.6188, -8.0104, -4.6238, -5.1833, -9.0485, -3.4079, -5.4874, -2.6935, -6.3479, -7.3398, -6.9558, -7.6867, -7.4748, -8.3463, -9.9781, -10.8389, -10.3105, -11.7201, -9.7261, -7.1590, -5.9272, -12.4509, -11.1146, -8.1918 ] ) # fmt: on self.assertTrue(np.allclose(logits[0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) def test_large_logits_librispeech(self): model = FlaxWhisperModel.from_pretrained("openai/whisper-large", from_pt=True) input_speech = self._load_datasamples(1) processor = WhisperProcessor.from_pretrained("openai/whisper-large") processed_inputs = processor( audio=input_speech, text="This part of the speech", add_special_tokens=False, return_tensors="np" ) input_features = processed_inputs.input_features decoder_input_ids = processed_inputs.labels logits = model( input_features, decoder_input_ids=decoder_input_ids, output_hidden_states=False, output_attentions=False, return_dict=False, ) logits = logits[0] @ model.params["model"]["decoder"]["embed_tokens"]["embedding"].T # fmt: off EXPECTED_LOGITS = np.array( [ 2.1382, 0.9381, 4.4671, 3.5589, 2.4022, 3.8576, -0.6521, 2.5472, 1.8301, 1.9957, 2.3432, 1.4678, 0.5459, 2.2597, 1.5179, 2.5357, 1.1624, 0.6194, 1.0757, 1.8259, 2.4076, 1.6601, 2.3503, 1.3376, 1.9891, 1.8635, 3.8931, 5.3699, 4.4772, 3.9184 ] ) # fmt: on self.assertTrue(np.allclose(logits[0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) def test_tiny_en_generation(self): processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True) model.config.decoder_start_token_id = 50257 input_speech = self._load_datasamples(1) input_features = processor.feature_extractor( raw_speech=input_speech, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="jax" 
).input_features generated_ids = model.generate(input_features, num_beams=5, max_length=20).sequences transcript = processor.tokenizer.decode(generated_ids[0]) EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes and we are glad to" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) def test_tiny_generation(self): processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny", from_pt=True) input_speech = self._load_datasamples(1) input_features = processor.feature_extractor( raw_speech=input_speech, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="jax" ).input_features generated_ids = model.generate(input_features, num_beams=5, max_length=20).sequences transcript = processor.tokenizer.decode(generated_ids[0]) EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes and we are glad" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) def test_large_generation(self): processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-large", from_pt=True) input_speech = self._load_datasamples(1) input_features = processor.feature_extractor( raw_speech=input_speech, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="jax" ).input_features model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="en", task="transcribe") generated_ids = model.generate(input_features, num_beams=5, max_length=20).sequences transcript = processor.tokenizer.decode(generated_ids[0], skip_special_tokens=True) EXPECTED_TRANSCRIPT = " Mr. Quilter is the apostle of the middle classes and we are glad" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) def test_large_generation_multilingual(self): processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-large", from_pt=True) ds = load_dataset("legacy-datasets/common_voice", "ja", split="test", streaming=True, trust_remote_code=True) ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000)) input_speech = next(iter(ds))["audio"]["array"] input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="np") model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="ja", task="transcribe") generated_ids = model.generate(input_features, do_sample=False, max_length=20).sequences transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = "木村さんに電話を貸してもらいました" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="en", task="transcribe") generated_ids = model.generate( input_features, do_sample=False, max_length=20, ).sequences transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Kimura-san called me." 
self.assertEqual(transcript, EXPECTED_TRANSCRIPT) model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="ja", task="translate") generated_ids = model.generate(input_features, do_sample=False, max_length=20).sequences transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " I borrowed a phone from Kimura san" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) def test_large_batched_generation(self): processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-large", from_pt=True) input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="np").input_features generated_ids = model.generate(input_features, max_length=20).sequences # fmt: off EXPECTED_LOGITS = np.array( [ [50258, 50358, 50363, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 293, 321, 366, 5404, 281], [50258, 50358, 50363, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50257, 50257], [50258, 50358, 50363, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256], [50258, 50358, 50363, 634, 575, 12525, 22618, 1968, 6144, 35617, 20084, 1756, 311, 589, 307, 534, 10281, 934, 439, 11] ] ) # fmt: on self.assertTrue(np.allclose(generated_ids, EXPECTED_LOGITS)) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes and we are glad to", " Nor is Mr. Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all,", ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) def test_tiny_en_batched_generation(self): processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True) input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="np").input_features generated_ids = model.generate(input_features, max_length=20).sequences # fmt: off EXPECTED_LOGITS = np.array( [ [50257, 50362, 1770, 13, 2264, 346, 353, 318, 262, 46329, 286, 262, 3504, 6097, 11, 290, 356, 389, 9675, 284], [50257, 50362, 5414, 318, 1770, 13, 2264, 346, 353, 338, 5642, 1342, 3499, 621, 465, 2300, 13, 50256, 50256, 50256], [50257, 50362, 679, 4952, 514, 326, 379, 428, 43856, 1622, 286, 262, 614, 11, 351, 6786, 290, 32595, 12023, 28236], [50257, 50362, 679, 468, 12296, 17188, 1771, 7361, 26113, 18881, 1122, 338, 670, 318, 1107, 8312, 706, 477, 290, 460] ] ) # fmt: on self.assertTrue(np.allclose(generated_ids, EXPECTED_LOGITS)) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes, and we are glad to", " Nor is Mr. 
Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef looming", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can", ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_timestamp_generation(self): processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") input_speech = np.concatenate(self._load_datasamples(4)) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="jax").input_features generate_fn = jax.jit(functools.partial(model.generate, max_length=448, return_timestamps=True)) generated_ids = generate_fn(input_features) EXPECTED_OUTPUT = np.array([50258, 50259, 50359, 50364, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 11, 293, 321, 366, 5404, 281, 2928, 702, 14943, 13, 50692, 50692, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50926, 50926, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256, 450, 10539, 51208, 51208, 949, 505, 11, 14138, 10117, 490, 3936, 293, 1080, 3542, 5160, 881, 26336, 281, 264, 1575, 13, 51552, 51552, 634, 575, 12525, 22618, 1968, 6144, 35617, 7354, 1292, 6, 589, 307, 534, 10281, 934, 439, 11, 293, 51836, 51836, 50257]) # fmt: skip self.assertTrue(np.allclose(generated_ids, EXPECTED_OUTPUT)) EXPECTED_TRANSCRIPT = [ { "text": ( " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel. Nor is" " Mr. Quilter's manner less interesting than his matter. He tells us that at this festive season" " of the year, with Christmas and roast beef looming before us, similarly drawn from eating and" " its results occur most readily to the mind. He has grave doubts whether Sir Frederick Latins'" " work is really Greek after all, and" ), "offsets": [ { "text": ( " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel." ), "timestamp": (0.0, 6.5600000000000005), }, { "text": " Nor is Mr. Quilter's manner less interesting than his matter.", "timestamp": (6.5600000000000005, 11.24), }, { "text": ( " He tells us that at this festive season of the year, with Christmas and roast beef" " looming" ), "timestamp": (11.24, 16.88), }, { "text": ( " before us, similarly drawn from eating and its results occur most readily to the mind." 
), "timestamp": (16.88, 23.76), }, { "text": ( " He has grave doubts whether Sir Frederick Latins' work is really Greek after all, and" ), "timestamp": (23.76, 29.44), }, ], } ] transcript = processor.batch_decode(generated_ids, skip_special_tokens=True, output_offsets=True) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) class FlaxWhisperEncoderModelTester: def __init__( self, parent, batch_size=13, seq_length=60, is_training=True, use_labels=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, input_channels=1, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=30, num_mel_bins=80, num_conv_layers=1, suppress_tokens=None, classifier_proj_size=4, num_labels=2, is_encoder_decoder=False, is_decoder=False, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_mel_bins = num_mel_bins self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.num_conv_layers = num_conv_layers self.suppress_tokens = suppress_tokens self.classifier_proj_size = classifier_proj_size self.num_labels = num_labels self.is_encoder_decoder = is_encoder_decoder self.is_decoder = is_decoder def get_config(self): return WhisperConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_source_positions=self.max_source_positions, decoder_ffn_dim=self.hidden_size, encoder_ffn_dim=self.hidden_size, suppress_tokens=self.suppress_tokens, classifier_proj_size=self.classifier_proj_size, num_labels=self.num_labels, is_encoder_decoder=self.is_encoder_decoder, is_decoder=self.is_decoder, ) def prepare_whisper_encoder_inputs_dict( self, input_features, ): return { "input_features": input_features, } def prepare_config_and_inputs(self): input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length]) config = self.get_config() inputs_dict = self.prepare_whisper_encoder_inputs_dict( input_features=input_features, ) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_subsampled_output_lengths(self, input_lengths): """ Computes the output length of the convolutional layers """ for i in range(self.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths @property def encoder_seq_length(self): return self.get_subsampled_output_lengths(self.seq_length) @require_flax class WhisperEncoderModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = (FlaxWhisperForAudioClassification,) if is_flax_available() else () is_encoder_decoder = False fx_compatible = False test_pruning = False test_missing_keys = False input_name = "input_features" def setUp(self): self.model_tester = FlaxWhisperEncoderModelTester(self) _, 
inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self.init_shape = (1,) + inputs_dict["input_features"].shape[1:] self.all_model_classes = ( make_partial_class(model_class, input_shape=self.init_shape) for model_class in self.all_model_classes ) self.config_tester = ConfigTester(self, config_class=WhisperConfig) def test_config(self): self.config_tester.run_common_tests() # overwrite because of `input_features` def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(input_features, **kwargs): return model(input_features=input_features, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) # overwrite because of `input_features` def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_features", "attention_mask", "output_attentions"] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_inputs_embeds(self): pass # WhisperEncoder has no inputs_embeds and thus the `get_input_embeddings` fn is not implemented def test_model_common_attributes(self): pass # WhisperEncoder cannot resize token embeddings since it has no tokens embeddings def test_resize_tokens_embeddings(self): pass # WhisperEncoder does not have any base model def test_save_load_to_base(self): pass # WhisperEncoder does not have any base model def test_save_load_from_base(self): pass # WhisperEncoder does not have any base model @is_pt_flax_cross_test def test_save_load_from_base_pt(self): pass # WhisperEncoder does not have any base model @is_pt_flax_cross_test def test_save_load_to_base_pt(self): pass # WhisperEncoder does not have any base model @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): pass
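# Illustrative sketch (not part of the original test suite): the timestamp tests above reduce to a
# short generate-then-decode flow. Every call below mirrors one that appears in the tests; the only
# assumption is that the caller supplies `audio`, a 16 kHz mono float32 numpy array (the tests obtain
# theirs from a LibriSpeech dummy dataset via `_load_datasamples`).
def _transcribe_with_offsets_sketch(audio):
    import functools

    import jax

    from transformers import FlaxWhisperForConditionalGeneration, WhisperProcessor

    processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
    model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")

    # Log-mel features as JAX arrays, exactly as in `test_tiny_timestamp_generation`.
    input_features = processor.feature_extractor(raw_speech=audio, return_tensors="jax").input_features

    # `return_timestamps=True` makes the model emit timestamp tokens; jitting matches the slow test.
    generate_fn = jax.jit(functools.partial(model.generate, max_length=448, return_timestamps=True))
    generated_ids = generate_fn(input_features)

    # `output_offsets=True` turns the timestamp tokens into per-segment (start, end) tuples.
    return processor.batch_decode(generated_ids, skip_special_tokens=True, output_offsets=True)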
transformers/tests/models/whisper/test_modeling_flax_whisper.py/0
{ "file_path": "transformers/tests/models/whisper/test_modeling_flax_whisper.py", "repo_id": "transformers", "token_count": 18408 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import logging import os import sys import tempfile import unittest from pathlib import Path import datasets import numpy as np from huggingface_hub import HfFolder, Repository, delete_repo from requests.exceptions import HTTPError from transformers import ( AutomaticSpeechRecognitionPipeline, AutoModelForSequenceClassification, AutoTokenizer, DistilBertForSequenceClassification, MaskGenerationPipeline, T5ForConditionalGeneration, TextClassificationPipeline, TextGenerationPipeline, TFAutoModelForSequenceClassification, pipeline, ) from transformers.pipelines import PIPELINE_REGISTRY, get_task from transformers.pipelines.base import Pipeline, _pad from transformers.testing_utils import ( TOKEN, USER, CaptureLogger, RequestCounter, backend_empty_cache, is_pipeline_test, is_staging_test, nested_simplify, require_tensorflow_probability, require_tf, require_torch, require_torch_accelerator, require_torch_multi_accelerator, require_torch_or_tf, slow, torch_device, ) from transformers.utils import direct_transformers_import, is_tf_available, is_torch_available from transformers.utils import logging as transformers_logging sys.path.append(str(Path(__file__).parent.parent.parent / "utils")) from test_module.custom_pipeline import PairClassificationPipeline # noqa E402 logger = logging.getLogger(__name__) PATH_TO_TRANSFORMERS = os.path.join(Path(__file__).parent.parent.parent, "src/transformers") # Dynamically import the Transformers module to grab the attribute classes of the processor form their names. 
transformers_module = direct_transformers_import(PATH_TO_TRANSFORMERS) class ANY: def __init__(self, *_types): self._types = _types def __eq__(self, other): return isinstance(other, self._types) def __repr__(self): return f"ANY({', '.join(_type.__name__ for _type in self._types)})" @is_pipeline_test class CommonPipelineTest(unittest.TestCase): @require_torch def test_pipeline_iteration(self): from torch.utils.data import Dataset class MyDataset(Dataset): data = [ "This is a test", "This restaurant is great", "This restaurant is awful", ] def __len__(self): return 3 def __getitem__(self, i): return self.data[i] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) dataset = MyDataset() for output in text_classifier(dataset): self.assertEqual(output, {"label": ANY(str), "score": ANY(float)}) @require_torch def test_check_task_auto_inference(self): pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") self.assertIsInstance(pipe, TextClassificationPipeline) @require_torch def test_pipeline_batch_size_global(self): pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") self.assertEqual(pipe._batch_size, None) self.assertEqual(pipe._num_workers, None) pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", batch_size=2, num_workers=1) self.assertEqual(pipe._batch_size, 2) self.assertEqual(pipe._num_workers, 1) @require_torch def test_pipeline_pathlike(self): pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") with tempfile.TemporaryDirectory() as d: pipe.save_pretrained(d) path = Path(d) newpipe = pipeline(task="text-classification", model=path) self.assertIsInstance(newpipe, TextClassificationPipeline) @require_torch def test_pipeline_override(self): class MyPipeline(TextClassificationPipeline): pass text_classifier = pipeline(model="hf-internal-testing/tiny-random-distilbert", pipeline_class=MyPipeline) self.assertIsInstance(text_classifier, MyPipeline) def test_check_task(self): task = get_task("openai-community/gpt2") self.assertEqual(task, "text-generation") with self.assertRaises(RuntimeError): # Wrong framework get_task("espnet/siddhana_slurp_entity_asr_train_asr_conformer_raw_en_word_valid.acc.ave_10best") @require_torch def test_iterator_data(self): def data(n: int): for _ in range(n): yield "This is a test" pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") results = [] for out in pipe(data(10)): self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504}) results.append(out) self.assertEqual(len(results), 10) # When using multiple workers on streamable data it should still work # This will force using `num_workers=1` with a warning for now. 
results = [] for out in pipe(data(10), num_workers=2): self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504}) results.append(out) self.assertEqual(len(results), 10) @require_tf def test_iterator_data_tf(self): def data(n: int): for _ in range(n): yield "This is a test" pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", framework="tf") out = pipe("This is a test") results = [] for out in pipe(data(10)): self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504}) results.append(out) self.assertEqual(len(results), 10) @require_torch def test_unbatch_attentions_hidden_states(self): model = DistilBertForSequenceClassification.from_pretrained( "hf-internal-testing/tiny-random-distilbert", output_hidden_states=True, output_attentions=True ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-distilbert") text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer) # Used to throw an error because `hidden_states` are a tuple of tensors # instead of the expected tensor. outputs = text_classifier(["This is great !"] * 20, batch_size=32) self.assertEqual(len(outputs), 20) @require_torch def test_torch_dtype_property(self): import torch model_id = "hf-internal-testing/tiny-random-distilbert" # If dtype is specified in the pipeline constructor, the property should return that type pipe = pipeline(model=model_id, torch_dtype=torch.float16) self.assertEqual(pipe.torch_dtype, torch.float16) # If the underlying model changes dtype, the property should return the new type pipe.model.to(torch.bfloat16) self.assertEqual(pipe.torch_dtype, torch.bfloat16) # If dtype is NOT specified in the pipeline constructor, the property should just return # the dtype of the underlying model (default) pipe = pipeline(model=model_id) self.assertEqual(pipe.torch_dtype, torch.float32) # If underlying model doesn't have dtype property, simply return None pipe.model = None self.assertIsNone(pipe.torch_dtype) @require_torch def test_auto_model_pipeline_registration_from_local_dir(self): with tempfile.TemporaryDirectory() as tmp_dir: _ = Repository(local_dir=tmp_dir, clone_from="hf-internal-testing/tiny-random-custom-architecture") pipe = pipeline("text-generation", tmp_dir, trust_remote_code=True) self.assertIsInstance(pipe, TextGenerationPipeline) # Assert successful load @require_torch def test_pipeline_with_task_parameters_no_side_effects(self): """ Regression test: certain pipeline flags, like `task`, modified the model configuration, causing unexpected side-effects """ # This checkpoint has task-specific parameters that will modify the behavior of the pipeline model = T5ForConditionalGeneration.from_pretrained("t5-small") self.assertTrue(model.config.num_beams == 1) # The task-specific parameters used to cause side-effects on `model.config` -- not anymore pipe = pipeline(model=model, tokenizer=AutoTokenizer.from_pretrained("t5-small"), task="translation_en_to_de") self.assertTrue(model.config.num_beams == 1) self.assertTrue(model.generation_config.num_beams == 1) # Under the hood: we now store a generation config in the pipeline. This generation config stores the # task-specific paremeters. self.assertTrue(pipe.generation_config.num_beams == 4) # We can confirm that the task-specific parameters have an effect. (In this case, the default is `num_beams=1`, # which would crash when `num_return_sequences=4` is passed.) 
pipe("Hugging Face doesn't sell hugs.", num_return_sequences=4) with self.assertRaises(ValueError): pipe("Hugging Face doesn't sell hugs.", num_return_sequences=4, num_beams=1) @is_pipeline_test class PipelineScikitCompatTest(unittest.TestCase): @require_torch def test_pipeline_predict_pt(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.predict(data) self.assertEqual(expected_output, actual_output) @require_tf def test_pipeline_predict_tf(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.predict(data) self.assertEqual(expected_output, actual_output) @require_torch def test_pipeline_transform_pt(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.transform(data) self.assertEqual(expected_output, actual_output) @require_tf def test_pipeline_transform_tf(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.transform(data) self.assertEqual(expected_output, actual_output) @is_pipeline_test class PipelinePadTest(unittest.TestCase): @require_torch def test_pipeline_padding(self): import torch items = [ { "label": "label1", "input_ids": torch.LongTensor([[1, 23, 24, 2]]), "attention_mask": torch.LongTensor([[0, 1, 1, 0]]), }, { "label": "label2", "input_ids": torch.LongTensor([[1, 23, 24, 43, 44, 2]]), "attention_mask": torch.LongTensor([[0, 1, 1, 1, 1, 0]]), }, ] self.assertEqual(_pad(items, "label", 0, "right"), ["label1", "label2"]) self.assertTrue( torch.allclose( _pad(items, "input_ids", 10, "right"), torch.LongTensor([[1, 23, 24, 2, 10, 10], [1, 23, 24, 43, 44, 2]]), ) ) self.assertTrue( torch.allclose( _pad(items, "input_ids", 10, "left"), torch.LongTensor([[10, 10, 1, 23, 24, 2], [1, 23, 24, 43, 44, 2]]), ) ) self.assertTrue( torch.allclose( _pad(items, "attention_mask", 0, "right"), torch.LongTensor([[0, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 0]]) ) ) @require_torch def test_pipeline_image_padding(self): import torch items = [ { "label": "label1", "pixel_values": torch.zeros((1, 3, 10, 10)), }, { "label": "label2", "pixel_values": torch.zeros((1, 3, 10, 10)), }, ] self.assertEqual(_pad(items, "label", 0, "right"), ["label1", "label2"]) self.assertTrue( torch.allclose( _pad(items, "pixel_values", 10, "right"), torch.zeros((2, 3, 10, 10)), ) ) @require_torch def test_pipeline_offset_mapping(self): import torch items = [ { "offset_mappings": torch.zeros([1, 11, 2], dtype=torch.long), }, { "offset_mappings": torch.zeros([1, 4, 2], dtype=torch.long), }, ] self.assertTrue( torch.allclose( _pad(items, "offset_mappings", 0, "right"), torch.zeros((2, 11, 2), dtype=torch.long), ), ) @is_pipeline_test class PipelineUtilsTest(unittest.TestCase): @require_torch def test_pipeline_dataset(self): from transformers.pipelines.pt_utils import PipelineDataset dummy_dataset = [0, 1, 2, 3] def add(number, extra=0): return number + 
extra dataset = PipelineDataset(dummy_dataset, add, {"extra": 2}) self.assertEqual(len(dataset), 4) outputs = [dataset[i] for i in range(4)] self.assertEqual(outputs, [2, 3, 4, 5]) @require_torch def test_pipeline_iterator(self): from transformers.pipelines.pt_utils import PipelineIterator dummy_dataset = [0, 1, 2, 3] def add(number, extra=0): return number + extra dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}) self.assertEqual(len(dataset), 4) outputs = list(dataset) self.assertEqual(outputs, [2, 3, 4, 5]) @require_torch def test_pipeline_iterator_no_len(self): from transformers.pipelines.pt_utils import PipelineIterator def dummy_dataset(): for i in range(4): yield i def add(number, extra=0): return number + extra dataset = PipelineIterator(dummy_dataset(), add, {"extra": 2}) with self.assertRaises(TypeError): len(dataset) outputs = list(dataset) self.assertEqual(outputs, [2, 3, 4, 5]) @require_torch def test_pipeline_batch_unbatch_iterator(self): from transformers.pipelines.pt_utils import PipelineIterator dummy_dataset = [{"id": [0, 1, 2]}, {"id": [3]}] def add(number, extra=0): return {"id": [i + extra for i in number["id"]]} dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [{"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}]) @require_torch def test_pipeline_batch_unbatch_iterator_tensors(self): import torch from transformers.pipelines.pt_utils import PipelineIterator dummy_dataset = [{"id": torch.LongTensor([[10, 20], [0, 1], [0, 2]])}, {"id": torch.LongTensor([[3]])}] def add(number, extra=0): return {"id": number["id"] + extra} dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual( nested_simplify(outputs), [{"id": [[12, 22]]}, {"id": [[2, 3]]}, {"id": [[2, 4]]}, {"id": [[5]]}] ) @require_torch def test_pipeline_chunk_iterator(self): from transformers.pipelines.pt_utils import PipelineChunkIterator def preprocess_chunk(n: int): for i in range(n): yield i dataset = [2, 3] dataset = PipelineChunkIterator(dataset, preprocess_chunk, {}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [0, 1, 0, 1, 2]) @require_torch def test_pipeline_pack_iterator(self): from transformers.pipelines.pt_utils import PipelinePackIterator def pack(item): return {"id": item["id"] + 1, "is_last": item["is_last"]} dataset = [ {"id": 0, "is_last": False}, {"id": 1, "is_last": True}, {"id": 0, "is_last": False}, {"id": 1, "is_last": False}, {"id": 2, "is_last": True}, ] dataset = PipelinePackIterator(dataset, pack, {}) outputs = list(dataset) self.assertEqual( outputs, [ [ {"id": 1}, {"id": 2}, ], [ {"id": 1}, {"id": 2}, {"id": 3}, ], ], ) @require_torch def test_pipeline_pack_unbatch_iterator(self): from transformers.pipelines.pt_utils import PipelinePackIterator dummy_dataset = [{"id": [0, 1, 2], "is_last": [False, True, False]}, {"id": [3], "is_last": [True]}] def add(number, extra=0): return {"id": [i + extra for i in number["id"]], "is_last": number["is_last"]} dataset = PipelinePackIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [[{"id": 2}, {"id": 3}], [{"id": 4}, {"id": 5}]]) # is_false Across batch dummy_dataset = [{"id": [0, 1, 2], "is_last": [False, False, False]}, {"id": [3], "is_last": [True]}] def add(number, extra=0): return {"id": [i + extra for i in number["id"]], "is_last": number["is_last"]} dataset = PipelinePackIterator(dummy_dataset, add, 
{"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [[{"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}]]) def test_pipeline_negative_device(self): # To avoid regressing, pipeline used to accept device=-1 classifier = pipeline("text-generation", "hf-internal-testing/tiny-random-bert", device=-1) expected_output = [{"generated_text": ANY(str)}] actual_output = classifier("Test input.") self.assertEqual(expected_output, actual_output) @require_torch_accelerator def test_pipeline_no_device(self): # Test when no device is passed to pipeline import torch from transformers import AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") # Case 1: Model is manually moved to device model = AutoModelForCausalLM.from_pretrained( "hf-internal-testing/tiny-random-bert", torch_dtype=torch.float16 ).to(torch_device) model_device = model.device pipe = pipeline("text-generation", model=model, tokenizer=tokenizer) self.assertEqual(pipe.model.device, model_device) # Case 2: Model is loaded by accelerate model = AutoModelForCausalLM.from_pretrained( "hf-internal-testing/tiny-random-bert", device_map=torch_device, torch_dtype=torch.float16 ) model_device = model.device pipe = pipeline("text-generation", model=model, tokenizer=tokenizer) self.assertEqual(pipe.model.device, model_device) # Case 3: device_map is passed to model and device is passed to pipeline model = AutoModelForCausalLM.from_pretrained( "hf-internal-testing/tiny-random-bert", device_map=torch_device, torch_dtype=torch.float16 ) with self.assertRaises(ValueError): pipe = pipeline("text-generation", model=model, device="cpu", tokenizer=tokenizer) @require_torch_multi_accelerator def test_pipeline_device_not_equal_model_device(self): # Test when device ids are different, pipeline should move the model to the passed device id import torch from transformers import AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") model_device = f"{torch_device}:1" model = AutoModelForCausalLM.from_pretrained( "hf-internal-testing/tiny-random-bert", torch_dtype=torch.float16 ).to(model_device) target_device = f"{torch_device}:0" self.assertNotEqual(model_device, target_device) pipe = pipeline("text-generation", model=model, device=target_device, tokenizer=tokenizer) self.assertEqual(pipe.model.device, torch.device(target_device)) @slow @require_torch def test_load_default_pipelines_pt(self): import torch from transformers.pipelines import SUPPORTED_TASKS set_seed_fn = lambda: torch.manual_seed(0) # noqa: E731 for task in SUPPORTED_TASKS.keys(): if task == "table-question-answering": # test table in seperate test due to more dependencies continue self.check_default_pipeline(task, "pt", set_seed_fn, self.check_models_equal_pt) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() backend_empty_cache(torch_device) @slow @require_tf def test_load_default_pipelines_tf(self): from transformers.modeling_tf_utils import keras from transformers.pipelines import SUPPORTED_TASKS set_seed_fn = lambda: keras.utils.set_random_seed(0) # noqa: E731 for task in SUPPORTED_TASKS.keys(): if task == "table-question-answering": # test table in seperate test due to more dependencies continue self.check_default_pipeline(task, "tf", set_seed_fn, self.check_models_equal_tf) # clean-up as much as possible GPU memory occupied by TF gc.collect() @slow @require_torch def test_load_default_pipelines_pt_table_qa(self): import torch set_seed_fn = lambda: 
torch.manual_seed(0) # noqa: E731 self.check_default_pipeline("table-question-answering", "pt", set_seed_fn, self.check_models_equal_pt) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() backend_empty_cache(torch_device) @slow @require_torch @require_torch_accelerator def test_pipeline_accelerator(self): pipe = pipeline("text-generation", device=torch_device) _ = pipe("Hello") @slow @require_torch @require_torch_accelerator def test_pipeline_accelerator_indexed(self): pipe = pipeline("text-generation", device=torch_device) _ = pipe("Hello") @slow @require_tf @require_tensorflow_probability def test_load_default_pipelines_tf_table_qa(self): import tensorflow as tf set_seed_fn = lambda: tf.random.set_seed(0) # noqa: E731 self.check_default_pipeline("table-question-answering", "tf", set_seed_fn, self.check_models_equal_tf) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() def check_default_pipeline(self, task, framework, set_seed_fn, check_models_equal_fn): from transformers.pipelines import SUPPORTED_TASKS, pipeline task_dict = SUPPORTED_TASKS[task] # test to compare pipeline to manually loading the respective model model = None relevant_auto_classes = task_dict[framework] if len(relevant_auto_classes) == 0: # task has no default logger.debug(f"{task} in {framework} has no default") self.skipTest(f"{task} in {framework} has no default") # by default use first class auto_model_cls = relevant_auto_classes[0] # retrieve correct model ids if task == "translation": # special case for translation pipeline which has multiple languages model_ids = [] revisions = [] tasks = [] for translation_pair in task_dict["default"].keys(): model_id, revision = task_dict["default"][translation_pair]["model"][framework] model_ids.append(model_id) revisions.append(revision) tasks.append(task + f"_{'_to_'.join(translation_pair)}") else: # normal case - non-translation pipeline model_id, revision = task_dict["default"]["model"][framework] model_ids = [model_id] revisions = [revision] tasks = [task] # check for equality for model_id, revision, task in zip(model_ids, revisions, tasks): # load default model try: set_seed_fn() model = auto_model_cls.from_pretrained(model_id, revision=revision) except ValueError: # first auto class is possible not compatible with model, go to next model class auto_model_cls = relevant_auto_classes[1] set_seed_fn() model = auto_model_cls.from_pretrained(model_id, revision=revision) # load default pipeline set_seed_fn() default_pipeline = pipeline(task, framework=framework) # compare pipeline model with default model models_are_equal = check_models_equal_fn(default_pipeline.model, model) self.assertTrue(models_are_equal, f"{task} model doesn't match pipeline.") logger.debug(f"{task} in {framework} succeeded with {model_id}.") def check_models_equal_pt(self, model1, model2): models_are_equal = True for model1_p, model2_p in zip(model1.parameters(), model2.parameters()): if model1_p.data.ne(model2_p.data).sum() > 0: models_are_equal = False return models_are_equal def check_models_equal_tf(self, model1, model2): models_are_equal = True for model1_p, model2_p in zip(model1.weights, model2.weights): if np.abs(model1_p.numpy() - model2_p.numpy()).sum() > 1e-5: models_are_equal = False return models_are_equal class CustomPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "maybe_arg" in kwargs: preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] return preprocess_kwargs, {}, {} def preprocess(self, text, 
maybe_arg=2): input_ids = self.tokenizer(text, return_tensors="pt") return input_ids def _forward(self, model_inputs): outputs = self.model(**model_inputs) return outputs def postprocess(self, model_outputs): return model_outputs["logits"].softmax(-1).numpy() @is_pipeline_test class CustomPipelineTest(unittest.TestCase): def test_warning_logs(self): transformers_logging.set_verbosity_debug() logger_ = transformers_logging.get_logger("transformers.pipelines.base") alias = "text-classification" # Get the original task, so we can restore it at the end. # (otherwise the subsequential tests in `TextClassificationPipelineTests` will fail) _, original_task, _ = PIPELINE_REGISTRY.check_task(alias) try: with CaptureLogger(logger_) as cm: PIPELINE_REGISTRY.register_pipeline(alias, PairClassificationPipeline) self.assertIn(f"{alias} is already registered", cm.out) finally: # restore PIPELINE_REGISTRY.supported_tasks[alias] = original_task def test_register_pipeline(self): PIPELINE_REGISTRY.register_pipeline( "custom-text-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification if is_torch_available() else None, tf_model=TFAutoModelForSequenceClassification if is_tf_available() else None, default={"pt": "hf-internal-testing/tiny-random-distilbert"}, type="text", ) assert "custom-text-classification" in PIPELINE_REGISTRY.get_supported_tasks() _, task_def, _ = PIPELINE_REGISTRY.check_task("custom-text-classification") self.assertEqual(task_def["pt"], (AutoModelForSequenceClassification,) if is_torch_available() else ()) self.assertEqual(task_def["tf"], (TFAutoModelForSequenceClassification,) if is_tf_available() else ()) self.assertEqual(task_def["type"], "text") self.assertEqual(task_def["impl"], PairClassificationPipeline) self.assertEqual(task_def["default"], {"model": {"pt": "hf-internal-testing/tiny-random-distilbert"}}) # Clean registry for next tests. del PIPELINE_REGISTRY.supported_tasks["custom-text-classification"] @require_torch_or_tf def test_dynamic_pipeline(self): PIPELINE_REGISTRY.register_pipeline( "pair-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification if is_torch_available() else None, tf_model=TFAutoModelForSequenceClassification if is_tf_available() else None, ) classifier = pipeline("pair-classification", model="hf-internal-testing/tiny-random-bert") # Clean registry as we won't need the pipeline to be in it for the rest to work. 
del PIPELINE_REGISTRY.supported_tasks["pair-classification"] with tempfile.TemporaryDirectory() as tmp_dir: classifier.save_pretrained(tmp_dir) # checks self.assertDictEqual( classifier.model.config.custom_pipelines, { "pair-classification": { "impl": "custom_pipeline.PairClassificationPipeline", "pt": ("AutoModelForSequenceClassification",) if is_torch_available() else (), "tf": ("TFAutoModelForSequenceClassification",) if is_tf_available() else (), } }, ) # Fails if the user forget to pass along `trust_remote_code=True` with self.assertRaises(ValueError): _ = pipeline(model=tmp_dir) new_classifier = pipeline(model=tmp_dir, trust_remote_code=True) # Using trust_remote_code=False forces the traditional pipeline tag old_classifier = pipeline("text-classification", model=tmp_dir, trust_remote_code=False) # Can't make an isinstance check because the new_classifier is from the PairClassificationPipeline class of a # dynamic module self.assertEqual(new_classifier.__class__.__name__, "PairClassificationPipeline") self.assertEqual(new_classifier.task, "pair-classification") results = new_classifier("I hate you", second_text="I love you") self.assertDictEqual( nested_simplify(results), {"label": "LABEL_0", "score": 0.505, "logits": [-0.003, -0.024]}, ) self.assertEqual(old_classifier.__class__.__name__, "TextClassificationPipeline") self.assertEqual(old_classifier.task, "text-classification") results = old_classifier("I hate you", text_pair="I love you") self.assertListEqual( nested_simplify(results), [{"label": "LABEL_0", "score": 0.505}], ) @require_torch_or_tf def test_cached_pipeline_has_minimum_calls_to_head(self): # Make sure we have cached the pipeline. _ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert") with RequestCounter() as counter: _ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert") self.assertEqual(counter["GET"], 0) self.assertEqual(counter["HEAD"], 1) self.assertEqual(counter.total_calls, 1) @require_torch def test_chunk_pipeline_batching_single_file(self): # Make sure we have cached the pipeline. pipe = pipeline(model="hf-internal-testing/tiny-random-Wav2Vec2ForCTC") ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] pipe = pipeline(model="hf-internal-testing/tiny-random-Wav2Vec2ForCTC") # For some reason scoping doesn't work if not using `self.` self.COUNT = 0 forward = pipe.model.forward def new_forward(*args, **kwargs): self.COUNT += 1 return forward(*args, **kwargs) pipe.model.forward = new_forward for out in pipe(audio, return_timestamps="char", chunk_length_s=3, stride_length_s=[1, 1], batch_size=1024): pass self.assertEqual(self.COUNT, 1) @require_torch def test_custom_code_with_string_tokenizer(self): # This test checks for an edge case - tokenizer loading used to fail when using a custom code model # with a separate tokenizer that was passed as a repo name rather than a tokenizer object. 
# See https://github.com/huggingface/transformers/issues/31669 text_generator = pipeline( "text-generation", model="hf-internal-testing/tiny-random-custom-architecture", tokenizer="hf-internal-testing/tiny-random-custom-architecture", trust_remote_code=True, ) self.assertIsInstance(text_generator, TextGenerationPipeline) # Assert successful loading @require_torch def test_custom_code_with_string_feature_extractor(self): speech_recognizer = pipeline( "automatic-speech-recognition", model="hf-internal-testing/fake-custom-wav2vec2", feature_extractor="hf-internal-testing/fake-custom-wav2vec2", trust_remote_code=True, ) self.assertIsInstance(speech_recognizer, AutomaticSpeechRecognitionPipeline) # Assert successful loading @require_torch def test_custom_code_with_string_preprocessor(self): mask_generator = pipeline( "mask-generation", model="hf-internal-testing/fake-custom-sam", processor="hf-internal-testing/fake-custom-sam", trust_remote_code=True, ) self.assertIsInstance(mask_generator, MaskGenerationPipeline) # Assert successful loading @require_torch @is_staging_test class DynamicPipelineTester(unittest.TestCase): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "I", "love", "hate", "you"] @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-dynamic-pipeline") except HTTPError: pass @unittest.skip("Broken, TODO @Yih-Dar") def test_push_to_hub_dynamic_pipeline(self): from transformers import BertConfig, BertForSequenceClassification, BertTokenizer PIPELINE_REGISTRY.register_pipeline( "pair-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification, ) config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertForSequenceClassification(config).eval() with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) classifier = pipeline("pair-classification", model=model, tokenizer=tokenizer) # Clean registry as we won't need the pipeline to be in it for the rest to work. 
del PIPELINE_REGISTRY.supported_tasks["pair-classification"] classifier.save_pretrained(tmp_dir) # checks if the configuration has been added after calling the save_pretrained method self.assertDictEqual( classifier.model.config.custom_pipelines, { "pair-classification": { "impl": "custom_pipeline.PairClassificationPipeline", "pt": ("AutoModelForSequenceClassification",), "tf": (), } }, ) # use push_to_hub method to push the pipeline classifier.push_to_hub(f"{USER}/test-dynamic-pipeline", token=self._token) # Fails if the user forget to pass along `trust_remote_code=True` with self.assertRaises(ValueError): _ = pipeline(model=f"{USER}/test-dynamic-pipeline") new_classifier = pipeline(model=f"{USER}/test-dynamic-pipeline", trust_remote_code=True) # Can't make an isinstance check because the new_classifier is from the PairClassificationPipeline class of a # dynamic module self.assertEqual(new_classifier.__class__.__name__, "PairClassificationPipeline") # check for tag exitence, tag needs to be added when we are calling a custom pipeline from the hub # useful for cases such as finetuning self.assertDictEqual( new_classifier.model.config.custom_pipelines, { "pair-classification": { "impl": f"{USER}/test-dynamic-pipeline--custom_pipeline.PairClassificationPipeline", "pt": ("AutoModelForSequenceClassification",), "tf": (), } }, ) # test if the pipeline still works after the model is finetuned # (we are actually testing if the pipeline still works from the final repo) # this is where the user/repo--module.class is used for new_classifier.model.push_to_hub(repo_name=f"{USER}/test-pipeline-for-a-finetuned-model", token=self._token) del new_classifier # free up memory new_classifier = pipeline(model=f"{USER}/test-pipeline-for-a-finetuned-model", trust_remote_code=True) results = classifier("I hate you", second_text="I love you") new_results = new_classifier("I hate you", second_text="I love you") self.assertDictEqual(nested_simplify(results), nested_simplify(new_results)) # Using trust_remote_code=False forces the traditional pipeline tag old_classifier = pipeline( "text-classification", model=f"{USER}/test-dynamic-pipeline", trust_remote_code=False ) self.assertEqual(old_classifier.__class__.__name__, "TextClassificationPipeline") self.assertEqual(old_classifier.task, "text-classification") new_results = old_classifier("I hate you", text_pair="I love you") self.assertListEqual( nested_simplify([{"label": results["label"], "score": results["score"]}]), nested_simplify(new_results) )
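# Illustrative sketches (not part of the original test suite); both mirror API calls exercised by the
# tests in this file and reuse the same tiny checkpoints, so they stay cheap to run.


# 1) Streaming an iterable through a pipeline, as `test_iterator_data` does: any generator works and
#    results are yielded lazily; `batch_size` can also be passed at call time, as the chunked ASR and
#    hidden-states tests above do.
def _streaming_pipeline_sketch():
    from transformers import pipeline

    def texts(n):
        for i in range(n):
            yield f"This is test sentence {i}"

    classifier = pipeline(model="hf-internal-testing/tiny-random-distilbert")
    # Each item is a dict like {"label": ..., "score": ...}, produced one at a time.
    return [prediction for prediction in classifier(texts(10), batch_size=4)]


# 2) Registering a custom pipeline, following the `CustomPipeline` / `PIPELINE_REGISTRY` pattern used
#    above. The task name "my-score" is made up for the example; the four-method skeleton is the same
#    one the tests define.
def _custom_pipeline_registration_sketch():
    from transformers import AutoModelForSequenceClassification, pipeline
    from transformers.pipelines import PIPELINE_REGISTRY
    from transformers.pipelines.base import Pipeline

    class MyScorePipeline(Pipeline):
        def _sanitize_parameters(self, **kwargs):
            preprocess_kwargs = {}
            if "maybe_arg" in kwargs:
                preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]
            return preprocess_kwargs, {}, {}

        def preprocess(self, text, maybe_arg=2):
            return self.tokenizer(text, return_tensors="pt")

        def _forward(self, model_inputs):
            return self.model(**model_inputs)

        def postprocess(self, model_outputs):
            return model_outputs["logits"].softmax(-1).numpy()

    PIPELINE_REGISTRY.register_pipeline(
        "my-score",
        pipeline_class=MyScorePipeline,
        pt_model=AutoModelForSequenceClassification,
        default={"pt": "hf-internal-testing/tiny-random-distilbert"},
        type="text",
    )
    try:
        scorer = pipeline("my-score", model="hf-internal-testing/tiny-random-distilbert")
        return scorer("This is a test")
    finally:
        # Clean the registry afterwards, as the tests do, so other tasks are unaffected.
        del PIPELINE_REGISTRY.supported_tasks["my-score"]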
transformers/tests/pipelines/test_pipelines_common.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_common.py", "repo_id": "transformers", "token_count": 17527 }
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available

from .test_pipelines_common import ANY


if is_torch_available():
    import torch


@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(
        self,
        model,
        tokenizer=None,
        image_processor=None,
        feature_extractor=None,
        processor=None,
        torch_dtype="float32",
    ):
        generator = Text2TextGenerationPipeline(
            model=model,
            tokenizer=tokenizer,
            feature_extractor=feature_extractor,
            image_processor=image_processor,
            processor=processor,
            torch_dtype=torch_dtype,
        )
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
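# Illustrative sketch (not part of the original test suite): the decoding knobs exercised above, in a
# plain usage form. The checkpoint is the same tiny random T5 the tests use, so the generated text is
# meaningless; only the output structure matters here.
def _text2text_usage_sketch():
    from transformers import pipeline

    generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")

    # Greedy decoding (do_sample=False) keeps the output reproducible.
    greedy = generator("Something there", do_sample=False)  # -> [{"generated_text": ...}]

    # Beam search returning several candidates: one dict per returned sequence.
    beams = generator("Something there", num_return_sequences=3, num_beams=3)

    # return_tensors=True yields raw token ids ({"generated_token_ids": tensor}) instead of text.
    tokens = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)

    return greedy, beams, tokens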
transformers/tests/pipelines/test_pipelines_text2text_generation.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_text2text_generation.py", "repo_id": "transformers", "token_count": 2311 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import sys import tempfile import unittest from contextlib import contextmanager from pathlib import Path from git import Repo from transformers.testing_utils import CaptureStdout REPO_PATH = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(REPO_PATH, "utils")) import tests_fetcher # noqa: E402 from tests_fetcher import ( # noqa: E402 checkout_commit, clean_code, create_reverse_dependency_map, create_reverse_dependency_tree, diff_is_docstring_only, extract_imports, get_all_tests, get_diff, get_module_dependencies, get_tree_starting_at, infer_tests_to_run, init_test_examples_dependencies, parse_commit_message, print_tree_deps_of, ) BERT_MODELING_FILE = "src/transformers/models/bert/modeling_bert.py" BERT_MODEL_FILE = """from ...modeling_utils import PreTrainedModel from ...utils import is_torch_available from .configuration_bert import BertConfig class BertModel: ''' This is the docstring. ''' This is the code """ BERT_MODEL_FILE_NEW_DOCSTRING = """from ...modeling_utils import PreTrainedModel from ...utils import is_torch_available from .configuration_bert import BertConfig class BertModel: ''' This is the docstring. It has been updated. ''' This is the code """ BERT_MODEL_FILE_NEW_CODE = """from ...modeling_utils import PreTrainedModel from ...utils import is_torch_available from .configuration_bert import BertConfig class BertModel: ''' This is the docstring. ''' This is the code. It has been updated """ def create_tmp_repo(tmp_dir, models=None): """ Creates a repository in a temporary directory mimicking the structure of Transformers. Uses the list of models provided (which defaults to just `["bert"]`). 
""" tmp_dir = Path(tmp_dir) if tmp_dir.exists(): shutil.rmtree(tmp_dir) tmp_dir.mkdir(exist_ok=True) repo = Repo.init(tmp_dir) if models is None: models = ["bert"] class_names = [model[0].upper() + model[1:] for model in models] transformers_dir = tmp_dir / "src" / "transformers" transformers_dir.mkdir(parents=True, exist_ok=True) with open(transformers_dir / "__init__.py", "w") as f: init_lines = ["from .utils import cached_file, is_torch_available"] init_lines.extend( [f"from .models.{model} import {cls}Config, {cls}Model" for model, cls in zip(models, class_names)] ) f.write("\n".join(init_lines) + "\n") with open(transformers_dir / "configuration_utils.py", "w") as f: f.write("from .utils import cached_file\n\ncode") with open(transformers_dir / "modeling_utils.py", "w") as f: f.write("from .utils import cached_file\n\ncode") utils_dir = tmp_dir / "src" / "transformers" / "utils" utils_dir.mkdir(exist_ok=True) with open(utils_dir / "__init__.py", "w") as f: f.write("from .hub import cached_file\nfrom .imports import is_torch_available\n") with open(utils_dir / "hub.py", "w") as f: f.write("import huggingface_hub\n\ncode") with open(utils_dir / "imports.py", "w") as f: f.write("code") model_dir = tmp_dir / "src" / "transformers" / "models" model_dir.mkdir(parents=True, exist_ok=True) with open(model_dir / "__init__.py", "w") as f: f.write("\n".join([f"import {model}" for model in models])) for model, cls in zip(models, class_names): model_dir = tmp_dir / "src" / "transformers" / "models" / model model_dir.mkdir(parents=True, exist_ok=True) with open(model_dir / "__init__.py", "w") as f: f.write(f"from .configuration_{model} import {cls}Config\nfrom .modeling_{model} import {cls}Model\n") with open(model_dir / f"configuration_{model}.py", "w") as f: f.write("from ...configuration_utils import PretrainedConfig\ncode") with open(model_dir / f"modeling_{model}.py", "w") as f: modeling_code = BERT_MODEL_FILE.replace("bert", model).replace("Bert", cls) f.write(modeling_code) test_dir = tmp_dir / "tests" test_dir.mkdir(exist_ok=True) with open(test_dir / "test_modeling_common.py", "w") as f: f.write("from transformers.modeling_utils import PreTrainedModel\ncode") for model, cls in zip(models, class_names): test_model_dir = test_dir / "models" / model test_model_dir.mkdir(parents=True, exist_ok=True) (test_model_dir / "__init__.py").touch() with open(test_model_dir / f"test_modeling_{model}.py", "w") as f: f.write( f"from transformers import {cls}Config, {cls}Model\nfrom ...test_modeling_common import ModelTesterMixin\n\ncode" ) example_dir = tmp_dir / "examples" example_dir.mkdir(exist_ok=True) for framework in ["flax", "pytorch", "tensorflow"]: framework_dir = example_dir / framework framework_dir.mkdir(exist_ok=True) with open(framework_dir / f"test_{framework}_examples.py", "w") as f: f.write("""test_args = "run_glue.py"\n""") glue_dir = framework_dir / "text-classification" glue_dir.mkdir(exist_ok=True) with open(glue_dir / "run_glue.py", "w") as f: f.write("from transformers import BertModel\n\ncode") repo.index.add(["examples", "src", "tests"]) repo.index.commit("Initial commit") repo.create_head("main") repo.head.reference = repo.refs.main repo.delete_head("master") return repo @contextmanager def patch_transformer_repo_path(new_folder): """ Temporarily patches the variables defines in `tests_fetcher` to use a different location for the repo. 
""" old_repo_path = tests_fetcher.PATH_TO_REPO tests_fetcher.PATH_TO_REPO = Path(new_folder).resolve() tests_fetcher.PATH_TO_EXAMPLES = tests_fetcher.PATH_TO_REPO / "examples" tests_fetcher.PATH_TO_TRANFORMERS = tests_fetcher.PATH_TO_REPO / "src/transformers" tests_fetcher.PATH_TO_TESTS = tests_fetcher.PATH_TO_REPO / "tests" try: yield finally: tests_fetcher.PATH_TO_REPO = old_repo_path tests_fetcher.PATH_TO_EXAMPLES = tests_fetcher.PATH_TO_REPO / "examples" tests_fetcher.PATH_TO_TRANFORMERS = tests_fetcher.PATH_TO_REPO / "src/transformers" tests_fetcher.PATH_TO_TESTS = tests_fetcher.PATH_TO_REPO / "tests" def commit_changes(filenames, contents, repo, commit_message="Commit"): """ Commit new `contents` to `filenames` inside a given `repo`. """ if not isinstance(filenames, list): filenames = [filenames] if not isinstance(contents, list): contents = [contents] folder = Path(repo.working_dir) for filename, content in zip(filenames, contents): with open(folder / filename, "w") as f: f.write(content) repo.index.add(filenames) commit = repo.index.commit(commit_message) return commit.hexsha class TestFetcherTester(unittest.TestCase): def test_checkout_commit(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) repo = create_tmp_repo(tmp_folder) initial_sha = repo.head.commit.hexsha new_sha = commit_changes(BERT_MODELING_FILE, BERT_MODEL_FILE_NEW_DOCSTRING, repo) assert repo.head.commit.hexsha == new_sha with checkout_commit(repo, initial_sha): assert repo.head.commit.hexsha == initial_sha with open(tmp_folder / BERT_MODELING_FILE) as f: assert f.read() == BERT_MODEL_FILE assert repo.head.commit.hexsha == new_sha with open(tmp_folder / BERT_MODELING_FILE) as f: assert f.read() == BERT_MODEL_FILE_NEW_DOCSTRING def test_clean_code(self): # Clean code removes all strings in triple quotes assert clean_code('"""\nDocstring\n"""\ncode\n"""Long string"""\ncode\n') == "code\ncode" assert clean_code("'''\nDocstring\n'''\ncode\n'''Long string'''\ncode\n'''") == "code\ncode" # Clean code removes all comments assert clean_code("code\n# Comment\ncode") == "code\ncode" assert clean_code("code # inline comment\ncode") == "code \ncode" def test_get_all_tests(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): assert get_all_tests() == ["tests/models/bert", "tests/test_modeling_common.py"] def test_get_all_tests_on_full_repo(self): all_tests = get_all_tests() assert "tests/models/albert" in all_tests assert "tests/models/bert" in all_tests assert "tests/repo_utils" in all_tests assert "tests/test_pipeline_mixin.py" in all_tests assert "tests/models" not in all_tests assert "tests/__pycache__" not in all_tests assert "tests/models/albert/test_modeling_albert.py" not in all_tests assert "tests/repo_utils/test_tests_fetcher.py" not in all_tests def test_diff_is_docstring_only(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) repo = create_tmp_repo(tmp_folder) branching_point = repo.refs.main.commit bert_file = BERT_MODELING_FILE commit_changes(bert_file, BERT_MODEL_FILE_NEW_DOCSTRING, repo) assert diff_is_docstring_only(repo, branching_point, bert_file) commit_changes(bert_file, BERT_MODEL_FILE_NEW_CODE, repo) assert not diff_is_docstring_only(repo, branching_point, bert_file) def test_get_diff(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) repo = create_tmp_repo(tmp_folder) initial_commit = 
repo.refs.main.commit bert_file = BERT_MODELING_FILE commit_changes(bert_file, BERT_MODEL_FILE_NEW_DOCSTRING, repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == [] commit_changes(bert_file, BERT_MODEL_FILE_NEW_DOCSTRING + "\n# Adding a comment\n", repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == [] commit_changes(bert_file, BERT_MODEL_FILE_NEW_CODE, repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == [ "src/transformers/models/bert/modeling_bert.py" ] commit_changes("src/transformers/utils/hub.py", "import huggingface_hub\n\nnew code", repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == ["src/transformers/utils/hub.py"] assert get_diff(repo, repo.head.commit, [initial_commit]) == [ "src/transformers/models/bert/modeling_bert.py", "src/transformers/utils/hub.py", ] def test_extract_imports_relative(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) expected_bert_imports = [ ("src/transformers/modeling_utils.py", ["PreTrainedModel"]), ("src/transformers/utils/__init__.py", ["is_torch_available"]), ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ] expected_utils_imports = [ ("src/transformers/utils/hub.py", ["cached_file"]), ("src/transformers/utils/imports.py", ["is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports assert extract_imports("src/transformers/utils/__init__.py") == expected_utils_imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from ...utils import cached_file, is_torch_available\nfrom .configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports # Test with multi-line imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from ...utils import (\n cached_file,\n is_torch_available\n)\nfrom .configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports def test_extract_imports_absolute(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from transformers.utils import cached_file, is_torch_available\nfrom transformers.models.bert.configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports # Test with multi-line imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from transformers.utils import (\n cached_file,\n is_torch_available\n)\nfrom transformers.models.bert.configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), 
("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports # Test with base imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from transformers.utils import (\n cached_file,\n is_torch_available\n)\nfrom transformers import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/__init__.py", ["BertConfig"]), ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports def test_get_module_dependencies(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) expected_bert_dependencies = [ "src/transformers/modeling_utils.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/utils/imports.py", ] with patch_transformer_repo_path(tmp_folder): assert get_module_dependencies(BERT_MODELING_FILE) == expected_bert_dependencies expected_test_bert_dependencies = [ "tests/test_modeling_common.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ] with patch_transformer_repo_path(tmp_folder): assert ( get_module_dependencies("tests/models/bert/test_modeling_bert.py") == expected_test_bert_dependencies ) # Test with a submodule (tmp_folder / "src/transformers/utils/logging.py").touch() with open(tmp_folder / BERT_MODELING_FILE, "a") as f: f.write("from ...utils import logging\n") expected_bert_dependencies = [ "src/transformers/modeling_utils.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/utils/logging.py", "src/transformers/utils/imports.py", ] with patch_transformer_repo_path(tmp_folder): assert get_module_dependencies(BERT_MODELING_FILE) == expected_bert_dependencies # Test with an object non-imported in the init create_tmp_repo(tmp_folder) with open(tmp_folder / BERT_MODELING_FILE, "a") as f: f.write("from ...utils import CONSTANT\n") expected_bert_dependencies = [ "src/transformers/modeling_utils.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/utils/__init__.py", "src/transformers/utils/imports.py", ] with patch_transformer_repo_path(tmp_folder): assert get_module_dependencies(BERT_MODELING_FILE) == expected_bert_dependencies # Test with an example create_tmp_repo(tmp_folder) expected_example_dependencies = ["src/transformers/models/bert/modeling_bert.py"] with patch_transformer_repo_path(tmp_folder): assert ( get_module_dependencies("examples/pytorch/text-classification/run_glue.py") == expected_example_dependencies ) def test_create_reverse_dependency_tree(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): tree = create_reverse_dependency_tree() init_edges = [ "src/transformers/utils/hub.py", "src/transformers/utils/imports.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ] assert {f for f, g in tree if g == "src/transformers/__init__.py"} == set(init_edges) bert_edges = [ "src/transformers/modeling_utils.py", "src/transformers/utils/imports.py", "src/transformers/models/bert/configuration_bert.py", ] assert {f for f, g in tree if g == "src/transformers/models/bert/modeling_bert.py"} == set(bert_edges) test_bert_edges = [ 
"tests/test_modeling_common.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ] assert {f for f, g in tree if g == "tests/models/bert/test_modeling_bert.py"} == set(test_bert_edges) def test_get_tree_starting_at(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): edges = create_reverse_dependency_tree() bert_tree = get_tree_starting_at("src/transformers/models/bert/modeling_bert.py", edges) config_utils_tree = get_tree_starting_at("src/transformers/configuration_utils.py", edges) expected_bert_tree = [ "src/transformers/models/bert/modeling_bert.py", [("src/transformers/models/bert/modeling_bert.py", "tests/models/bert/test_modeling_bert.py")], ] assert bert_tree == expected_bert_tree expected_config_tree = [ "src/transformers/configuration_utils.py", [("src/transformers/configuration_utils.py", "src/transformers/models/bert/configuration_bert.py")], [ ("src/transformers/models/bert/configuration_bert.py", "tests/models/bert/test_modeling_bert.py"), ( "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ), ], ] # Order of the edges is random assert [set(v) for v in config_utils_tree] == [set(v) for v in expected_config_tree] def test_print_tree_deps_of(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) # There are two possible outputs since the order of the last two lines is non-deterministic. expected_std_out = """src/transformers/models/bert/modeling_bert.py tests/models/bert/test_modeling_bert.py src/transformers/configuration_utils.py src/transformers/models/bert/configuration_bert.py src/transformers/models/bert/modeling_bert.py tests/models/bert/test_modeling_bert.py""" expected_std_out_2 = """src/transformers/models/bert/modeling_bert.py tests/models/bert/test_modeling_bert.py src/transformers/configuration_utils.py src/transformers/models/bert/configuration_bert.py tests/models/bert/test_modeling_bert.py src/transformers/models/bert/modeling_bert.py""" with patch_transformer_repo_path(tmp_folder), CaptureStdout() as cs: print_tree_deps_of("src/transformers/models/bert/modeling_bert.py") print_tree_deps_of("src/transformers/configuration_utils.py") assert cs.out.strip() in [expected_std_out, expected_std_out_2] def test_init_test_examples_dependencies(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) expected_example_deps = { "examples/flax/test_flax_examples.py": [ "examples/flax/text-classification/run_glue.py", "examples/flax/test_flax_examples.py", ], "examples/pytorch/test_pytorch_examples.py": [ "examples/pytorch/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", ], "examples/tensorflow/test_tensorflow_examples.py": [ "examples/tensorflow/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", ], } expected_examples = { "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } with patch_transformer_repo_path(tmp_folder): example_deps, all_examples = init_test_examples_dependencies() assert example_deps == expected_example_deps assert 
{str(f.relative_to(tmp_folder)) for f in all_examples} == expected_examples def test_create_reverse_dependency_map(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): reverse_map = create_reverse_dependency_map() # impact of BERT modeling file (note that we stop at the inits and don't go down further) expected_bert_deps = { "src/transformers/__init__.py", "src/transformers/models/bert/__init__.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/models/bert/modeling_bert.py"]) == expected_bert_deps # init gets the direct deps (and their recursive deps) expected_init_deps = { "src/transformers/utils/__init__.py", "src/transformers/utils/hub.py", "src/transformers/utils/imports.py", "src/transformers/models/bert/__init__.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", "src/transformers/configuration_utils.py", "src/transformers/modeling_utils.py", "tests/test_modeling_common.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/__init__.py"]) == expected_init_deps expected_init_deps = { "src/transformers/__init__.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/models/bert/__init__.py"]) == expected_init_deps # Test that with more models init of bert only gets deps to bert. 
create_tmp_repo(tmp_folder, models=["bert", "gpt2"]) with patch_transformer_repo_path(tmp_folder): reverse_map = create_reverse_dependency_map() # init gets the direct deps (and their recursive deps) expected_init_deps = { "src/transformers/__init__.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/models/bert/__init__.py"]) == expected_init_deps @unittest.skip("Broken for now TODO @ArthurZucker") def test_infer_tests_to_run(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) models = ["bert", "gpt2"] + [f"bert{i}" for i in range(10)] repo = create_tmp_repo(tmp_folder, models=models) commit_changes("src/transformers/models/bert/modeling_bert.py", BERT_MODEL_FILE_NEW_CODE, repo) example_tests = { "examples/flax/test_flax_examples.py", "examples/pytorch/test_pytorch_examples.py", "examples/tensorflow/test_tensorflow_examples.py", } with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "test-output.txt", "r") as f: tests_to_run = f.read() with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() assert tests_to_run == "tests/models/bert/test_modeling_bert.py" assert set(example_tests_to_run.split(" ")) == example_tests # Fake a new model addition repo = create_tmp_repo(tmp_folder, models=models) branch = repo.create_head("new_model") branch.checkout() with open(tmp_folder / "src/transformers/__init__.py", "a") as f: f.write("from .models.t5 import T5Config, T5Model\n") model_dir = tmp_folder / "src/transformers/models/t5" model_dir.mkdir(exist_ok=True) with open(model_dir / "__init__.py", "w") as f: f.write("from .configuration_t5 import T5Config\nfrom .modeling_t5 import T5Model\n") with open(model_dir / "configuration_t5.py", "w") as f: f.write("from ...configuration_utils import PretrainedConfig\ncode") with open(model_dir / "modeling_t5.py", "w") as f: modeling_code = BERT_MODEL_FILE.replace("bert", "t5").replace("Bert", "T5") f.write(modeling_code) test_dir = tmp_folder / "tests/models/t5" test_dir.mkdir(exist_ok=True) (test_dir / "__init__.py").touch() with open(test_dir / "test_modeling_t5.py", "w") as f: f.write( "from transformers import T5Config, T5Model\nfrom ...test_modeling_common import ModelTesterMixin\n\ncode" ) repo.index.add(["src", "tests"]) repo.index.commit("Add T5 model") with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt") with open(tmp_folder / "test-output.txt", "r") as f: tests_to_run = f.read() with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() expected_tests = { "tests/models/bert/test_modeling_bert.py", "tests/models/gpt2/test_modeling_gpt2.py", "tests/models/t5/test_modeling_t5.py", "tests/test_modeling_common.py", } assert set(tests_to_run.split(" ")) == expected_tests assert set(example_tests_to_run.split(" ")) == example_tests with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", filter_models=False) with open(tmp_folder / "test-output.txt", "r") 
as f: tests_to_run = f.read() with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() expected_tests = [f"tests/models/{name}/test_modeling_{name}.py" for name in models + ["t5"]] expected_tests = set(expected_tests + ["tests/test_modeling_common.py"]) assert set(tests_to_run.split(" ")) == expected_tests assert set(example_tests_to_run.split(" ")) == example_tests @unittest.skip("Broken for now TODO @ArthurZucker") def test_infer_tests_to_run_with_test_modifs(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) models = ["bert", "gpt2"] + [f"bert{i}" for i in range(10)] repo = create_tmp_repo(tmp_folder, models=models) commit_changes( "tests/models/bert/test_modeling_bert.py", "from transformers import BertConfig, BertModel\nfrom ...test_modeling_common import ModelTesterMixin\n\ncode1", repo, ) with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "test-output.txt", "r") as f: tests_to_run = f.read() assert tests_to_run == "tests/models/bert/test_modeling_bert.py" @unittest.skip("Broken for now TODO @ArthurZucker") def test_infer_tests_to_run_with_examples_modifs(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) models = ["bert", "gpt2"] repo = create_tmp_repo(tmp_folder, models=models) # Modification in one example trigger the corresponding test commit_changes( "examples/pytorch/text-classification/run_glue.py", "from transformers import BertModeln\n\ncode1", repo, ) with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() assert example_tests_to_run == "examples/pytorch/test_pytorch_examples.py" # Modification in one test example file trigger that test repo = create_tmp_repo(tmp_folder, models=models) commit_changes( "examples/pytorch/test_pytorch_examples.py", """test_args = "run_glue.py"\nmore_code""", repo, ) with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "examples_test_list.txt", "r") as f: example_tests_to_run = f.read() assert example_tests_to_run == "examples/pytorch/test_pytorch_examples.py" def test_parse_commit_message(self): assert parse_commit_message("Normal commit") == {"skip": False, "no_filter": False, "test_all": False} assert parse_commit_message("[skip ci] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[ci skip] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[skip-ci] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[skip_ci] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[no filter] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[no-filter] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[no_filter] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[filter-no] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[test all] commit") == {"skip": False, "no_filter": False, "test_all": True} assert parse_commit_message("[all test] commit") == {"skip": 
False, "no_filter": False, "test_all": True} assert parse_commit_message("[test-all] commit") == {"skip": False, "no_filter": False, "test_all": True} assert parse_commit_message("[all_test] commit") == {"skip": False, "no_filter": False, "test_all": True}
transformers/tests/repo_utils/test_tests_fetcher.py/0
{ "file_path": "transformers/tests/repo_utils/test_tests_fetcher.py", "repo_id": "transformers", "token_count": 17320 }
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import json import os import pathlib import tempfile import time import warnings import numpy as np import requests from packaging import version from transformers import AutoImageProcessor, BatchFeature from transformers.image_utils import AnnotationFormat, AnnotionFormat from transformers.testing_utils import ( check_json_file_has_correct_format, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image def prepare_image_inputs( batch_size, min_resolution, max_resolution, num_channels, size_divisor=None, equal_resolution=False, numpify=False, torchify=False, ): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. One can specify whether the images are of the same resolution or not. """ assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" image_inputs = [] for i in range(batch_size): if equal_resolution: width = height = max_resolution else: # To avoid getting image width/height 0 if size_divisor is not None: # If `size_divisor` is defined, the image needs to have width/size >= `size_divisor` min_resolution = max(size_divisor, min_resolution) width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2) image_inputs.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8)) if not numpify and not torchify: # PIL expects the channel dimension as last dimension image_inputs = [Image.fromarray(np.moveaxis(image, 0, -1)) for image in image_inputs] if torchify: image_inputs = [torch.from_numpy(image) for image in image_inputs] if numpify: # Numpy images are typically in channels last format image_inputs = [image.transpose(1, 2, 0) for image in image_inputs] return image_inputs def prepare_video(num_frames, num_channels, width=10, height=10, numpify=False, torchify=False): """This function prepares a video as a list of PIL images/NumPy arrays/PyTorch tensors.""" video = [] for i in range(num_frames): video.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8)) if not numpify and not torchify: # PIL expects the channel dimension as last dimension video = [Image.fromarray(np.moveaxis(frame, 0, -1)) for frame in video] if torchify: video = [torch.from_numpy(frame) for frame in video] return video def prepare_video_inputs( batch_size, num_frames, num_channels, min_resolution, max_resolution, equal_resolution=False, numpify=False, torchify=False, ): """This function prepares a batch of videos: a list of list of PIL images, or a list of list of numpy arrays if one specifies numpify=True, or a list of list of PyTorch tensors if one specifies torchify=True. 
One can specify whether the videos are of the same resolution or not. """ assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" video_inputs = [] for _ in range(batch_size): if equal_resolution: width = height = max_resolution else: width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2) video = prepare_video( num_frames=num_frames, num_channels=num_channels, width=width, height=height, numpify=numpify, torchify=torchify, ) video_inputs.append(video) return video_inputs class ImageProcessingTestMixin: test_cast_dtype = None image_processing_class = None fast_image_processing_class = None image_processors_list = None test_slow_image_processor = True test_fast_image_processor = True def setUp(self): image_processor_list = [] if self.test_slow_image_processor and self.image_processing_class: image_processor_list.append(self.image_processing_class) if self.test_fast_image_processor and self.fast_image_processing_class: image_processor_list.append(self.fast_image_processing_class) self.image_processor_list = image_processor_list @require_vision @require_torch def test_slow_fast_equivalence(self): if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") dummy_image = Image.open( requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw ) image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow(dummy_image, return_tensors="pt") encoding_fast = image_processor_fast(dummy_image, return_tensors="pt") self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) self.assertLessEqual( torch.mean(torch.abs(encoding_slow.pixel_values - encoding_fast.pixel_values)).item(), 1e-3 ) @require_vision @require_torch def test_slow_fast_equivalence_batched(self): if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop: self.skipTest( reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors" ) dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow(dummy_images, return_tensors="pt") encoding_fast = image_processor_fast(dummy_images, return_tensors="pt") self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) self.assertLessEqual( torch.mean(torch.abs(encoding_slow.pixel_values - encoding_fast.pixel_values)).item(), 1e-3 ) @require_vision @require_torch def test_fast_is_faster_than_slow(self): if not self.test_slow_image_processor or not 
self.test_fast_image_processor: self.skipTest(reason="Skipping speed test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping speed test as one of the image processors is not defined") def measure_time(image_processor, image): # Warmup for _ in range(5): _ = image_processor(image, return_tensors="pt") start = time.time() _ = image_processor(image, return_tensors="pt") return time.time() - start dummy_images = torch.randint(0, 255, (4, 3, 224, 224), dtype=torch.uint8) image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) fast_time = measure_time(image_processor_fast, dummy_images) slow_time = measure_time(image_processor_slow, dummy_images) self.assertLessEqual(fast_time, slow_time) def test_image_processor_to_json_string(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class(**self.image_processor_dict) obj = json.loads(image_processor.to_json_string()) for key, value in self.image_processor_dict.items(): self.assertEqual(obj[key], value) def test_image_processor_to_json_file(self): for image_processing_class in self.image_processor_list: image_processor_first = image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "image_processor.json") image_processor_first.to_json_file(json_file_path) image_processor_second = image_processing_class.from_json_file(json_file_path) self.assertEqual(image_processor_second.to_dict(), image_processor_first.to_dict()) def test_image_processor_from_and_save_pretrained(self): for image_processing_class in self.image_processor_list: image_processor_first = image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = image_processor_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) image_processor_second = image_processing_class.from_pretrained(tmpdirname) self.assertEqual(image_processor_second.to_dict(), image_processor_first.to_dict()) def test_image_processor_save_load_with_autoimageprocessor(self): for i, image_processing_class in enumerate(self.image_processor_list): image_processor_first = image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = image_processor_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) use_fast = i == 1 image_processor_second = AutoImageProcessor.from_pretrained(tmpdirname, use_fast=use_fast) self.assertEqual(image_processor_second.to_dict(), image_processor_first.to_dict()) def test_save_load_fast_slow(self): "Test that we can load a fast image processor from a slow one and vice-versa." 
if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest("Skipping slow/fast save/load test as one of the image processors is not defined") image_processor_dict = self.image_processor_tester.prepare_image_processor_dict() image_processor_slow_0 = self.image_processing_class(**image_processor_dict) # Load fast image processor from slow one with tempfile.TemporaryDirectory() as tmpdirname: image_processor_slow_0.save_pretrained(tmpdirname) image_processor_fast_0 = self.fast_image_processing_class.from_pretrained(tmpdirname) image_processor_fast_1 = self.fast_image_processing_class(**image_processor_dict) # Load slow image processor from fast one with tempfile.TemporaryDirectory() as tmpdirname: image_processor_fast_1.save_pretrained(tmpdirname) image_processor_slow_1 = self.image_processing_class.from_pretrained(tmpdirname) dict_slow_0 = image_processor_slow_0.to_dict() dict_slow_1 = image_processor_slow_1.to_dict() difference = { key: dict_slow_0.get(key) if key in dict_slow_0 else dict_slow_1.get(key) for key in set(dict_slow_0) ^ set(dict_slow_1) } dict_slow_0 = {key: dict_slow_0[key] for key in set(dict_slow_0) & set(dict_slow_1)} dict_slow_1 = {key: dict_slow_1[key] for key in set(dict_slow_0) & set(dict_slow_1)} # check that all additional keys are None, except for `default_to_square` which is only set in fast processors self.assertTrue(all(value is None for key, value in difference.items() if key not in ["default_to_square"])) # check that the remaining keys are the same self.assertEqual(dict_slow_0, dict_slow_1) dict_fast_0 = image_processor_fast_0.to_dict() dict_fast_1 = image_processor_fast_1.to_dict() difference = { key: dict_fast_0.get(key) if key in dict_fast_0 else dict_fast_1.get(key) for key in set(dict_fast_0) ^ set(dict_fast_1) } dict_fast_0 = {key: dict_fast_0[key] for key in set(dict_fast_0) & set(dict_fast_1)} dict_fast_1 = {key: dict_fast_1[key] for key in set(dict_fast_0) & set(dict_fast_1)} # check that all additional keys are None, except for `default_to_square` which is only set in fast processors self.assertTrue(all(value is None for key, value in difference.items() if key not in ["default_to_square"])) # check that the remaining keys are the same self.assertEqual(dict_fast_0, dict_fast_1) def test_save_load_fast_slow_auto(self): "Test that we can load a fast image processor from a slow one and vice-versa using AutoImageProcessor." 
if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest("Skipping slow/fast save/load test as one of the image processors is not defined") image_processor_dict = self.image_processor_tester.prepare_image_processor_dict() image_processor_slow_0 = self.image_processing_class(**image_processor_dict) # Load fast image processor from slow one with tempfile.TemporaryDirectory() as tmpdirname: image_processor_slow_0.save_pretrained(tmpdirname) image_processor_fast_0 = AutoImageProcessor.from_pretrained(tmpdirname, use_fast=True) image_processor_fast_1 = self.fast_image_processing_class(**image_processor_dict) # Load slow image processor from fast one with tempfile.TemporaryDirectory() as tmpdirname: image_processor_fast_1.save_pretrained(tmpdirname) image_processor_slow_1 = AutoImageProcessor.from_pretrained(tmpdirname, use_fast=False) dict_slow_0 = image_processor_slow_0.to_dict() dict_slow_1 = image_processor_slow_1.to_dict() difference = { key: dict_slow_0.get(key) if key in dict_slow_0 else dict_slow_1.get(key) for key in set(dict_slow_0) ^ set(dict_slow_1) } dict_slow_0 = {key: dict_slow_0[key] for key in set(dict_slow_0) & set(dict_slow_1)} dict_slow_1 = {key: dict_slow_1[key] for key in set(dict_slow_0) & set(dict_slow_1)} # check that all additional keys are None, except for `default_to_square` which is only set in fast processors self.assertTrue(all(value is None for key, value in difference.items() if key not in ["default_to_square"])) # check that the remaining keys are the same self.assertEqual(dict_slow_0, dict_slow_1) dict_fast_0 = image_processor_fast_0.to_dict() dict_fast_1 = image_processor_fast_1.to_dict() difference = { key: dict_fast_0.get(key) if key in dict_fast_0 else dict_fast_1.get(key) for key in set(dict_fast_0) ^ set(dict_fast_1) } dict_fast_0 = {key: dict_fast_0[key] for key in set(dict_fast_0) & set(dict_fast_1)} dict_fast_1 = {key: dict_fast_1[key] for key in set(dict_fast_0) & set(dict_fast_1)} # check that all additional keys are None, except for `default_to_square` which is only set in fast processors self.assertTrue(all(value is None for key, value in difference.items() if key not in ["default_to_square"])) # check that the remaining keys are the same self.assertEqual(dict_fast_0, dict_fast_1) def test_init_without_params(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class() self.assertIsNotNone(image_processor) @require_torch @require_vision def test_cast_dtype_device(self): for image_processing_class in self.image_processor_list: if self.test_cast_dtype is not None: # Initialize image_processor image_processor = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) encoding = image_processor(image_inputs, return_tensors="pt") # for layoutLM compatiblity self.assertEqual(encoding.pixel_values.device, torch.device("cpu")) self.assertEqual(encoding.pixel_values.dtype, torch.float32) encoding = image_processor(image_inputs, return_tensors="pt").to(torch.float16) self.assertEqual(encoding.pixel_values.device, torch.device("cpu")) self.assertEqual(encoding.pixel_values.dtype, torch.float16) encoding = image_processor(image_inputs, return_tensors="pt").to("cpu", torch.bfloat16) self.assertEqual(encoding.pixel_values.device, torch.device("cpu")) self.assertEqual(encoding.pixel_values.dtype, torch.bfloat16) with self.assertRaises(TypeError): _ = 
image_processor(image_inputs, return_tensors="pt").to(torch.bfloat16, "cpu") # Try with text + image feature encoding = image_processor(image_inputs, return_tensors="pt") encoding.update({"input_ids": torch.LongTensor([[1, 2, 3], [4, 5, 6]])}) encoding = encoding.to(torch.float16) self.assertEqual(encoding.pixel_values.device, torch.device("cpu")) self.assertEqual(encoding.pixel_values.dtype, torch.float16) self.assertEqual(encoding.input_ids.dtype, torch.long) def test_call_pil(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) ) def test_call_numpy(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) ) def test_call_pytorch(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape), ) def test_call_numpy_4_channels(self): for image_processing_class in self.image_processor_list: # Test 
that can process images which have an arbitrary number of channels # Initialize image_processing image_processor = image_processing_class(**self.image_processor_dict) # create random numpy tensors self.image_processor_tester.num_channels = 4 image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) # Test not batched input encoded_images = image_processor( image_inputs[0], return_tensors="pt", input_data_format="channels_last", image_mean=0, image_std=1, ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched encoded_images = image_processor( image_inputs, return_tensors="pt", input_data_format="channels_last", image_mean=0, image_std=1, ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) ) def test_image_processor_preprocess_arguments(self): is_tested = False for image_processing_class in self.image_processor_list: image_processor = image_processing_class(**self.image_processor_dict) # validation done by _valid_processor_keys attribute if hasattr(image_processor, "_valid_processor_keys") and hasattr(image_processor, "preprocess"): preprocess_parameter_names = inspect.getfullargspec(image_processor.preprocess).args preprocess_parameter_names.remove("self") preprocess_parameter_names.sort() valid_processor_keys = image_processor._valid_processor_keys valid_processor_keys.sort() self.assertEqual(preprocess_parameter_names, valid_processor_keys) is_tested = True # validation done by @filter_out_non_signature_kwargs decorator if hasattr(image_processor.preprocess, "_filter_out_non_signature_kwargs"): if hasattr(self.image_processor_tester, "prepare_image_inputs"): inputs = self.image_processor_tester.prepare_image_inputs() elif hasattr(self.image_processor_tester, "prepare_video_inputs"): inputs = self.image_processor_tester.prepare_video_inputs() else: self.skipTest(reason="No valid input preparation method found") with warnings.catch_warnings(record=True) as raised_warnings: warnings.simplefilter("always") image_processor(inputs, extra_argument=True) messages = " ".join([str(w.message) for w in raised_warnings]) self.assertGreaterEqual(len(raised_warnings), 1) self.assertIn("extra_argument", messages) is_tested = True if not is_tested: self.skipTest(reason="No validation found for `preprocess` method") @slow @require_torch_gpu @require_vision def test_can_compile_fast_image_processor(self): if self.fast_image_processing_class is None: self.skipTest("Skipping compilation test as fast image processor is not defined") if version.parse(torch.__version__) < version.parse("2.3"): self.skipTest(reason="This test requires torch >= 2.3 to run.") torch.compiler.reset() input_image = torch.randint(0, 255, (3, 224, 224), dtype=torch.uint8) image_processor = self.fast_image_processing_class(**self.image_processor_dict) output_eager = image_processor(input_image, device=torch_device, return_tensors="pt") image_processor = torch.compile(image_processor, mode="reduce-overhead") output_compiled = image_processor(input_image, device=torch_device, return_tensors="pt") torch.testing.assert_close(output_eager.pixel_values, output_compiled.pixel_values, rtol=1e-4, atol=1e-4) class AnnotationFormatTestMixin: # this mixin adds a test to 
assert that usages of the # to-be-deprecated `AnnotionFormat` continue to be # supported for the time being def test_processor_can_use_legacy_annotation_format(self): image_processor_dict = self.image_processor_tester.prepare_image_processor_dict() fixtures_path = pathlib.Path(__file__).parent / "fixtures" / "tests_samples" / "COCO" with open(fixtures_path / "coco_annotations.txt", "r") as f: detection_target = json.loads(f.read()) detection_annotations = {"image_id": 39769, "annotations": detection_target} detection_params = { "images": Image.open(fixtures_path / "000000039769.png"), "annotations": detection_annotations, "return_tensors": "pt", } with open(fixtures_path / "coco_panoptic_annotations.txt", "r") as f: panoptic_target = json.loads(f.read()) panoptic_annotations = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": panoptic_target} masks_path = pathlib.Path(fixtures_path / "coco_panoptic") panoptic_params = { "images": Image.open(fixtures_path / "000000039769.png"), "annotations": panoptic_annotations, "return_tensors": "pt", "masks_path": masks_path, } test_cases = [ ("coco_detection", detection_params), ("coco_panoptic", panoptic_params), (AnnotionFormat.COCO_DETECTION, detection_params), (AnnotionFormat.COCO_PANOPTIC, panoptic_params), (AnnotationFormat.COCO_DETECTION, detection_params), (AnnotationFormat.COCO_PANOPTIC, panoptic_params), ] def _compare(a, b) -> None: if isinstance(a, (dict, BatchFeature)): self.assertEqual(a.keys(), b.keys()) for k, v in a.items(): _compare(v, b[k]) elif isinstance(a, list): self.assertEqual(len(a), len(b)) for idx in range(len(a)): _compare(a[idx], b[idx]) elif isinstance(a, torch.Tensor): torch.testing.assert_close(a, b, rtol=1e-3, atol=1e-3) elif isinstance(a, str): self.assertEqual(a, b) for annotation_format, params in test_cases: with self.subTest(annotation_format): image_processor_params = {**image_processor_dict, **{"format": annotation_format}} image_processor_first = self.image_processing_class(**image_processor_params) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(tmpdirname) image_processor_second = self.image_processing_class.from_pretrained(tmpdirname) # check the 'format' key exists and that the dicts of the # first and second processors are equal self.assertIn("format", image_processor_first.to_dict().keys()) self.assertEqual(image_processor_second.to_dict(), image_processor_first.to_dict()) # perform encoding using both processors and compare # the resulting BatchFeatures first_encoding = image_processor_first(**params) second_encoding = image_processor_second(**params) _compare(first_encoding, second_encoding)
transformers/tests/test_image_processing_common.py/0
{ "file_path": "transformers/tests/test_image_processing_common.py", "repo_id": "transformers", "token_count": 12839 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, EarlyStoppingCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainerState, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch from transformers.trainer_callback import ExportableState if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS, TRAINER_STATE_NAME from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class MyTestExportableCallback(TrainerCallback, ExportableState): def __init__(self, my_test_state="test"): self.my_test_state = my_test_state def state(self): return { "args": { "my_test_state": self.my_test_state, }, } class MyTestTrainerCallback(TrainerCallback): "A callback that registers the events that goes through." def __init__(self, my_test_state="test"): self.events = [] self.my_test_state = my_test_state def on_init_end(self, args, state, control, **kwargs): self.events.append("on_init_end") def on_train_begin(self, args, state, control, **kwargs): self.events.append("on_train_begin") def on_train_end(self, args, state, control, **kwargs): self.events.append("on_train_end") def on_epoch_begin(self, args, state, control, **kwargs): self.events.append("on_epoch_begin") def on_epoch_end(self, args, state, control, **kwargs): self.events.append("on_epoch_end") def on_step_begin(self, args, state, control, **kwargs): self.events.append("on_step_begin") def on_pre_optimizer_step(self, args, state, control, **kwargs): self.events.append("on_pre_optimizer_step") def on_optimizer_step(self, args, state, control, **kwargs): self.events.append("on_optimizer_step") def on_step_end(self, args, state, control, **kwargs): self.events.append("on_step_end") def on_evaluate(self, args, state, control, **kwargs): self.events.append("on_evaluate") def on_predict(self, args, state, control, **kwargs): self.events.append("on_predict") def on_save(self, args, state, control, **kwargs): self.events.append("on_save") def on_log(self, args, state, control, **kwargs): self.events.append("on_log") def on_prediction_step(self, args, state, control, **kwargs): self.events.append("on_prediction_step") @require_torch class TrainerCallbackTest(unittest.TestCase): def setUp(self): self.output_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.output_dir) def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs): # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. 
train_dataset = RegressionDataset(length=train_len) eval_dataset = RegressionDataset(length=eval_len) config = RegressionModelConfig(a=a, b=b) model = RegressionPreTrainedModel(config) args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs) return Trainer( model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks, ) def check_callbacks_equality(self, cbs1, cbs2): self.assertEqual(len(cbs1), len(cbs2)) # Order doesn't matter cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__) cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__) for cb1, cb2 in zip(cbs1, cbs2): if isinstance(cb1, type) and isinstance(cb2, type): self.assertEqual(cb1, cb2) elif isinstance(cb1, type) and not isinstance(cb2, type): self.assertEqual(cb1, cb2.__class__) elif not isinstance(cb1, type) and isinstance(cb2, type): self.assertEqual(cb1.__class__, cb2) else: self.assertEqual(cb1, cb2) def get_expected_events(self, trainer): expected_events = ["on_init_end", "on_train_begin"] step = 0 train_dl_len = len(trainer.get_eval_dataloader()) evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"] for _ in range(trainer.state.num_train_epochs): expected_events.append("on_epoch_begin") for _ in range(train_dl_len): step += 1 expected_events += ["on_step_begin", "on_pre_optimizer_step", "on_optimizer_step", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("on_log") if trainer.args.eval_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0 or step == trainer.state.max_steps: expected_events.append("on_save") expected_events.append("on_epoch_end") if trainer.args.eval_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def test_init_callback(self): trainer = self.get_trainer() expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) # Callbacks passed at init are added to the default callbacks trainer = self.get_trainer(callbacks=[MyTestTrainerCallback]) expected_callbacks.append(MyTestTrainerCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback trainer = self.get_trainer(disable_tqdm=True) expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) def test_add_remove_callback(self): expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback] trainer = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(DefaultFlowCallback) expected_callbacks.remove(DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer = self.get_trainer() cb = trainer.pop_callback(DefaultFlowCallback) self.assertEqual(cb.__class__, DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer.add_callback(DefaultFlowCallback) expected_callbacks.insert(0, DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) # We 
can also add, pop, or remove by instance trainer = self.get_trainer() cb = trainer.callback_handler.callbacks[0] trainer.remove_callback(cb) expected_callbacks.remove(DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer = self.get_trainer() cb1 = trainer.callback_handler.callbacks[0] cb2 = trainer.pop_callback(cb1) self.assertEqual(cb1, cb2) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) trainer.add_callback(cb1) expected_callbacks.insert(0, DefaultFlowCallback) self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) def test_event_flow(self): import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested with warnings.catch_warnings(): warnings.simplefilter(action="ignore", category=UserWarning) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback]) trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) # Independent log/save/eval trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5) trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5) trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, eval_strategy="steps") trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_strategy="epoch") trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) # A bit of everything trainer = self.get_trainer( callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, eval_strategy="steps", ) trainer.train() events = trainer.callback_handler.callbacks[-2].events self.assertEqual(events, self.get_expected_events(trainer)) # warning should be emitted for duplicated callbacks with patch("transformers.trainer_callback.logger.warning") as warn_mock: trainer = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback], ) assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0] def test_stateful_callbacks(self): # Use something with non-defaults cb = EarlyStoppingCallback(early_stopping_patience=5, early_stopping_threshold=0.2) trainer = self.get_trainer( callbacks=[cb], load_best_model_at_end=True, save_strategy="steps", eval_strategy="steps", save_steps=2, eval_steps=2, max_steps=2, ) trainer.train() # Create a new trainer with defaults trainer = self.get_trainer( callbacks=[EarlyStoppingCallback()], load_best_model_at_end=True, save_strategy="steps", eval_strategy="steps", save_steps=2, eval_steps=2, max_steps=2, restore_callback_states_from_checkpoint=True, ) # Load it back in and verify values checkpoint = os.path.join(self.output_dir, "checkpoint-2") trainer.train(resume_from_checkpoint=checkpoint) cb = [ callback for callback in trainer.callback_handler.callbacks if isinstance(callback, EarlyStoppingCallback) ][0] assert cb.early_stopping_patience == 5 assert cb.early_stopping_threshold == 0.2 def test_stateful_mixed_callbacks(self): # Use two callbacks, one 
stateful one not # Use something with non-defaults cbs = [ MyTestTrainerCallback(my_test_state="another value"), EarlyStoppingCallback(early_stopping_patience=5, early_stopping_threshold=0.2), ] trainer = self.get_trainer( callbacks=cbs, load_best_model_at_end=True, save_strategy="steps", eval_strategy="steps", save_steps=2, eval_steps=2, max_steps=2, ) trainer.train() # Create a new trainer with defaults trainer = self.get_trainer( callbacks=[EarlyStoppingCallback(), MyTestTrainerCallback()], load_best_model_at_end=True, save_strategy="steps", eval_strategy="steps", save_steps=2, eval_steps=2, max_steps=2, restore_callback_states_from_checkpoint=True, ) # Load it back in and verify values checkpoint = os.path.join(self.output_dir, "checkpoint-2") trainer.train(resume_from_checkpoint=checkpoint) cbs = [ callback for callback in trainer.callback_handler.callbacks if isinstance(callback, (EarlyStoppingCallback, MyTestTrainerCallback)) ] assert len(cbs) == 2 my_test, early_stopping = cbs assert early_stopping.early_stopping_patience == 5 assert early_stopping.early_stopping_threshold == 0.2 assert my_test.my_test_state == "test" def test_stateful_duplicate_callbacks(self): # Use something with non-defaults cbs = [MyTestExportableCallback("first"), MyTestExportableCallback("second")] trainer = self.get_trainer( callbacks=cbs, load_best_model_at_end=True, save_strategy="steps", eval_strategy="steps", save_steps=2, eval_steps=2, max_steps=2, ) trainer.train() # Create a new trainer with defaults trainer = self.get_trainer( callbacks=[MyTestExportableCallback(), MyTestExportableCallback()], load_best_model_at_end=True, save_strategy="steps", eval_strategy="steps", save_steps=2, eval_steps=2, max_steps=2, restore_callback_states_from_checkpoint=True, ) # Load it back in and verify values checkpoint = os.path.join(self.output_dir, "checkpoint-2") trainer.train(resume_from_checkpoint=checkpoint) cbs = [ callback for callback in trainer.callback_handler.callbacks if isinstance(callback, MyTestExportableCallback) ] assert len(cbs) == 2 assert cbs[0].my_test_state == "first" assert cbs[1].my_test_state == "second" def test_missing_stateful_callback(self): cb = EarlyStoppingCallback() trainer = self.get_trainer( callbacks=[cb], load_best_model_at_end=True, save_strategy="steps", eval_strategy="steps", save_steps=2, eval_steps=2, max_steps=2, ) trainer.train() # Create a new trainer with defaults trainer = self.get_trainer( save_strategy="steps", eval_strategy="steps", save_steps=2, eval_steps=2, max_steps=2, restore_callback_states_from_checkpoint=True, ) # Load it back in and verify values checkpoint = os.path.join(self.output_dir, "checkpoint-2") # warning should be emitted for not-present callbacks with patch("transformers.trainer.logger.warning") as warn_mock: trainer.train(resume_from_checkpoint=checkpoint) assert "EarlyStoppingCallback" in warn_mock.call_args[0][0] def test_stateful_control(self): trainer = self.get_trainer( max_steps=2, save_strategy="steps", save_steps=2, ) trainer.train() # Load it back in and verify values trainer = self.get_trainer(max_steps=2, restore_callback_states_from_checkpoint=True) checkpoint = os.path.join(self.output_dir, "checkpoint-2") trainer.state = TrainerState.load_from_json(os.path.join(checkpoint, TRAINER_STATE_NAME)) trainer._load_callback_state() assert trainer.control.should_training_stop
transformers/tests/trainer/test_trainer_callback.py/0
{ "file_path": "transformers/tests/trainer/test_trainer_callback.py", "repo_id": "transformers", "token_count": 7470 }
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import unittest from parameterized import parameterized from transformers import set_seed from transformers.testing_utils import ( is_torch_available, require_gptq, require_non_xpu, require_read_token, require_torch, require_torch_gpu, slow, torch_device, ) if is_torch_available(): import torch from transformers import ( AutoModelForCausalLM, AutoTokenizer, DynamicCache, GenerationConfig, GPT2LMHeadModel, LlamaConfig, SinkCache, StaticCache, convert_and_export_with_cache, ) from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_3 @require_torch class CacheTest(unittest.TestCase): def test_dynamic_cache_retrocompatibility(self): """Tests that we can convert back and forth between the legacy cache format and DynamicCache""" legacy_cache = () new_cache = DynamicCache() # Creates a new cache with 10 layers in both formats for layer_idx in range(10): new_key = torch.rand((2, 4, 8, 16)) new_value = torch.rand((2, 4, 8, 16)) new_cache.update(new_key, new_value, layer_idx) legacy_cache += ((new_key, new_value),) # Sanity check 1: they must have the same shapes self.assertTrue(len(legacy_cache), len(new_cache)) for layer_idx in range(10): self.assertTrue(len(legacy_cache[layer_idx]), len(legacy_cache[layer_idx])) for key_value_idx in range(2): self.assertTrue( legacy_cache[layer_idx][key_value_idx].shape == new_cache[layer_idx][key_value_idx].shape ) # Sanity check 2: we can get the sequence length in multiple ways with DynamicCache, and they return the # expected value self.assertTrue(legacy_cache[0][0].shape[-2] == new_cache[0][0].shape[-2] == new_cache.get_seq_length() == 8) # Sanity check 3: they must be equal, and both support indexing for layer_idx in range(10): for key_value_idx in range(2): self.assertTrue( torch.allclose(new_cache[layer_idx][key_value_idx], legacy_cache[layer_idx][key_value_idx]) ) # Test 1: We can convert from legacy to new with no changes from_legacy = DynamicCache.from_legacy_cache(legacy_cache) for layer_idx in range(10): for key_value_idx in range(2): self.assertTrue( torch.allclose(from_legacy[layer_idx][key_value_idx], legacy_cache[layer_idx][key_value_idx]) ) # Test 2: We can convert from new to legacy with no changes to_legacy = new_cache.to_legacy_cache() for layer_idx in range(10): for key_value_idx in range(2): self.assertTrue( torch.allclose(to_legacy[layer_idx][key_value_idx], new_cache[layer_idx][key_value_idx]) ) def test_reorder_cache_retrocompatibility(self): """Tests that Cache.reorder_cache is retrocompatible with the legacy code path""" legacy_reorder_fn = GPT2LMHeadModel._reorder_cache # An example of a legacy `_reorder_cache` function legacy_cache = () new_cache = DynamicCache() # Creates a new cache with 10 layers in both formats for layer_idx in range(10): new_key = torch.rand((4, 4, 8, 16)) new_value = torch.rand((4, 4, 8, 16)) new_cache.update(new_key, new_value, layer_idx) legacy_cache += ((new_key, new_value),) # Let's create some dummy 
beam indices. From the shape above, it is equivalent to the case where num_beams=4 # and batch_size=1 beam_idx = torch.randint(low=0, high=4, size=(4,)) legacy_cache_reordered = legacy_reorder_fn(legacy_cache, beam_idx) new_cache.reorder_cache(beam_idx) # Let's check that the results are the same for layer_idx in range(10): for key_value_idx in range(2): self.assertTrue( torch.allclose( new_cache[layer_idx][key_value_idx], legacy_cache_reordered[layer_idx][key_value_idx] ) ) def test_static_cache_mha_mqa_gqa(self): """ Tests that static cache works with multi-head attention (MHA), grouped query attention (GQA), and multi-query attention (MQA) """ def _random_kvs(config): # shape for key and values: (batch_size, num_heads, seq_len, head_dim) random_keys = torch.rand( (1, config.num_key_value_heads, 1, config.hidden_size // config.num_attention_heads), device=torch_device, ) random_values = torch.rand( (1, config.num_key_value_heads, 1, config.hidden_size // config.num_attention_heads), device=torch_device, ) return random_keys, random_values mha_config = LlamaConfig(num_attention_heads=32) mha_static_cache = StaticCache(config=mha_config, batch_size=1, max_cache_len=10, device=torch_device) cached_keys, cached_values = mha_static_cache.update( *_random_kvs(mha_config), 0, cache_kwargs={"cache_position": torch.arange(1).to(torch_device)} ) self.assertTrue(cached_keys.shape == (1, 32, 10, 128)) self.assertTrue(cached_values.shape == (1, 32, 10, 128)) gqa_config = LlamaConfig(num_attention_heads=32, num_key_value_heads=4) gqa_static_cache = StaticCache(config=gqa_config, batch_size=1, max_cache_len=10, device=torch_device) cached_keys, cached_values = gqa_static_cache.update( *_random_kvs(gqa_config), 0, cache_kwargs={"cache_position": torch.arange(1).to(torch_device)} ) self.assertTrue(cached_keys.shape == (1, 4, 10, 128)) self.assertTrue(cached_values.shape == (1, 4, 10, 128)) mqa_config = LlamaConfig(num_attention_heads=32, num_key_value_heads=1) mqa_static_cache = StaticCache(config=mqa_config, batch_size=1, max_cache_len=10, device=torch_device) cached_keys, cached_values = mqa_static_cache.update( *_random_kvs(mqa_config), 0, cache_kwargs={"cache_position": torch.arange(1).to(torch_device)} ) self.assertTrue(cached_keys.shape == (1, 1, 10, 128)) self.assertTrue(cached_values.shape == (1, 1, 10, 128)) @slow @require_read_token def test_static_cache_exportability(self): """ Tests that static cache works with `torch.export()` """ if not is_torch_greater_or_equal_than_2_3: self.skipTest(reason="This test requires torch >= 2.3 to run.") set_seed(0) device = "cpu" dtype = "bfloat16" cache_implementation = "static" attn_implementation = "sdpa" # Export and ExecuTorch only works for SdpaAttention batch_size = 1 max_cache_len = 1234 model = AutoModelForCausalLM.from_pretrained( "google/gemma-2b", device_map=device, torch_dtype=dtype, attn_implementation=attn_implementation, generation_config=GenerationConfig( use_cache=True, cache_implementation=cache_implementation, max_length=max_cache_len, cache_config={ "batch_size": batch_size, "max_cache_len": max_cache_len, "device": device, }, ), ) # Check if cache config is passed through correctly self.assertEqual(model.generation_config.use_cache, True) self.assertEqual(model.generation_config.cache_implementation, cache_implementation) self.assertEqual(model.generation_config.max_length, max_cache_len) self.assertTrue(model.generation_config.cache_config is not None) self.assertEqual(model.generation_config.cache_config.batch_size, batch_size) 
self.assertEqual(model.generation_config.cache_config.max_cache_len, max_cache_len) exported_program = convert_and_export_with_cache(model) # Check if the exported model is configured with the `StaticCache` correctly n_static_key_caches = n_static_value_caches = 0 for buffer_name, buffer in exported_program.named_buffers(): if buffer_name.startswith("static_cache.key_cache"): self.assertTrue(buffer.shape[0] == batch_size) self.assertTrue(buffer.shape[2] == max_cache_len) n_static_key_caches = n_static_key_caches + 1 if buffer_name.startswith("static_cache.value_cache"): self.assertTrue(buffer.shape[0] == batch_size) self.assertTrue(buffer.shape[2] == max_cache_len) n_static_value_caches = n_static_value_caches + 1 self.assertEqual(n_static_key_caches, model.config.num_hidden_layers) self.assertEqual(n_static_value_caches, model.config.num_hidden_layers) @require_torch_gpu @slow class CacheIntegrationTest(unittest.TestCase): def test_dynamic_cache_hard(self): tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", padding_side="left") model = AutoModelForCausalLM.from_pretrained( "meta-llama/Llama-2-7b-hf", device_map="auto", torch_dtype=torch.float16 ) inputs = tokenizer(["Here's everything I know about cats. Cats"], return_tensors="pt").to(model.device) # DynamicCache and the legacy cache format should be equivalent set_seed(0) gen_out_legacy = model.generate(**inputs, do_sample=True, max_new_tokens=256) set_seed(0) gen_out = model.generate(**inputs, do_sample=True, max_new_tokens=256, past_key_values=DynamicCache()) self.assertListEqual(gen_out_legacy.tolist(), gen_out.tolist()) decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True) expected_text = ( "Here's everything I know about cats. Cats are mysterious creatures. They can't talk, and they don't like " "to be held. They don't play fetch, and they don't like to be hugged. But they do like to be petted.\n" "Cats are also very independent. They don't like to be told what to do, and they don't like to be told " "what to eat. They are also very territorial. They don't like to share their food or their toys.\nCats " "are also very curious. They like to explore, and they like to play. They are also very fast. They can " "run very fast, and they can jump very high.\nCats are also very smart. They can learn tricks, and they " "can solve problems. They are also very playful. They like to play with toys, and they like to play with " "other cats.\nCats are also very affectionate. They like to be petted, and they like to be held. They " "also like to be scratched.\nCats are also very clean. They like to groom themselves, and they like to " "clean their litter box.\nCats are also very independent. 
They don't" ) self.assertEqual(decoded[0], expected_text) def test_dynamic_cache_batched(self): tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", padding_side="left") tokenizer.pad_token = tokenizer.eos_token model = AutoModelForCausalLM.from_pretrained( "meta-llama/Llama-2-7b-hf", device_map="auto", torch_dtype=torch.float16 ) inputs = tokenizer(["A sequence: 1, 2, 3, 4, 5", "A sequence: A, B, C"], padding=True, return_tensors="pt").to( model.device ) gen_out = model.generate(**inputs, do_sample=False, max_new_tokens=10, past_key_values=DynamicCache()) decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True) expected_text = ["A sequence: 1, 2, 3, 4, 5, 6, 7, 8,", "A sequence: A, B, C, D, E, F, G, H"] self.assertListEqual(decoded, expected_text) def test_dynamic_cache_beam_search(self): tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", padding_side="left") model = AutoModelForCausalLM.from_pretrained( "meta-llama/Llama-2-7b-hf", device_map="auto", torch_dtype=torch.float16 ) inputs = tokenizer(["The best color is"], return_tensors="pt").to(model.device) gen_out = model.generate( **inputs, do_sample=False, max_new_tokens=20, num_beams=2, num_return_sequences=2, ) decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True) expected_text = [ "The best color is the one that makes you feel good.\nThe best color is the one that makes you feel good", "The best color is the one that suits you.\nThe best color is the one that suits you. The", ] self.assertListEqual(decoded, expected_text) def test_hybrid_cache_n_sequences(self): tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b") model = AutoModelForCausalLM.from_pretrained( "google/gemma-2-9b", device_map="auto", torch_dtype=torch.bfloat16, attn_implementation="eager", ) inputs = tokenizer(["Hello I am doing"], return_tensors="pt").to(model.device) gen_out = model.generate( **inputs, do_sample=False, max_new_tokens=20, num_return_sequences=2, num_beams=2, ) decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True) expected_text = [ "Hello I am doing a project for my school and I am trying to make a program that will allow me to input a", "Hello I am doing a project for my school and I am trying to make a program that will allow me to use a", ] self.assertListEqual(decoded, expected_text) @require_non_xpu @require_gptq def test_sink_cache_hard(self): tokenizer = AutoTokenizer.from_pretrained("TheBloke/LLaMa-7B-GPTQ") model = AutoModelForCausalLM.from_pretrained("TheBloke/LLaMa-7B-GPTQ", device_map="auto") inputs = tokenizer(["Vaswani et al. (2017) introduced the Transformers"], return_tensors="pt").to(model.device) # Set up the SinkCache. Using a small window length to contain computational complexity. If this example is run # without a SinkCache, the last few tokens are gibberish (ends in "of the of the of a of a of") cache = SinkCache(window_length=508, num_sink_tokens=4) gen_out = model.generate(**inputs, do_sample=False, max_new_tokens=3000, past_key_values=cache) decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True) self.assertTrue(decoded[0].endswith("to perform a variety of tasks. 
The Transformer is a neural network")) def test_sink_cache_iterative_prompts(self): """Tests that SinkCache supports more than one new token at once, when shifting the cache""" tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta") model = AutoModelForCausalLM.from_pretrained( "HuggingFaceH4/zephyr-7b-beta", device_map="auto", torch_dtype=torch.float16 ) prompt = ( "Compose an engaging travel blog post about a recent trip to Hawaii, highlighting cultural experiences " "and must-see attractions." ) # Prepare generation settings cache = SinkCache(window_length=256, num_sink_tokens=4) input_ids = torch.tensor([], device=model.device, dtype=torch.int) for _ in range(3): # Tokenize the prompt with the correct chat template chat = [{"role": "user", "content": prompt}] tokenized_chat = tokenizer.apply_chat_template(chat, return_tensors="pt", add_generation_prompt=True).to( model.device ) input_ids = torch.cat((input_ids, tokenized_chat), dim=1) # Perform the generation gen_out = model.generate( input_ids, do_sample=False, max_new_tokens=100, past_key_values=cache, use_cache=True ) input_ids = gen_out # We went well beyond the cache length self.assertTrue(input_ids.shape[1] > cache.get_max_cache_shape() * 1.5) # And it still produces a coherent english decoded = tokenizer.batch_decode(input_ids, skip_special_tokens=True) last_output = ( "<|assistant|>\nAs the sun began to set over the Pacific Ocean, I found myself standing on the shores of " "Waikiki Beach, my heart filled with awe and wonder. I had just returned from a two-week journey to the " "beautiful island of Hawaii, and it had been an unforgettable experience filled with cultural experiences " "and must-see attractions that left me breathless.\n\nOne of the most memorable experiences of my trip " "was visiting the historic district of Honolulu. 
Here," ) self.assertTrue(decoded[0].endswith(last_output)) @require_torch_gpu @parameterized.expand( [ ("eager", "static"), ("sdpa", "static"), ] ) def test_static_cache_greedy_decoding_pad_left(self, attn_implementation, cache_implementation): EXPECTED_GENERATION = [ "The best color is the one that complements the skin tone of the", "We should not undermind the issues at hand.\nWe should not undermind the issues", ] tokenizer = AutoTokenizer.from_pretrained( "NousResearch/Llama-2-7b-chat-hf", padding_side="left", pad_token="<s>" ) model = AutoModelForCausalLM.from_pretrained( "NousResearch/Llama-2-7b-chat-hf", torch_dtype=torch.bfloat16, attn_implementation=attn_implementation, ).to(torch_device) inputs = tokenizer( ["The best color is", "We should not undermind the issues at hand"], padding=True, return_tensors="pt" ).to(model.device) set_seed(0) gen_out = model.generate(**inputs, do_sample=False, max_new_tokens=10) decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True) with self.subTest(f"{attn_implementation}, dynamic"): self.assertListEqual(decoded, EXPECTED_GENERATION) set_seed(0) model.generation_config.cache_implementation = cache_implementation gen_out = model.generate(**inputs, do_sample=False, max_new_tokens=10) decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True) with self.subTest(f"{attn_implementation}, static, eager"): self.assertListEqual(decoded, EXPECTED_GENERATION) set_seed(0) model.forward = torch.compile(model.forward) gen_out = model.generate(**inputs, do_sample=False, max_new_tokens=10) decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True) with self.subTest(f"{attn_implementation}, static, compiled"): self.assertListEqual(decoded, EXPECTED_GENERATION) @require_torch_gpu @parameterized.expand( [ ("eager", "static"), ("sdpa", "static"), ] ) def test_static_cache_greedy_decoding_pad_right(self, attn_implementation, cache_implementation): EXPECTED_GENERATION = [ "The best color isЋ the one that complements the skin tone of", "We should not undermind the issues at hand.\nWe should not undermind the issues", ] tokenizer = AutoTokenizer.from_pretrained( "NousResearch/Llama-2-7b-chat-hf", padding_side="right", pad_token="<s>" ) model = AutoModelForCausalLM.from_pretrained( "NousResearch/Llama-2-7b-chat-hf", torch_dtype=torch.bfloat16, attn_implementation=attn_implementation, ).to(torch_device) inputs = tokenizer( ["The best color is", "We should not undermind the issues at hand"], padding=True, return_tensors="pt" ).to(model.device) set_seed(0) gen_out = model.generate(**inputs, do_sample=False, max_new_tokens=10) decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True) with self.subTest(f"{attn_implementation}, dynamic"): self.assertListEqual(decoded, EXPECTED_GENERATION) set_seed(0) model.generation_config.cache_implementation = cache_implementation gen_out = model.generate(**inputs, do_sample=False, max_new_tokens=10) decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True) with self.subTest(f"{attn_implementation}, static, eager"): self.assertListEqual(decoded, EXPECTED_GENERATION) def test_dynamic_cache_extra_left_padding(self): """Tests that adding extra left-padding does not affect the generation with the dynamic cache""" EXPECTED_GENERATION = [ "The best color is the one that complements the skin tone of the", "We should not undermind the issues at hand.\nWe should not undermind the issues", ] tokenizer = AutoTokenizer.from_pretrained( "NousResearch/Llama-2-7b-chat-hf", padding_side="left", 
pad_token="<s>" ) model = AutoModelForCausalLM.from_pretrained( "NousResearch/Llama-2-7b-chat-hf", torch_dtype=torch.bfloat16, ).to(torch_device) inputs = tokenizer( ["The best color is", "We should not undermind the issues at hand"], padding=True, return_tensors="pt" ).to(model.device) gen_out = model.generate(**inputs, do_sample=False, max_new_tokens=10) decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True) self.assertListEqual(decoded, EXPECTED_GENERATION) # Now with extra left-padding inputs_expanded = tokenizer( ["The best color is", "We should not undermind the issues at hand"], padding=True, return_tensors="pt", pad_to_multiple_of=32, ).to(model.device) self.assertTrue(inputs.input_ids.shape[1] < inputs_expanded.input_ids.shape[1]) gen_out = model.generate(**inputs_expanded, do_sample=False, max_new_tokens=10) decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True) self.assertListEqual(decoded, EXPECTED_GENERATION) @parameterized.expand( [ "static", ] ) def test_static_cache_extra_left_padding(self, cache_implementation): """Tests that adding extra left-padding does not affect the generation with the static cache""" EXPECTED_GENERATION = [ "The best color is the one that complements the skin tone of the", "We should not undermind the issues at hand.\nWe should not undermind the issues", ] tokenizer = AutoTokenizer.from_pretrained( "NousResearch/Llama-2-7b-chat-hf", padding_side="left", pad_token="<s>" ) model = AutoModelForCausalLM.from_pretrained( "NousResearch/Llama-2-7b-chat-hf", torch_dtype=torch.bfloat16, ).to(torch_device) inputs = tokenizer( ["The best color is", "We should not undermind the issues at hand"], padding=True, return_tensors="pt" ).to(model.device) model.generation_config.cache_implementation = cache_implementation gen_out = model.generate(**inputs, do_sample=False, max_new_tokens=10) decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True) self.assertListEqual(decoded, EXPECTED_GENERATION) # Now with extra left-padding inputs_expanded = tokenizer( ["The best color is", "We should not undermind the issues at hand"], padding=True, return_tensors="pt", pad_to_multiple_of=32, ).to(model.device) self.assertTrue(inputs.input_ids.shape[1] < inputs_expanded.input_ids.shape[1]) gen_out = model.generate(**inputs_expanded, do_sample=False, max_new_tokens=10) decoded = tokenizer.batch_decode(gen_out, skip_special_tokens=True) self.assertListEqual(decoded, EXPECTED_GENERATION) @unittest.skip(reason="TODO @gante static cache's does not support beam search yet") def test_static_cache_beam_search(self): pass @require_torch_gpu def test_offloaded_cache_equivalent_to_dynamic_cache(self): """Tests that OffloadedCache produces the same result as the default DynamicCache""" model_name = "microsoft/Phi-3-mini-4k-instruct" tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.float16) device = model.device input_text = "Fun fact:" inputs = tokenizer(input_text, return_tensors="pt").to(device) common = { "num_beams": 4, "num_beam_groups": 2, "num_return_sequences": 4, "diversity_penalty": 1.0, "max_new_tokens": 20, "early_stopping": True, } original = GenerationConfig(**common) offloaded = GenerationConfig(cache_implementation="offloaded", **common) original_outputs = model.generate(generation_config=original, **inputs) offloaded_outputs = model.generate(generation_config=offloaded, **inputs) for original_output, offloaded_output in zip(original_outputs, 
offloaded_outputs): assert torch.all(original_output == offloaded_output).item() @require_torch_gpu def test_offloaded_cache_uses_less_memory_than_dynamic_cache(self): """Tests that OffloadedCache uses less memory than the default DynamicCache""" model_name = "microsoft/Phi-3-mini-4k-instruct" tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.float16) device = model.device input_text = "Fun fact:" inputs = tokenizer(input_text, return_tensors="pt").to(device) common = { "num_beams": 4, "num_beam_groups": 2, "num_return_sequences": 4, "diversity_penalty": 1.0, "max_new_tokens": 20, "early_stopping": True, } original = GenerationConfig(**common) offloaded = GenerationConfig(cache_implementation="offloaded", **common) torch.cuda.reset_peak_memory_stats(device) model.generate(generation_config=original, **inputs) original_peak_memory = torch.cuda.max_memory_allocated(device) torch.cuda.reset_peak_memory_stats(device) model.generate(generation_config=offloaded, **inputs) offloaded_peak_memory = torch.cuda.max_memory_allocated(device) assert offloaded_peak_memory < original_peak_memory @require_torch_gpu def test_cache_copy(self): model_name = "microsoft/Phi-3-mini-4k-instruct" tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForCausalLM.from_pretrained(model_name, device_map="cuda", torch_dtype=torch.bfloat16) prompt_cache = StaticCache( config=model.config, max_batch_size=1, max_cache_len=1024, device="cuda", dtype=torch.bfloat16 ) INITIAL_PROMPT = "You are a helpful assistant. " inputs_initial_prompt = tokenizer(INITIAL_PROMPT, return_tensors="pt").to("cuda") # This is the common prompt cached, we need to run forward without grad to be abel to copy with torch.no_grad(): prompt_cache = model(**inputs_initial_prompt, past_key_values=prompt_cache).past_key_values prompts = ["Help me to write a blogpost about travelling.", "What is the capital of France?"] responses = [] for prompt in prompts: new_inputs = tokenizer(INITIAL_PROMPT + prompt, return_tensors="pt").to("cuda") past_key_values = copy.deepcopy(prompt_cache) outputs = model.generate(**new_inputs, past_key_values=past_key_values, max_new_tokens=40) response = tokenizer.batch_decode(outputs)[0] responses.append(response) EXPECTED_DECODED_TEXT = [ "You are a helpful assistant. Help me to write a blogpost about travelling.\n\nTraveling is an enriching experience that broadens our horizons and exposes us to new cultures, landscapes, and people. Whether it's a week", 'You are a helpful assistant. What is the capital of France?\n\n\n## Response:Paris is the capital of France.\n\n\n\n\n\n## Query:\n\nIn a detailed analysis, compare the economic impacts of the introduction of the' ] # fmt: skip self.assertTrue(responses == EXPECTED_DECODED_TEXT)
transformers/tests/utils/test_cache_utils.py/0
{ "file_path": "transformers/tests/utils/test_cache_utils.py", "repo_id": "transformers", "token_count": 12990 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class HfArgumentParserTest(unittest.TestCase): def test_set_level(self): logger = logging.get_logger() # the current default level is logging.WARNING level_origin = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity()) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity()) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity()) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity()) # restore to the original level logging.set_verbosity(level_origin) def test_integration(self): level_origin = logging.get_verbosity() logger = logging.get_logger("transformers.models.bart.tokenization_bart") msg = "Testing 1, 2, 3" # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(logger) as cl: logger.warning(msg) self.assertEqual(cl.out, msg + "\n") # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(logger) as cl: logger.warning(msg) self.assertEqual(cl.out, "") # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(logger) as cl: logger.warning(msg) self.assertEqual(cl.out, msg + "\n") # restore to the original level logging.set_verbosity(level_origin) @mockenv(TRANSFORMERS_VERBOSITY="error") def test_env_override(self): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var _ = logging.get_logger("transformers.models.bart.tokenization_bart") env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None) env_level = logging.log_levels[env_level_str] current_level = logging.get_verbosity() self.assertEqual( env_level, current_level, f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}", ) # restore to the original level os.environ["TRANSFORMERS_VERBOSITY"] = "" transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY="super-error") def test_env_invalid_override(self): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() logger = logging.logging.getLogger() with CaptureLogger(logger) as cl: # this action activates the env var logging.get_logger("transformers.models.bart.tokenization_bart") 
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out) # no need to restore as nothing was changed def test_advisory_warnings(self): # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() logger = logging.get_logger("transformers.models.bart.tokenization_bart") msg = "Testing 1, 2, 3" with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"): # nothing should be logged as env var disables this method with CaptureLogger(logger) as cl: logger.warning_advice(msg) self.assertEqual(cl.out, "") with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(logger) as cl: logger.warning_advice(msg) self.assertEqual(cl.out, msg + "\n") def test_set_progress_bar_enabled(): disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
transformers/tests/utils/test_logging.py/0
{ "file_path": "transformers/tests/utils/test_logging.py", "repo_id": "transformers", "token_count": 2007 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import importlib from pathlib import Path # Test all the extensions added in the setup FILES_TO_FIND = [ "kernels/rwkv/wkv_cuda.cu", "kernels/rwkv/wkv_op.cpp", "kernels/deformable_detr/ms_deform_attn.h", "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh", "kernels/falcon_mamba/selective_scan_with_ln_interface.py", "kernels/falcon_mamba/__init__.py", "kernels/__init__.py", "models/graphormer/algos_graphormer.pyx", ] def test_custom_files_are_present(transformers_path): # Test all the extensions added in the setup for file in FILES_TO_FIND: if not (transformers_path / file).exists(): return False return True if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.") args = parser.parse_args() if args.check_lib: transformers_module = importlib.import_module("transformers") transformers_path = Path(transformers_module.__file__).parent else: transformers_path = Path.cwd() / "build/lib/transformers" if not test_custom_files_are_present(transformers_path): raise ValueError("The built release does not contain the custom files. Fix this before going further!")
transformers/utils/check_build.py/0
{ "file_path": "transformers/utils/check_build.py", "repo_id": "transformers", "token_count": 677 }
import ast from collections import defaultdict # Function to perform topological sorting def topological_sort(dependencies: dict): # Nodes are the name of the models to convert (we only add those to the graph) nodes = {node.rsplit("modular_", 1)[1].replace(".py", "") for node in dependencies.keys()} # This will be a graph from models to convert, to models to convert that should be converted before (as they are a dependency) graph = {} name_mapping = {} for node, deps in dependencies.items(): node_name = node.rsplit("modular_", 1)[1].replace(".py", "") dep_names = {dep.split(".")[-2] for dep in deps} dependencies = {dep for dep in dep_names if dep in nodes and dep != node_name} graph[node_name] = dependencies name_mapping[node_name] = node sorting_list = [] while len(graph) > 0: # Find the nodes with 0 out-degree leaf_nodes = {node for node in graph if len(graph[node]) == 0} # Add them to the list sorting_list += list(leaf_nodes) # Remove the leafs from the graph (and from the deps of other nodes) graph = {node: deps - leaf_nodes for node, deps in graph.items() if node not in leaf_nodes} return [name_mapping[x] for x in sorting_list] # Function to extract class and import info from a file def extract_classes_and_imports(file_path): with open(file_path, "r") as file: tree = ast.parse(file.read(), filename=file_path) imports = set() for node in ast.walk(tree): if isinstance(node, (ast.Import, ast.ImportFrom)): module = node.module if isinstance(node, ast.ImportFrom) else None if module and (".modeling_" in module or "transformers.models" in module): imports.add(module) return imports # Function to map dependencies between classes def map_dependencies(py_files): dependencies = defaultdict(set) # First pass: Extract all classes and map to files for file_path in py_files: # dependencies[file_path].add(None) class_to_file = extract_classes_and_imports(file_path) for module in class_to_file: dependencies[file_path].add(module) return dependencies def find_priority_list(py_files): dependencies = map_dependencies(py_files) ordered_classes = topological_sort(dependencies) return ordered_classes
transformers/utils/create_dependency_mapping.py/0
{ "file_path": "transformers/utils/create_dependency_mapping.py", "repo_id": "transformers", "token_count": 870 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import re import time from typing import Dict, List from get_ci_error_statistics import get_jobs from slack_sdk import WebClient client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"]) def handle_test_results(test_results): expressions = test_results.split(" ") failed = 0 success = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1] for i, expression in enumerate(expressions): if "failed" in expression: failed += int(expressions[i - 1]) if "passed" in expression: success += int(expressions[i - 1]) return failed, success, time_spent def extract_first_line_failure(failures_short_lines): failures = {} file = None in_error = False for line in failures_short_lines.split("\n"): if re.search(r"_ \[doctest\]", line): in_error = True file = line.split(" ")[2] elif in_error and not line.split(" ")[0].isdigit(): failures[file] = line in_error = False return failures class Message: def __init__(self, title: str, doc_test_results: Dict): self.title = title self.n_success = sum(job_result["n_success"] for job_result in doc_test_results.values()) self.n_failures = sum(job_result["n_failures"] for job_result in doc_test_results.values()) self.n_tests = self.n_success + self.n_failures # Failures and success of the modeling tests self.doc_test_results = doc_test_results @property def time(self) -> str: all_results = [*self.doc_test_results.values()] time_spent = [r["time_spent"].split(", ")[0] for r in all_results if len(r["time_spent"])] total_secs = 0 for time in time_spent: time_parts = time.split(":") # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(time_parts) == 1: time_parts = [0, 0, time_parts[0]] hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2]) total_secs += hours * 3600 + minutes * 60 + seconds hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60 return f"{int(hours)}h{int(minutes)}m{int(seconds)}s" @property def header(self) -> Dict: return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def no_failures(self) -> Dict: return { "type": "section", "text": { "type": "plain_text", "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.", "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def failures(self) -> Dict: return { "type": "section", "text": { "type": "plain_text", "text": ( f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in" f" {self.time}." 
), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def category_failures(self) -> List[Dict]: failure_blocks = [] MAX_ERROR_TEXT = 3000 - len("The following examples had failures:\n\n\n\n") - len("[Truncated]\n") line_length = 40 category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)} def single_category_failures(category, failures): text = "" if len(failures) == 0: return "" text += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n" for idx, failure in enumerate(failures): new_text = text + f"`{failure}`\n" if len(new_text) > MAX_ERROR_TEXT: text = text + "[Truncated]\n" break text = new_text return text for category, failures in category_failures.items(): report = single_category_failures(category, failures) if len(report) == 0: continue block = { "type": "section", "text": { "type": "mrkdwn", "text": f"The following examples had failures:\n\n\n{report}\n", }, } failure_blocks.append(block) return failure_blocks @property def payload(self) -> str: blocks = [self.header] if self.n_failures > 0: blocks.append(self.failures) if self.n_failures > 0: blocks.extend(self.category_failures) if self.n_failures == 0: blocks.append(self.no_failures) return json.dumps(blocks) @staticmethod def error_out(): payload = [ { "type": "section", "text": { "type": "plain_text", "text": "There was an issue running the tests.", }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } ] print("Sending the following payload") print(json.dumps({"blocks": json.loads(payload)})) client.chat_postMessage( channel=SLACK_REPORT_CHANNEL_ID, text="There was an issue running the tests.", blocks=payload, ) def post(self): print("Sending the following payload") print(json.dumps({"blocks": json.loads(self.payload)})) text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed." 
self.thread_ts = client.chat_postMessage( channel=SLACK_REPORT_CHANNEL_ID, blocks=self.payload, text=text, ) def get_reply_blocks(self, job_name, job_link, failures, text): # `text` must be less than 3001 characters in Slack SDK # keep some room for adding "[Truncated]" when necessary MAX_ERROR_TEXT = 3000 - len("[Truncated]") failure_text = "" for key, value in failures.items(): new_text = failure_text + f"*{key}*\n_{value}_\n\n" if len(new_text) > MAX_ERROR_TEXT: # `failure_text` here has length <= 3000 failure_text = failure_text + "[Truncated]" break # `failure_text` here has length <= MAX_ERROR_TEXT failure_text = new_text title = job_name content = {"type": "section", "text": {"type": "mrkdwn", "text": text}} if job_link is not None: content["accessory"] = { "type": "button", "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True}, "url": job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title, "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failure_text}}, ] def post_reply(self): if self.thread_ts is None: raise ValueError("Can only post reply if a post has been made.") sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0]) for job_name, job_result in sorted_dict: if len(job_result["failures"]) > 0: text = f"*Num failures* :{len(job_result['failed'])} \n" failures = job_result["failures"] blocks = self.get_reply_blocks(job_name, job_result["job_link"], failures, text=text) print("Sending the following reply") print(json.dumps({"blocks": blocks})) client.chat_postMessage( channel=SLACK_REPORT_CHANNEL_ID, text=f"Results for {job_name}", blocks=blocks, thread_ts=self.thread_ts["ts"], ) time.sleep(1) def retrieve_artifact(name: str): _artifact = {} if os.path.exists(name): files = os.listdir(name) for file in files: try: with open(os.path.join(name, file), encoding="utf-8") as f: _artifact[file.split(".")[0]] = f.read() except UnicodeDecodeError as e: raise ValueError(f"Could not open {os.path.join(name, file)}.") from e return _artifact def retrieve_available_artifacts(): class Artifact: def __init__(self, name: str): self.name = name self.paths = [] def __str__(self): return self.name def add_path(self, path: str): self.paths.append({"name": self.name, "path": path}) _available_artifacts: Dict[str, Artifact] = {} directories = filter(os.path.isdir, os.listdir()) for directory in directories: artifact_name = directory if artifact_name not in _available_artifacts: _available_artifacts[artifact_name] = Artifact(artifact_name) _available_artifacts[artifact_name].add_path(directory) return _available_artifacts if __name__ == "__main__": SLACK_REPORT_CHANNEL_ID = os.environ["SLACK_REPORT_CHANNEL"] github_actions_jobs = get_jobs( workflow_run_id=os.environ["GITHUB_RUN_ID"], token=os.environ["ACCESS_REPO_INFO_TOKEN"] ) artifact_name_to_job_map = {} for job in github_actions_jobs: for step in job["steps"]: if step["name"].startswith("Test suite reports artifacts: "): artifact_name = step["name"][len("Test suite reports artifacts: ") :] artifact_name_to_job_map[artifact_name] = job break available_artifacts = retrieve_available_artifacts() doc_test_results = {} # `artifact_key` is the artifact path for artifact_key, artifact_obj in available_artifacts.items(): artifact_path = artifact_obj.paths[0] if not artifact_path["path"].startswith("doc_tests_gpu_test_reports_"): continue # change "_" back to "/" (to show the job name as path) job_name = 
artifact_path["path"].replace("doc_tests_gpu_test_reports_", "").replace("_", "/") # This dict (for each job) will contain all the information relative to each doc test job, in particular: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' job_result = {} doc_test_results[job_name] = job_result job = artifact_name_to_job_map[artifact_path["path"]] job_result["job_link"] = job["html_url"] job_result["category"] = "Python Examples" if job_name.startswith("src/") else "MD Examples" artifact = retrieve_artifact(artifact_path["path"]) if "stats" in artifact: failed, success, time_spent = handle_test_results(artifact["stats"]) job_result["n_failures"] = failed job_result["n_success"] = success job_result["time_spent"] = time_spent[1:-1] + ", " job_result["failed"] = [] job_result["failures"] = {} all_failures = extract_first_line_failure(artifact["failures_short"]) for line in artifact["summary_short"].split("\n"): if re.search("FAILED", line): line = line.replace("FAILED ", "") line = line.split()[0].replace("\n", "") if "::" in line: file_path, test = line.split("::") else: file_path, test = line, line job_result["failed"].append(test) failure = all_failures[test] if test in all_failures else "N/A" job_result["failures"][test] = failure # Save and to be uploaded as artifact os.makedirs("doc_test_results", exist_ok=True) with open("doc_test_results/doc_test_results.json", "w", encoding="UTF-8") as fp: json.dump(doc_test_results, fp, ensure_ascii=False, indent=4) message = Message("🤗 Results of the doc tests.", doc_test_results) message.post() message.post_reply()
transformers/utils/notification_service_doc_tests.py/0
{ "file_path": "transformers/utils/notification_service_doc_tests.py", "repo_id": "transformers", "token_count": 6488 }
from transformers import PretrainedConfig class CustomConfig(PretrainedConfig): model_type = "custom" def __init__(self, attribute=1, **kwargs): self.attribute = attribute super().__init__(**kwargs) class NoSuperInitConfig(PretrainedConfig): model_type = "custom" def __init__(self, attribute=1, **kwargs): self.attribute = attribute
transformers/utils/test_module/custom_configuration.py/0
{ "file_path": "transformers/utils/test_module/custom_configuration.py", "repo_id": "transformers", "token_count": 136 }
# Using LLaMA models with TRL

We've begun rolling out examples to use Meta's LLaMA models in `trl` (see [Meta's LLaMA release](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) for the original LLaMA model).

## Efficient training strategies

Even training the smallest LLaMA model requires an enormous amount of memory. Some quick math: in bf16, every parameter uses 2 bytes (in fp32 4 bytes) in addition to 8 bytes used, e.g., in the Adam optimizer (see the [performance docs](https://huggingface.co/docs/transformers/perf_train_gpu_one#optimizer) in Transformers for more info). So a 7B parameter model would use `(2+8)*7B=70GB` just to fit in memory and would likely need more when you compute intermediate values such as attention scores. So you couldn’t train the model even on a single 80GB A100 like that. You can use some tricks, like more efficient optimizers or half-precision training, to squeeze a bit more into memory, but you’ll run out sooner or later.

Another option is to use Parameter-Efficient Fine-Tuning (PEFT) techniques, such as the [`peft`](https://github.com/huggingface/peft) library, which can perform low-rank adaptation (LoRA) on a model loaded in 8-bit. For more on `peft` + `trl`, see the [docs](https://huggingface.co/docs/trl/sentiment_tuning_peft).

Loading the model in 8bit reduces the memory footprint drastically since you only need one byte per parameter for the weights (e.g. 7B LLaMA is 7GB in memory). Instead of training the original weights directly, LoRA adds small adapter layers on top of some specific layers (usually the attention layers); thus, the number of trainable parameters is drastically reduced.

In this scenario, a rule of thumb is to allocate ~1.2-1.4GB per billion parameters (depending on the batch size and sequence length) to fit the entire fine-tuning setup. This enables fine-tuning larger models (up to 50-60B scale models on an NVIDIA A100 80GB) at low cost.

Now we can fit very large models into a single GPU, but the training might still be very slow. The simplest strategy in this scenario is data parallelism: we replicate the same training setup into separate GPUs and pass different batches to each GPU. With this, you can parallelize the forward/backward passes of the model and scale with the number of GPUs.

![chapter10_ddp.png](https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/chapter10_ddp.png)

We use either the `transformers.Trainer` or `accelerate`, which both support data parallelism without any code changes, by simply passing arguments when calling the scripts with `torchrun` or `accelerate launch`. The following runs a training script with 8 GPUs on a single machine with `accelerate` and `torchrun`, respectively.

```bash
accelerate launch --multi_gpu --num_machines 1 --num_processes 8 my_accelerate_script.py
torchrun --nnodes 1 --nproc_per_node 8 my_torch_script.py
```

## Supervised fine-tuning

Before we start training reward models and tuning our model with RL, it helps if the model is already good in the domain we are interested in. In our case, we want it to answer questions, while for other use cases, we might want it to follow instructions, in which case instruction tuning is a great idea. The easiest way to achieve this is by continuing to train the language model with the language modeling objective on texts from the domain or task.
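For orientation, here is what that baseline looks like with plain `transformers` APIs, before the packing and `peft` tricks described next. This is only an illustrative sketch, not the script shipped with the examples: the model name and dataset are stand-ins for the converted LLaMA checkpoint and the in-domain corpus.

```python
# Illustrative sketch of continued training with the causal language modeling objective.
# "gpt2" and "imdb" are stand-ins for the converted LLaMA checkpoint and the in-domain data.
from datasets import load_dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

model_name = "gpt2"  # stand-in for the LLaMA checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(model_name)

# Tokenize an in-domain text column; the collator builds the shifted labels for causal LM.
dataset = load_dataset("imdb", split="train[:1%]")  # stand-in corpus
tokenized = dataset.map(
    lambda batch: tokenizer(batch["text"], truncation=True, max_length=512),
    batched=True,
    remove_columns=dataset.column_names,
)

trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="sft-lm-baseline", per_device_train_batch_size=2, max_steps=100),
    train_dataset=tokenized,
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
trainer.train()
```

The actual examples refine this baseline with 8-bit loading, LoRA adapters, and packing, as described below.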
The [StackExchange dataset](https://huggingface.co/datasets/HuggingFaceH4/stack-exchange-preferences) is enormous (over 10 million instructions), so we can easily train the language model on a subset of it. There is nothing special about fine-tuning the model before doing RLHF - it’s just the causal language modeling objective from pretraining that we apply here.

To use the data efficiently, we use a technique called packing: instead of having one text per sample in the batch and then padding to either the longest text or the maximal context of the model, we concatenate a lot of texts with an EOS token in between and cut chunks of the context size to fill the batch without any padding.

![chapter10_preprocessing-clm.png](https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/chapter10_preprocessing-clm.png)

With this approach, the training is much more efficient, as each token that is passed through the model is also trained, in contrast to padding tokens, which are usually masked from the loss. If you don't have much data and are more concerned about occasionally cutting off some tokens that are overflowing the context, you can also use a classical data loader.

The packing is handled by the `ConstantLengthDataset` and we can then use the `Trainer` after loading the model with `peft`. First, we load the model in int8, prepare it for training, and then add the LoRA adapters.

```python
# load model in 8bit
model = AutoModelForCausalLM.from_pretrained(
    args.model_path,
    load_in_8bit=True,
    device_map={"": Accelerator().local_process_index}
)
model = prepare_model_for_kbit_training(model)

# add LoRA to model
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
```

We train the model for a few thousand steps with the causal language modeling objective and save the model. Since we will tune the model again with different objectives, we merge the adapter weights with the original model weights.

**Disclaimer:** due to LLaMA's license, we release only the adapter weights for this and the model checkpoints in the following sections. You can apply for access to the base model's weights by filling out Meta AI's [form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform) and then converting them to the 🤗 Transformers format by running this [script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py). Note that you'll also need to install 🤗 Transformers from source until `v4.28` is released.

Now that we have fine-tuned the model for the task, we are ready to train a reward model.

## Reward modeling and human preferences

In principle, we could fine-tune the model using RLHF directly with the human annotations. However, this would require us to send some samples to humans for rating after each optimization iteration. This is expensive and slow due to the number of training samples needed for convergence and the inherent latency of human reading and annotator speed.

A trick that works well instead of direct feedback is training a reward model on human annotations collected before the RL loop. The goal of the reward model is to imitate how a human would rate a text. There are several possible strategies to build a reward model: the most straightforward way would be to predict the annotation (e.g. a rating score or a binary value for “good”/”bad”).
In practice, what works better is to predict the ranking of two examples, where the reward model is presented with two candidates `(y_k, y_j)` for a given prompt `x` and has to predict which one would be rated higher by a human annotator.

With the StackExchange dataset, we can infer which of the two answers was preferred by the users based on the score. With that information and a pairwise ranking loss, `-log(sigmoid(r_j - r_k))` where `r_j` is the reward of the preferred answer, we can then modify the `transformers.Trainer` by adding a custom loss function.

```python
class RewardTrainer(Trainer):
    def compute_loss(self, model, inputs, return_outputs=False):
        rewards_j = model(input_ids=inputs["input_ids_j"], attention_mask=inputs["attention_mask_j"])[0]
        rewards_k = model(input_ids=inputs["input_ids_k"], attention_mask=inputs["attention_mask_k"])[0]
        loss = -nn.functional.logsigmoid(rewards_j - rewards_k).mean()
        if return_outputs:
            return loss, {"rewards_j": rewards_j, "rewards_k": rewards_k}
        return loss
```

We utilize a subset of 100,000 pairs of candidates and evaluate on a held-out set of 50,000. With a modest training batch size of 4, we train the LLaMA model using the LoRA `peft` adapter for a single epoch using the Adam optimizer with BF16 precision. Our LoRA configuration is:

```python
peft_config = LoraConfig(
    task_type=TaskType.SEQ_CLS,
    inference_mode=False,
    r=8,
    lora_alpha=32,
    lora_dropout=0.1,
)
```

As detailed in the next section, the resulting adapter can be merged into the frozen model and saved for further downstream use (a minimal sketch of this merge step is given at the end of this post).

## Reinforcement Learning from Human Feedback

With the fine-tuned language model and the reward model at hand, we are now ready to run the RL loop. It follows roughly three steps:

1. Generate responses from prompts,
2. Rate the responses with the reward model,
3. Run a reinforcement learning policy-optimization step with the ratings.

The Query and Response prompts are templated as follows before being tokenized and passed to the model:

```bash
Question: <Query>
Answer: <Response>
```

The same template was used for the SFT, RM, and RLHF stages.

Once more, we utilize `peft` for memory-efficient training, which offers an extra advantage in the RLHF context. Here, the reference model and policy share the same base, the SFT model, which we load in 8-bit and freeze during training. We exclusively optimize the policy's LoRA weights using PPO while sharing the base model's weights.

```python
for epoch, batch in tqdm(enumerate(ppo_trainer.dataloader)):
    question_tensors = batch["input_ids"]

    # sample from the policy and generate responses
    response_tensors = ppo_trainer.generate(
        question_tensors,
        return_prompt=False,
        length_sampler=output_length_sampler,
        **generation_kwargs,
    )
    batch["response"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True)

    # Compute sentiment score
    texts = [q + r for q, r in zip(batch["query"], batch["response"])]
    pipe_outputs = sentiment_pipe(texts, **sent_kwargs)
    rewards = [torch.tensor(output[0]["score"] - script_args.reward_baseline) for output in pipe_outputs]

    # Run PPO step
    stats = ppo_trainer.step(question_tensors, response_tensors, rewards)
    # Log stats to Wandb
    ppo_trainer.log_stats(stats, batch, rewards)
```

For the rest of the details and evaluation, please refer to our [blog post on StackLLaMA](https://huggingface.co/blog/stackllama).
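One step that the snippets above mention but never show is merging a trained LoRA adapter back into its frozen base model. The sketch below is one way to do it with `peft`'s `merge_and_unload`; it is not the exact script used for StackLLaMA, and the paths are placeholders for your own converted LLaMA checkpoint and adapter directory.

```python
# Minimal sketch of merging a trained LoRA adapter back into its base model.
# The paths below are placeholders for illustration only.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_model_path = "path/to/converted-llama-7b"  # placeholder
adapter_path = "path/to/lora-adapter"           # placeholder
output_path = "path/to/merged-model"            # placeholder

base_model = AutoModelForCausalLM.from_pretrained(base_model_path, torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(base_model, adapter_path)

# Fold the LoRA weights into the base weights so the next stage (reward modeling or RLHF)
# can load the result as a regular Transformers checkpoint.
model = model.merge_and_unload()
model.save_pretrained(output_path)
AutoTokenizer.from_pretrained(base_model_path).save_pretrained(output_path)
```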
trl/docs/source/using_llama_models.md/0
{ "file_path": "trl/docs/source/using_llama_models.md", "repo_id": "trl", "token_count": 2985 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Optional from datasets import load_dataset from huggingface_hub import ModelCard from transformers import HfArgumentParser @dataclass class ScriptArguments: r""" Arguments for the script. Args: push_to_hub (`bool`, *optional*, defaults to `False`): Whether to push the dataset to the Hugging Face Hub. repo_id (`str`, *optional*, defaults to `"trl-lib/tldr"`): Hugging Face repository ID to push the dataset to. dataset_num_proc (`int` or `None`, *optional*, defaults to `None`): Number of workers to use for dataset processing. """ push_to_hub: bool = field( default=False, metadata={"help": "Whether to push the dataset to the Hugging Face Hub."}, ) repo_id: str = field( default="trl-lib/tldr", metadata={"help": "Hugging Face repository ID to push the dataset to."}, ) dataset_num_proc: Optional[int] = field( default=None, metadata={"help": "Number of workers to use for dataset processing."}, ) def to_prompt_completion(example): tldr_format_str = "SUBREDDIT: r/{subreddit}\n\nTITLE: {title}\n\nPOST: {post}\n\nTL;DR:" prompt = tldr_format_str.format(subreddit=example["subreddit"], title=example["title"], post=example["post"]) completion = " " + example["summary"] # Add a space to separate the prompt from the completion return {"prompt": prompt, "completion": completion} model_card = ModelCard(""" --- tags: [trl] --- # TL;DR Dataset ## Summary The TL;DR dataset is a processed version of Reddit posts, specifically curated to train models using the [TRL library](https://github.com/huggingface/trl) for summarization tasks. It leverages the common practice on Reddit where users append "TL;DR" (Too Long; Didn't Read) summaries to lengthy posts, providing a rich source of paired text data for training summarization models. ## Data Structure - **Format**: [Standard](https://huggingface.co/docs/trl/main/dataset_formats#standard) - **Type**: [Prompt-completion](https://huggingface.co/docs/trl/main/dataset_formats#prompt-completion) Columns: - `"prompt"`: The unabridged Reddit post. - `"completion"`: The concise "TL;DR" summary appended by the author. This structure enables models to learn the relationship between detailed content and its abbreviated form, enhancing their summarization capabilities. ## Generation script The script used to generate this dataset can be found [here](https://github.com/huggingface/trl/blob/main/examples/datasets/tldr.py). 
""") if __name__ == "__main__": parser = HfArgumentParser(ScriptArguments) script_args = parser.parse_args_into_dataclasses()[0] # Filtered reddit TL;DR dataset from https://github.com/openai/summarize-from-feedback?tab=readme-ov-file#reddit-tldr-dataset data_files = { "train": "https://openaipublic.blob.core.windows.net/summarize-from-feedback/datasets/tldr_3_filtered/train.jsonl", "validation": "https://openaipublic.blob.core.windows.net/summarize-from-feedback/datasets/tldr_3_filtered/valid.jsonl", "test": "https://openaipublic.blob.core.windows.net/summarize-from-feedback/datasets/tldr_3_filtered/test.jsonl", } dataset = load_dataset("json", data_files=data_files) dataset = dataset.map( to_prompt_completion, num_proc=script_args.dataset_num_proc, remove_columns=["id", "subreddit", "title", "post", "summary"], ) if script_args.push_to_hub: dataset.push_to_hub(script_args.repo_id) model_card.push_to_hub(script_args.repo_id, repo_type="dataset")
trl/examples/datasets/tldr.py/0
{ "file_path": "trl/examples/datasets/tldr.py", "repo_id": "trl", "token_count": 1501 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Optional from datasets import load_dataset from transformers import HfArgumentParser from vllm import LLM, SamplingParams from trl import HfPairwiseJudge, OpenAIPairwiseJudge """ Examples: python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/rloo_tldr --num_examples 1000 Model win rate: 31.40% python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/rloo_tldr --judge_model gpt-3.5-turbo-0125 --num_examples 1000 Model win rate: 51.60% python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/rloo_tldr --judge_model gpt-4o-mini --num_examples 1000 Model win rate: 51.20% python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/ppo_tldr --num_examples 1000 Model win rate: 46.30% python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/ppo_tldr --judge_model gpt-3.5-turbo-0125 --num_examples 1000 Model win rate: 52.50% python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/ppo_tldr --judge_model gpt-4o-mini --num_examples 1000 Model win rate: 63.00% """ @dataclass class ScriptArguments: r""" Arguments for the script. Args: model_name_or_path (`str`): Model name or path to the model to evaluate. judge_model (`str`, *optional*, defaults to `"meta-llama/Meta-Llama-3-70B-Instruct"`): Model name or path to the model to use as a judge. E.g., 'gpt-3.5-turbo-0125' or 'meta-llama/Meta-Llama-3-70B-Instruct'. num_examples (`int` or `None`, *optional*, defaults to `None`): Number of examples to evaluate. """ model_name_or_path: str = field(metadata={"help": "Model name or path to the model to evaluate."}) judge_model: str = field( default="meta-llama/Meta-Llama-3-70B-Instruct", metadata={ "help": "Model name or path to the model to use as a judge. E.g., 'gpt-3.5-turbo-0125' or " "'meta-llama/Meta-Llama-3-70B-Instruct'." 
}, ) num_examples: Optional[int] = field(default=None, metadata={"help": "Number of examples to evaluate."}) # Parse the arguments parser = HfArgumentParser(ScriptArguments) script_args = parser.parse_args_into_dataclasses()[0] # Load the dataset dataset = load_dataset("trl-lib/tldr", split="validation") if script_args.num_examples is not None: dataset = dataset.select(range(script_args.num_examples)) # Extract the prompts and reference completions prompts = dataset["prompt"] reference_completions = dataset["completion"] # Generate the model completions sampling_params = SamplingParams(temperature=0.0, top_p=0.95, max_tokens=200) # very generous max token length llm = LLM(model=script_args.model_name_or_path, tensor_parallel_size=1) outputs = llm.generate(prompts, sampling_params) model_completions = [output.outputs[0].text.strip() for output in outputs] # Judge the outputs if "gpt" in script_args.judge_model: judge = OpenAIPairwiseJudge(script_args.judge_model) else: judge = HfPairwiseJudge(script_args.judge_model) completions = [[c0, c1] for c0, c1 in zip(reference_completions, model_completions)] best_idxs = judge.judge(prompts, completions) model_win_rate = best_idxs.count(1) / len(best_idxs) print(f"Model win rate: {model_win_rate*100:.2f}%")
trl/examples/scripts/evals/judge_tldr.py/0
{ "file_path": "trl/examples/scripts/evals/judge_tldr.py", "repo_id": "trl", "token_count": 1497 }
[tool.ruff] target-version = "py37" line-length = 119 [tool.ruff.lint] ignore = [ "B028", # warning without explicit stacklevel "C408", # dict() calls (stylistic) "C901", # function complexity "E501", ] extend-select = ["E", "F", "I", "W", "UP", "B", "T", "C"] [tool.ruff.lint.per-file-ignores] # Allow prints in auxiliary scripts "examples/**.py" = ["T201"] "scripts/**.py" = ["T201"] # Ignore import violations in all `__init__.py` files. "__init__.py" = ["F401"] [tool.ruff.lint.isort] lines-after-imports = 2 known-first-party = ["trl"]
trl/pyproject.toml/0
{ "file_path": "trl/pyproject.toml", "repo_id": "trl", "token_count": 227 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from transformers import AutoTokenizer, GenerationConfig from trl import AutoModelForCausalLMWithValueHead from trl.core import LengthSampler from trl.extras import BestOfNSampler def queries_to_scores(list_of_strings): return [torch.rand(1).item() for _ in list_of_strings] class BestOfNSamplerTester(unittest.TestCase): """ Tests the BestOfNSampler class """ ref_model_name = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" output_length_sampler = LengthSampler(2, 6) model = AutoModelForCausalLMWithValueHead.from_pretrained(ref_model_name) tokenizer = AutoTokenizer.from_pretrained(ref_model_name) tokenizer.pad_token = tokenizer.eos_token output_length_sampler = LengthSampler(2, 6) def test_different_input_types(self): r""" Tests if the different input types normalizer works """ generation_config = GenerationConfig( min_length=-1, top_k=0.0, top_p=1.0, do_sample=True, pad_token_id=self.tokenizer.eos_token_id, ) output_length_sampler = LengthSampler(2, 6) best_of_n = BestOfNSampler( self.model, self.tokenizer, queries_to_scores, length_sampler=output_length_sampler, generation_config=generation_config, ) queries = ["hello world", "goodbye world"] tokenized_queries = [self.tokenizer.encode(query) for query in queries] various_queries_formats = [ (tokenized_queries[0], 1), (tokenized_queries, 2), (torch.tensor(tokenized_queries[1]), 1), ([torch.tensor(query) for query in tokenized_queries], 2), ] for q, expected_length in various_queries_formats: results = best_of_n.generate(q) self.assertIsInstance(results, list) self.assertEqual(len(results), expected_length) def test_different_sample_sizes_and_n_candidates_values(self): r""" Tests different sample sizes and n_candidates values """ generation_config = GenerationConfig( min_length=-1, top_k=0.0, top_p=1.0, do_sample=True, pad_token_id=self.tokenizer.eos_token_id, ) output_length_sampler = LengthSampler(6, 10) for sample_value, n_candidates_values, expected in [ (4, 2, 2), (10, 3, 3), (6, 4, 4), ]: best_of_n = BestOfNSampler( self.model, self.tokenizer, queries_to_scores, length_sampler=output_length_sampler, generation_config=generation_config, sample_size=sample_value, n_candidates=n_candidates_values, ) queries = ["hello world", "troll the world"] tokenized_queries = [self.tokenizer.encode(query) for query in queries] results = best_of_n.generate(tokenized_queries) for result in results: self.assertEqual(len(result), expected)
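# Editor's usage sketch (not part of the original tests): outside of the test suite, the
# `queries_to_scores` callable is expected to map generated texts to scalar rewards rather than the
# random stub used above. A hedged example using a sentiment pipeline as the scorer (the pipeline
# model choice is an assumption for illustration only):
#
#     from transformers import pipeline
#
#     sentiment_pipe = pipeline("sentiment-analysis", model="lvwerra/distilbert-imdb")
#
#     def queries_to_scores(list_of_strings):
#         return [output["score"] for output in sentiment_pipe(list_of_strings)]
#
#     best_of_n = BestOfNSampler(
#         model, tokenizer, queries_to_scores, length_sampler=LengthSampler(2, 6)
#     )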
trl/tests/test_best_of_n_sampler.py/0
{ "file_path": "trl/tests/test_best_of_n_sampler.py", "repo_id": "trl", "token_count": 1678 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import unittest from trl import AllTrueJudge, HfPairwiseJudge, PairRMJudge from .testing_utils import RandomBinaryJudge, require_llm_blender class TestJudges(unittest.TestCase): def _get_prompts_and_pairwise_completions(self): prompts = ["The capital of France is", "The biggest planet in the solar system is"] completions = [["Paris", "Marseille"], ["Saturn", "Jupiter"]] return prompts, completions def _get_prompts_and_single_completions(self): prompts = ["What's the capital of France?", "What's the color of the sky?"] completions = ["Marseille", "blue"] return prompts, completions def test_all_true_judge(self): judge = AllTrueJudge(judges=[RandomBinaryJudge(), RandomBinaryJudge()]) prompts, completions = self._get_prompts_and_single_completions() judgements = judge.judge(prompts=prompts, completions=completions) self.assertEqual(len(judgements), 2) self.assertTrue(all(judgement in {0, 1, -1} for judgement in judgements)) @unittest.skip("This test needs to be run manually since it requires a valid Hugging Face API key.") def test_hugging_face_judge(self): judge = HfPairwiseJudge() prompts, completions = self._get_prompts_and_pairwise_completions() ranks = judge.judge(prompts=prompts, completions=completions) self.assertEqual(len(ranks), 2) self.assertTrue(all(isinstance(rank, int) for rank in ranks)) self.assertEqual(ranks, [0, 1]) def load_pair_rm_judge(self): # When using concurrent tests, PairRM may fail to load the model while another job is still downloading. # This is a workaround to retry loading the model a few times. for _ in range(5): try: return PairRMJudge() except ValueError: time.sleep(5) raise ValueError("Failed to load PairRMJudge") @require_llm_blender def test_pair_rm_judge(self): judge = self.load_pair_rm_judge() prompts, completions = self._get_prompts_and_pairwise_completions() ranks = judge.judge(prompts=prompts, completions=completions) self.assertEqual(len(ranks), 2) self.assertTrue(all(isinstance(rank, int) for rank in ranks)) self.assertEqual(ranks, [0, 1]) @require_llm_blender def test_pair_rm_judge_return_scores(self): judge = self.load_pair_rm_judge() prompts, completions = self._get_prompts_and_pairwise_completions() probs = judge.judge(prompts=prompts, completions=completions, return_scores=True) self.assertEqual(len(probs), 2) self.assertTrue(all(isinstance(prob, float) for prob in probs)) self.assertTrue(all(0 <= prob <= 1 for prob in probs))
trl/tests/test_judges.py/0
{ "file_path": "trl/tests/test_judges.py", "repo_id": "trl", "token_count": 1297 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from datasets import load_dataset from parameterized import parameterized from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer from transformers.testing_utils import require_peft from transformers.utils import is_peft_available from trl import XPOConfig, XPOTrainer, is_llm_blender_available from .testing_utils import RandomPairwiseJudge if is_peft_available(): from peft import LoraConfig, get_peft_model class TestXPOTrainer(unittest.TestCase): def setUp(self): self.model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" self.model = AutoModelForCausalLM.from_pretrained(self.model_id) self.ref_model = AutoModelForCausalLM.from_pretrained(self.model_id) self.reward_model = AutoModelForSequenceClassification.from_pretrained(self.model_id, num_labels=1) self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) self.tokenizer.pad_token = self.tokenizer.eos_token @parameterized.expand([("standard_prompt_only",), ("conversational_prompt_only",)]) def test_xpo_trainer_training(self, config_name): with tempfile.TemporaryDirectory() as tmp_dir: training_args = XPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=1, learning_rate=9e-1, eval_strategy="steps", report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", config_name) trainer = XPOTrainer( model=self.model, ref_model=self.ref_model, reward_model=self.reward_model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], ) trainer.train() # Check if training loss is available self.assertIn("train_loss", trainer.state.log_history[-1]) @require_peft def test_training_with_peft(self): lora_config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM") with tempfile.TemporaryDirectory() as tmp_dir: training_args = XPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, learning_rate=5.0e-7, eval_strategy="steps", report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_prompt_only") trainer = XPOTrainer( model=self.model, reward_model=self.reward_model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], peft_config=lora_config, ) trainer.train() # Check if training loss is available self.assertIn("train_loss", trainer.state.log_history[-1]) @require_peft def test_training_with_peft_and_ref_model(self): lora_config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM") with tempfile.TemporaryDirectory() as tmp_dir: training_args = XPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, learning_rate=5.0e-7, eval_strategy="steps", report_to="none", ) dummy_dataset = 
load_dataset("trl-internal-testing/zen", "standard_prompt_only") trainer = XPOTrainer( model=self.model, ref_model=self.ref_model, reward_model=self.reward_model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], peft_config=lora_config, ) trainer.train() # Check if training loss is available self.assertIn("train_loss", trainer.state.log_history[-1]) @require_peft def test_training_with_peft_model_and_peft_config(self): model_lora_config = LoraConfig(r=8, lora_alpha=16, lora_dropout=0.1, bias="none", task_type="CAUSAL_LM") model = get_peft_model(self.model, model_lora_config) # we want only the "train adapter" to be trained lora_train_config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM") with tempfile.TemporaryDirectory() as tmp_dir: training_args = XPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, learning_rate=5.0e-7, eval_strategy="steps", report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_prompt_only") trainer = XPOTrainer( model=model, reward_model=self.reward_model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], peft_config=lora_train_config, ) trainer.train() # Check if training loss is available self.assertIn("train_loss", trainer.state.log_history[-1]) @unittest.skipIf(not is_llm_blender_available(), "llm-blender is not available") @parameterized.expand([("standard_prompt_only",), ("conversational_prompt_only",)]) def test_xpo_trainer_judge_training(self, config_name): with tempfile.TemporaryDirectory() as tmp_dir: training_args = XPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=1, learning_rate=9e-1, eval_strategy="steps", report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", config_name) judge = RandomPairwiseJudge() trainer = XPOTrainer( model=self.model, ref_model=self.ref_model, judge=judge, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], ) trainer.train() # Check if training loss is available self.assertIn("train_loss", trainer.state.log_history[-1])
trl/tests/test_xpo_trainer.py/0
{ "file_path": "trl/tests/test_xpo_trainer.py", "repo_id": "trl", "token_count": 3756 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import os from copy import deepcopy from typing import Optional import torch import torch.nn as nn from accelerate import PartialState from huggingface_hub import hf_hub_download from huggingface_hub.utils import ( EntryNotFoundError, HFValidationError, LocalEntryNotFoundError, RepositoryNotFoundError, ) from safetensors.torch import load_file as safe_load_file from transformers import GenerationMixin, PreTrainedModel, is_torch_npu_available, is_torch_xpu_available from transformers.utils import is_peft_available if is_peft_available(): from peft import ( PeftConfig, PeftModel, PeftModelForCausalLM, PeftModelForSeq2SeqLM, PromptLearningConfig, get_peft_model, prepare_model_for_kbit_training, ) from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled LAYER_PATTERNS = [ "transformer.h.{layer}", "model.decoder.layers.{layer}", "gpt_neox.layers.{layer}", "model.layers.{layer}", ] class PreTrainedModelWrapper(nn.Module): r""" A wrapper class around a (`transformers.PreTrainedModel`) to be compatible with the (`~transformers.PreTrained`) class in order to keep some attributes and methods of the (`~transformers.PreTrainedModel`) class. Attributes: pretrained_model (`transformers.PreTrainedModel`): The model to be wrapped. parent_class (`transformers.PreTrainedModel`): The parent class of the model to be wrapped. supported_args (`list`): The list of arguments that are supported by the wrapper class. 
""" transformers_parent_class = None supported_args = None supported_modules = ("v_head",) supported_rm_modules = ("score",) supported_pretrained_model_architectures = ( (PreTrainedModel) if not is_peft_available() else (PreTrainedModel, PeftModelForCausalLM, PeftModelForSeq2SeqLM) ) def __init__( self, pretrained_model=None, score_module=None, supports_rm_adapter=False, rm_adapter_name=None, **kwargs ): super().__init__() self.pretrained_model = pretrained_model self.config = pretrained_model.config self.prepare_inputs_for_generation = pretrained_model.prepare_inputs_for_generation self.is_loaded_in_8bit = getattr(pretrained_model, "is_loaded_in_8bit", False) self.is_loaded_in_4bit = getattr(pretrained_model, "is_loaded_in_4bit", False) self.is_sequential_parallel = False if hasattr(pretrained_model, "gradient_checkpointing_disable"): self.gradient_checkpointing_disable = pretrained_model.gradient_checkpointing_disable if hasattr(pretrained_model, "gradient_checkpointing_enable"): self.gradient_checkpointing_enable = pretrained_model.gradient_checkpointing_enable if hasattr(pretrained_model, "enable_input_require_grads"): self.enable_input_require_grads = pretrained_model.enable_input_require_grads self.supports_rm_adapter = supports_rm_adapter self.rm_adapter_name = rm_adapter_name self.policy_adapter_name = "default" if score_module is not None: self.score = score_module @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Instantiates a new model from a pretrained model from `transformers`. The pretrained model is loaded using the `from_pretrained` method of the `transformers.PreTrainedModel` class. The arguments that are specific to the `transformers.PreTrainedModel` class are passed along this method and filtered out from the `kwargs` argument. Args: pretrained_model_name_or_path (`str` or `transformers.PreTrainedModel`): The path to the pretrained model or its name. *model_args (`list`, *optional*)): Additional positional arguments passed along to the underlying model's `from_pretrained` method. **kwargs (`dict`, *optional*): Additional keyword arguments passed along to the underlying model's `from_pretrained` method. We also pre-process the kwargs to extract the arguments that are specific to the `transformers.PreTrainedModel` class and the arguments that are specific to trl models. The kwargs also support `prepare_model_for_kbit_training` arguments from `peft` library. """ if kwargs is not None: peft_config = kwargs.pop("peft_config", None) reward_adapter = kwargs.pop("reward_adapter", None) reward_adapter_name = kwargs.pop("reward_adapter_name", "reward_adapter") is_trainable = kwargs.pop("is_trainable", False) trl_model_args, pretrained_kwargs, peft_quantization_kwargs = cls._split_kwargs(kwargs) token = pretrained_kwargs.get("token", None) else: peft_config = None is_trainable = False trl_model_args = {} pretrained_kwargs = {} peft_quantization_kwargs = {} token = None if reward_adapter is not None and not isinstance(reward_adapter, str): raise ValueError( "The `reward_adapter` argument should be a string representing the name of local path or the Hub id to the Reward Modeling adapter." 
) is_peft_model = False current_device = cls._get_current_device() if isinstance(pretrained_model_name_or_path, str): is_loaded_in_8bit = pretrained_kwargs["load_in_8bit"] if "load_in_8bit" in pretrained_kwargs else False is_loaded_in_4bit = pretrained_kwargs["load_in_4bit"] if "load_in_4bit" in pretrained_kwargs else False else: is_loaded_in_8bit = getattr(pretrained_model_name_or_path, "is_loaded_in_8bit", False) is_loaded_in_4bit = getattr(pretrained_model_name_or_path, "is_loaded_in_4bit", False) if (is_loaded_in_8bit or is_loaded_in_4bit) and "device_map" not in pretrained_kwargs: # warn users logging.warning( "The `device_map` argument is not provided. We will override the device_map argument." " to set the entire" " model on the current device. If you want to set the model on multiple devices, please provide" " a custom `device_map` argument." ) pretrained_kwargs["device_map"] = {"": current_device} if is_peft_available() and peft_config is not None and not isinstance(peft_config, PeftConfig): raise ValueError("The `peft_config` argument should be an instance of `peft.PeftConfig` class.") # First, load the pre-trained model using the parent-class # either `AutoModelForCausalLM` or `AutoModelForSeq2SeqLM` if isinstance(pretrained_model_name_or_path, str): if is_peft_available(): try: # If there is a trained peft adapter in the hub, load its config. remote_adapter_config = hf_hub_download( pretrained_model_name_or_path, "adapter_config.json", token=token, ) except (EntryNotFoundError, LocalEntryNotFoundError, HFValidationError, RepositoryNotFoundError): remote_adapter_config = None else: remote_adapter_config = None local_adapter_present = os.path.exists(os.path.join(pretrained_model_name_or_path, "adapter_config.json")) if (local_adapter_present or remote_adapter_config is not None) and is_peft_available(): if peft_config is not None: logging.warning( "`peft_config` argument ignored since a peft config file was found in " f"{pretrained_model_name_or_path}" ) # Load the trained peft adapter config if local_adapter_present: trained_adapter_config = PeftConfig.from_pretrained(pretrained_model_name_or_path) else: remote_adapter_dir = os.path.dirname(remote_adapter_config) trained_adapter_config = PeftConfig.from_pretrained(remote_adapter_dir) # Load the pretrained base model pretrained_model = cls.transformers_parent_class.from_pretrained( trained_adapter_config.base_model_name_or_path, *model_args, **pretrained_kwargs ) # Wrap the pretrained model with the trained peft adapter pretrained_model = PeftModel.from_pretrained( pretrained_model, pretrained_model_name_or_path, is_trainable=is_trainable, token=token ) logging.info("Trained peft adapter loaded") else: pretrained_model = cls.transformers_parent_class.from_pretrained( pretrained_model_name_or_path, *model_args, **pretrained_kwargs ) if peft_config is not None: # Initialize a new peft adapter with the given config if is_loaded_in_8bit or is_loaded_in_4bit: pretrained_model = prepare_model_for_kbit_training( pretrained_model, **peft_quantization_kwargs, ) pretrained_model = get_peft_model(pretrained_model, peft_config) logging.info("peft adapter initialised") elif isinstance(pretrained_model_name_or_path, cls.supported_pretrained_model_architectures): pretrained_model = pretrained_model_name_or_path if peft_config is not None and isinstance(pretrained_model, PreTrainedModel): # Initialize a new peft adapter with the given config if is_loaded_in_8bit or is_loaded_in_4bit: pretrained_model = prepare_model_for_kbit_training( 
pretrained_model, **peft_quantization_kwargs, ) pretrained_model = get_peft_model(pretrained_model, peft_config) logging.info("peft adapter initialised") else: raise ValueError( "pretrained_model_name_or_path should be a string or a PreTrainedModel, " f"but is {type(pretrained_model_name_or_path)}" ) if is_peft_available(): if isinstance(pretrained_model, PeftModel): is_peft_model = True # for backward compatibility if hasattr(pretrained_model, "active_peft_config") and isinstance( pretrained_model.active_peft_config, PromptLearningConfig ): raise ValueError("PromptLearningConfig is not supported for PPO training.") # Add reward modeling adapter if specified if not is_peft_model and reward_adapter is not None: raise ValueError("reward_adapter can only be used with a PeftModel. ") elif is_peft_model and reward_adapter is not None: score_module = cls.add_and_load_reward_modeling_adapter( pretrained_model, reward_adapter, reward_adapter_name, token=token ) multi_adapter_args = { "score_module": score_module, "supports_rm_adapter": True, "rm_adapter_name": reward_adapter_name, } else: multi_adapter_args = {"supports_rm_adapter": False} # Then, create the full model by instantiating the wrapper class model = cls(pretrained_model, **multi_adapter_args, **trl_model_args) # if resume_training, load the state_dict again - this is ok since the # state_dict is removed from the model after loading it. is_resuming_training = True if isinstance(pretrained_model_name_or_path, str): safe_filename = os.path.join(pretrained_model_name_or_path, "model.safetensors") filename = os.path.join(pretrained_model_name_or_path, "pytorch_model.bin") sharded_index_filename = os.path.join(pretrained_model_name_or_path, "pytorch_model.bin.index.json") safe_sharded_index_filename = os.path.join(pretrained_model_name_or_path, "model.safetensors.index.json") is_sharded = False use_safe = os.path.exists(safe_filename) if not (os.path.exists(filename) or os.path.exists(safe_filename)): # Try with `pytorch_model.bin` filename, files_to_download, is_sharded, is_resuming_training = cls._get_checkpoint_from_hub( pretrained_model, pretrained_model_name_or_path, sharded_index_filename, token=token, ) # Try with safetensors if filename is None and files_to_download is None: safe_filename, files_to_download, is_sharded, is_resuming_training = cls._get_checkpoint_from_hub( pretrained_model, pretrained_model_name_or_path, safe_sharded_index_filename, token=token, model_name="model.safetensors", model_index_name="model.safetensors.index.json", ) use_safe = True else: use_safe = False loading_func = safe_load_file if use_safe else torch.load load_kwargs = {} if use_safe else {"map_location": "cpu", "weights_only": True} if is_resuming_training: if is_sharded: # download each file and add it to the state_dict state_dict = {} for shard_file in files_to_download: filename = hf_hub_download( pretrained_model_name_or_path, shard_file, token=token, ) state_dict.update(loading_func(filename, **load_kwargs)) else: state_dict = loading_func(filename if not use_safe else safe_filename, **load_kwargs) else: state_dict = pretrained_model_name_or_path.state_dict() model.is_peft_model = is_peft_model model.current_device = current_device if is_resuming_training: model.post_init(state_dict=state_dict) return model @classmethod def _get_checkpoint_from_hub( cls, pretrained_model, pretrained_model_name_or_path, index_filename, token=None, model_name="pytorch_model.bin", model_index_name="pytorch_model.bin.index.json", ): files_to_download = None filename 
= None is_resuming_training = True is_sharded = False try: filename = hf_hub_download( pretrained_model_name_or_path, model_name, token=token, ) # sharded except (EntryNotFoundError, LocalEntryNotFoundError, HFValidationError, RepositoryNotFoundError): if os.path.exists(index_filename): index_file_name = index_filename else: try: index_file_name = hf_hub_download( pretrained_model_name_or_path, model_index_name, token=token, ) except (EntryNotFoundError, LocalEntryNotFoundError, HFValidationError, RepositoryNotFoundError): # not continue training, do not have v_head weight is_resuming_training = False logging.warning( f"A {type(pretrained_model)} model is loaded from '{pretrained_model_name_or_path}', " f"and no v_head weight is found. This IS expected if you are not resuming PPO training." ) # load json if is_resuming_training: with open(index_file_name) as f: index = json.load(f) # check filename with `v_head` or any known extra module: files_to_download = set() for k, v in index["weight_map"].items(): if any(module in k for module in cls.supported_modules): files_to_download.add(v) is_sharded = True return filename, files_to_download, is_sharded, is_resuming_training @classmethod def _get_current_device(cls): r""" Get the current device. For GPU, we return the local process index using the `accelerate.PartialState` object to handle corner cases when running scripts in distributed environments. Returns: current_device (`Union[int, str]`): The current device. """ state = PartialState() if is_torch_xpu_available(): return f"xpu:{state.local_process_index}" elif is_torch_npu_available(): return f"npu:{state.local_process_index}" else: return state.local_process_index if torch.cuda.is_available() else "cpu" @classmethod def _split_kwargs(cls, kwargs): """ Separate the kwargs from the arguments that we support inside `supported_args` and the ones that we don't. """ check_peft_kwargs = False if is_peft_available(): from peft import prepare_model_for_kbit_training check_peft_kwargs = True supported_kwargs = {} unsupported_kwargs = {} peft_kwargs = {} for key, value in kwargs.items(): if key in cls.supported_args: supported_kwargs[key] = value else: unsupported_kwargs[key] = value if check_peft_kwargs: if key in prepare_model_for_kbit_training.__code__.co_varnames: peft_kwargs[key] = value if key in unsupported_kwargs: unsupported_kwargs.pop(key) return supported_kwargs, unsupported_kwargs, peft_kwargs @classmethod def add_and_load_reward_modeling_adapter( cls, pretrained_model, adapter_model_id, adapter_name="reward_model_adapter", token=None ): r""" Add and load a reward modeling adapter. This method can only be used if the model is a `PeftModel` and if you have initialized the model with the `reward_modeling_adapter_id` argument, pointing to the id of the reward modeling adapter. The latest needs also to contain the score head in order to produce the reward. 
""" pretrained_model.load_adapter(adapter_model_id, adapter_name, is_trainable=False) pretrained_model.train() filename = os.path.join(adapter_model_id, "adapter_model.bin") safe_loading = False if not os.path.exists(filename): try: local_filename = hf_hub_download( adapter_model_id, "adapter_model.bin", token=token, ) except Exception: filename = os.path.join(adapter_model_id, "adapter_model.safetensors") safe_loading = True if not os.path.exists(filename): try: local_filename = hf_hub_download( adapter_model_id, "adapter_model.safetensors", token=token, ) except Exception as exc: raise ValueError( "Could not find adapter model in the Hub, " "make sure you have the correct adapter model id." ) from exc else: local_filename = filename else: local_filename = filename loading_func = safe_load_file if safe_loading else torch.load load_kwargs = {} if safe_loading else {"map_location": "cpu", "weights_only": True} adapter_state_dict = loading_func(local_filename, **load_kwargs) for score_name_candidate in cls.supported_rm_modules: if any(score_name_candidate in name for name in adapter_state_dict.keys()): score_name = score_name_candidate # we have found the correct head name and can break break score_dict = {} for name, param in adapter_state_dict.items(): if score_name in name: key_name = ".".join(name.split(".")[-1:]) score_dict[key_name] = param.to(cls._get_current_device()) num_labels, hidden_dim = score_dict["weight"].shape has_bias = any("bias" in name for name in adapter_state_dict.keys()) score = nn.Linear(hidden_dim, num_labels, bias=has_bias).to( device=cls._get_current_device(), dtype=pretrained_model.dtype, ) score.load_state_dict(score_dict) for param in score.parameters(): param.requires_grad = False return score def push_to_hub(self, *args, **kwargs): r""" Push the pretrained model to the hub. This method is a wrapper around `transformers.PreTrainedModel.push_to_hub`. Please refer to the documentation of `transformers.PreTrainedModel.push_to_hub` for more information. Args: *args (`list`, *optional*): Positional arguments passed along to the underlying model's `push_to_hub` method. **kwargs (`dict`, *optional*): Keyword arguments passed along to the underlying model's `push_to_hub` method. """ raise NotImplementedError def save_pretrained(self, *args, **kwargs): r""" Save the pretrained model to a directory. This method is a wrapper around `transformers.PreTrainedModel.save_pretrained`. Please refer to the documentation of `transformers.PreTrainedModel.save_pretrained` for more information. Args: *args (`list`, *optional*): Positional arguments passed along to the underlying model's `save_pretrained` method. **kwargs (`dict`, *optional*): Keyword arguments passed along to the underlying model's `save_pretrained` method. """ state_dict = kwargs.get("state_dict") if state_dict is None: state_dict = self.state_dict() kwargs["state_dict"] = state_dict # if it is a peft model only save the `v_head` state_dict and # pop the `state_dict` from the kwargs to avoid slient bugs with `peft` if self.is_peft_model: save_path = args[0] save_path = os.path.join(save_path, "pytorch_model.bin") torch.save(state_dict, save_path) _ = kwargs.pop("state_dict", None) return self.pretrained_model.save_pretrained(*args, **kwargs) def state_dict(self, *args, **kwargs): r""" Return the state_dict of the pretrained model. """ raise NotImplementedError def post_init(self, *args, **kwargs): r""" Post initialization method. This method is called after the model is instantiated and loaded from a checkpoint. 
It can be used to perform additional operations such as loading the state_dict. """ raise NotImplementedError def compute_reward_score(self, input_ids, attention_mask=None, **kwargs): r""" Computes the reward score for a given input. The method has first to enable the adapter and then compute the reward score. After that the model disables the reward modeling adapter and enables the default ppo adapter again. """ if not self.supports_rm_adapter: raise ValueError("This model does not support reward modeling adapter.") # enable rm adapter self.pretrained_model.set_adapter(self.rm_adapter_name) self.pretrained_model.eval() with torch.no_grad(): base_model_output = self.pretrained_model( input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True, return_dict=True, **kwargs, ) last_hidden_states = base_model_output.hidden_states[-1] scores = self.score(last_hidden_states) self.pretrained_model.set_adapter(self.policy_adapter_name) self.pretrained_model.eval() return scores def create_reference_model( model: PreTrainedModelWrapper, num_shared_layers: Optional[int] = None, pattern: Optional[str] = None ) -> PreTrainedModelWrapper: """ Creates a static reference copy of a model. Note that model will be in `.eval()` mode. Args: model (`PreTrainedModelWrapper`): The model to be copied. num_shared_layers (`int`, *optional*): The number of initial layers that are shared between both models and kept frozen. pattern (`str`, *optional*): The shared layers are selected with a string pattern (e.g. "transformer.h.{layer}" for GPT2) and if a custom pattern is necessary it can be passed here. Returns: `PreTrainedModelWrapper` """ if is_deepspeed_zero3_enabled(): raise ValueError( "DeepSpeed ZeRO-3 is enabled and is not compatible with `create_reference_model()`. Please instantiate your reference model directly with `AutoModelForCausalLM.from_pretrained()`." ) parameter_names = [n for n, _ in model.named_parameters()] ref_model = deepcopy(model) # if no layers are shared, return copy of model if num_shared_layers is None: for param_name in parameter_names: param = ref_model.get_parameter(param_name) param.requires_grad = False return ref_model.eval() # identify layer name pattern if pattern is not None: pattern = pattern.format(layer=num_shared_layers) else: for pattern_candidate in LAYER_PATTERNS: pattern_candidate = pattern_candidate.format(layer=num_shared_layers) if any(pattern_candidate in name for name in parameter_names): pattern = pattern_candidate break if pattern is None: raise ValueError("Layer pattern could not be matched.") # divide parameters in shared and unshared parameter lists shared_param_list = [] unshared_param_list = [] shared_parameter = True for name, _param in model.named_parameters(): if pattern in name: shared_parameter = False if shared_parameter: shared_param_list.append(name) else: unshared_param_list.append(name) # create reference of the original parameter if they are shared for param_name in shared_param_list: param = model.get_parameter(param_name) param.requires_grad = False _ref_param = ref_model.get_parameter(param_name) # for all other parameters just make sure they don't use gradients for param_name in unshared_param_list: param = ref_model.get_parameter(param_name) param.requires_grad = False if pattern is not None and len(unshared_param_list) == 0: logging.warning("Pattern passed or found, but no layers matched in the model. 
Check for a typo.") return ref_model.eval() class GeometricMixtureWrapper(GenerationMixin): r""" Geometric Mixture generation wrapper that samples from the logits of two model's geometric mixture. Args: model (`PreTrainedModel`): The model to be wrapped. ref_model (`PreTrainedModel`): The reference model. generation_config (`GenerationConfig`): The generation config. mixture_coef (`float`, *optional* - default: 0.5): The mixture coefficient. """ main_input_name = "input_ids" _supports_cache_class = False _supports_static_cache = False def __init__(self, model, ref_model, generation_config, mixture_coef=0.5, device=None): super().__init__() self.model = model self.config = model.config self.ref_model = ref_model self.generation_config = generation_config self.mixture_coef = mixture_coef self.device = device def __call__(self, *args, **kwargs): return self.forward(*args, **kwargs) @torch.inference_mode() def forward(self, *args, **kwargs): model_outputs = self.model(*args, **kwargs) model_logits = model_outputs.logits ref_model_logits = self.ref_model(*args, **kwargs).logits model_outputs.logits = torch.nn.functional.log_softmax( self.mixture_coef * ref_model_logits + (1 - self.mixture_coef) * model_logits, dim=-1 ) return model_outputs def prepare_inputs_for_generation(self, *args, **kwargs): # turn off cache in the generation config kwargs["use_cache"] = False model_inputs = self.model.prepare_inputs_for_generation(*args, **kwargs) _ = self.ref_model.prepare_inputs_for_generation(*args, **kwargs) return model_inputs def _validate_model_class(self): self.model._validate_model_class() def _validate_model_kwargs(self, model_kwargs): return self.model._validate_model_kwargs(model_kwargs)
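# Editor's usage sketch (not part of the original module): a minimal, hedged example of how
# `create_reference_model` is typically paired with a TRL value-head model. The tiny checkpoint is
# the one used in the TRL test suite and is only an illustrative choice.
#
#     from trl import AutoModelForCausalLMWithValueHead, create_reference_model
#
#     model = AutoModelForCausalLMWithValueHead.from_pretrained("trl-internal-testing/tiny-Qwen2ForCausalLM-2.5")
#     ref_model = create_reference_model(model)                     # full frozen copy, returned in eval mode
#     shared = create_reference_model(model, num_shared_layers=1)   # freeze and share the first layer(s)
#
# Side note on `GeometricMixtureWrapper.forward`: applying `log_softmax` to
# `mixture_coef * ref_logits + (1 - mixture_coef) * model_logits` amounts to sampling from the
# geometric mixture p_mix(x) proportional to p_ref(x)**mixture_coef * p_model(x)**(1 - mixture_coef).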
trl/trl/models/modeling_base.py/0
{ "file_path": "trl/trl/models/modeling_base.py", "repo_id": "trl", "token_count": 13951 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import textwrap from collections import defaultdict from typing import Any, Callable, Optional, Union from warnings import warn import torch from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from huggingface_hub import PyTorchModelHubMixin from transformers import is_wandb_available from ..models import DDPOStableDiffusionPipeline from .alignprop_config import AlignPropConfig from .utils import generate_model_card, get_comet_experiment_url if is_wandb_available(): import wandb logger = get_logger(__name__) class AlignPropTrainer(PyTorchModelHubMixin): """ The AlignPropTrainer uses reward backpropagation (AlignProp) to optimise diffusion models. Note, this trainer is heavily inspired by the work here: https://github.com/mihirp1998/AlignProp/ As of now only Stable Diffusion based pipelines are supported. Attributes: config (`AlignPropConfig`): Configuration object for AlignPropTrainer. Check the documentation of `AlignPropConfig` for more details. reward_function (`Callable[[torch.Tensor, tuple[str], tuple[Any]], torch.Tensor]`): Reward function to be used prompt_function (`Callable[[], tuple[str, Any]]`): Function to generate prompts to guide model sd_pipeline (`DDPOStableDiffusionPipeline`): Stable Diffusion pipeline to be used for training. 
image_samples_hook (`Optional[Callable[[Any, Any, Any], Any]]`): Hook to be called to log images """ _tag_names = ["trl", "alignprop"] def __init__( self, config: AlignPropConfig, reward_function: Callable[[torch.Tensor, tuple[str], tuple[Any]], torch.Tensor], prompt_function: Callable[[], tuple[str, Any]], sd_pipeline: DDPOStableDiffusionPipeline, image_samples_hook: Optional[Callable[[Any, Any, Any], Any]] = None, ): if image_samples_hook is None: warn("No image_samples_hook provided; no images will be logged") self.prompt_fn = prompt_function self.reward_fn = reward_function self.config = config self.image_samples_callback = image_samples_hook accelerator_project_config = ProjectConfiguration(**self.config.project_kwargs) if self.config.resume_from: self.config.resume_from = os.path.normpath(os.path.expanduser(self.config.resume_from)) if "checkpoint_" not in os.path.basename(self.config.resume_from): # get the most recent checkpoint in this directory checkpoints = list( filter( lambda x: "checkpoint_" in x, os.listdir(self.config.resume_from), ) ) if len(checkpoints) == 0: raise ValueError(f"No checkpoints found in {self.config.resume_from}") checkpoint_numbers = sorted([int(x.split("_")[-1]) for x in checkpoints]) self.config.resume_from = os.path.join( self.config.resume_from, f"checkpoint_{checkpoint_numbers[-1]}", ) accelerator_project_config.iteration = checkpoint_numbers[-1] + 1 self.accelerator = Accelerator( log_with=self.config.log_with, mixed_precision=self.config.mixed_precision, project_config=accelerator_project_config, # we always accumulate gradients across timesteps; we want config.train.gradient_accumulation_steps to be the # number of *samples* we accumulate across, so we need to multiply by the number of training timesteps to get # the total number of optimizer steps to accumulate across. gradient_accumulation_steps=self.config.train_gradient_accumulation_steps, **self.config.accelerator_kwargs, ) is_using_tensorboard = config.log_with is not None and config.log_with == "tensorboard" if self.accelerator.is_main_process: self.accelerator.init_trackers( self.config.tracker_project_name, config=dict(alignprop_trainer_config=config.to_dict()) if not is_using_tensorboard else config.to_dict(), init_kwargs=self.config.tracker_kwargs, ) logger.info(f"\n{config}") set_seed(self.config.seed, device_specific=True) self.sd_pipeline = sd_pipeline self.sd_pipeline.set_progress_bar_config( position=1, disable=not self.accelerator.is_local_main_process, leave=False, desc="Timestep", dynamic_ncols=True, ) # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. 
if self.accelerator.mixed_precision == "fp16": inference_dtype = torch.float16 elif self.accelerator.mixed_precision == "bf16": inference_dtype = torch.bfloat16 else: inference_dtype = torch.float32 self.sd_pipeline.vae.to(self.accelerator.device, dtype=inference_dtype) self.sd_pipeline.text_encoder.to(self.accelerator.device, dtype=inference_dtype) self.sd_pipeline.unet.to(self.accelerator.device, dtype=inference_dtype) trainable_layers = self.sd_pipeline.get_trainable_layers() self.accelerator.register_save_state_pre_hook(self._save_model_hook) self.accelerator.register_load_state_pre_hook(self._load_model_hook) # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if self.config.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True self.optimizer = self._setup_optimizer( trainable_layers.parameters() if not isinstance(trainable_layers, list) else trainable_layers ) self.neg_prompt_embed = self.sd_pipeline.text_encoder( self.sd_pipeline.tokenizer( [""] if self.config.negative_prompts is None else self.config.negative_prompts, return_tensors="pt", padding="max_length", truncation=True, max_length=self.sd_pipeline.tokenizer.model_max_length, ).input_ids.to(self.accelerator.device) )[0] # NOTE: for some reason, autocast is necessary for non-lora training but for lora training it isn't necessary and it uses # more memory self.autocast = self.sd_pipeline.autocast or self.accelerator.autocast if hasattr(self.sd_pipeline, "use_lora") and self.sd_pipeline.use_lora: unet, self.optimizer = self.accelerator.prepare(trainable_layers, self.optimizer) self.trainable_layers = list(filter(lambda p: p.requires_grad, unet.parameters())) else: self.trainable_layers, self.optimizer = self.accelerator.prepare(trainable_layers, self.optimizer) if config.resume_from: logger.info(f"Resuming from {config.resume_from}") self.accelerator.load_state(config.resume_from) self.first_epoch = int(config.resume_from.split("_")[-1]) + 1 else: self.first_epoch = 0 def compute_rewards(self, prompt_image_pairs): reward, reward_metadata = self.reward_fn( prompt_image_pairs["images"], prompt_image_pairs["prompts"], prompt_image_pairs["prompt_metadata"] ) return reward def step(self, epoch: int, global_step: int): """ Perform a single step of training. Args: epoch (int): The current epoch. global_step (int): The current global step. Side Effects: - Model weights are updated - Logs the statistics to the accelerator trackers. - If `self.image_samples_callback` is not None, it will be called with the prompt_image_pairs, global_step, and the accelerator tracker. Returns: global_step (int): The updated global step. 
""" info = defaultdict(list) self.sd_pipeline.unet.train() for _ in range(self.config.train_gradient_accumulation_steps): with self.accelerator.accumulate(self.sd_pipeline.unet), self.autocast(), torch.enable_grad(): prompt_image_pairs = self._generate_samples( batch_size=self.config.train_batch_size, ) rewards = self.compute_rewards(prompt_image_pairs) prompt_image_pairs["rewards"] = rewards rewards_vis = self.accelerator.gather(rewards).detach().cpu().numpy() loss = self.calculate_loss(rewards) self.accelerator.backward(loss) if self.accelerator.sync_gradients: self.accelerator.clip_grad_norm_( self.trainable_layers.parameters() if not isinstance(self.trainable_layers, list) else self.trainable_layers, self.config.train_max_grad_norm, ) self.optimizer.step() self.optimizer.zero_grad() info["reward_mean"].append(rewards_vis.mean()) info["reward_std"].append(rewards_vis.std()) info["loss"].append(loss.item()) # Checks if the accelerator has performed an optimization step behind the scenes if self.accelerator.sync_gradients: # log training-related stuff info = {k: torch.mean(torch.tensor(v)) for k, v in info.items()} info = self.accelerator.reduce(info, reduction="mean") info.update({"epoch": epoch}) self.accelerator.log(info, step=global_step) global_step += 1 info = defaultdict(list) else: raise ValueError( "Optimization step should have been performed by this point. Please check calculated gradient accumulation settings." ) # Logs generated images if self.image_samples_callback is not None and global_step % self.config.log_image_freq == 0: self.image_samples_callback(prompt_image_pairs, global_step, self.accelerator.trackers[0]) if epoch != 0 and epoch % self.config.save_freq == 0 and self.accelerator.is_main_process: self.accelerator.save_state() return global_step def calculate_loss(self, rewards): """ Calculate the loss for a batch of an unpacked sample Args: rewards (torch.Tensor): Differentiable reward scalars for each generated image, shape: [batch_size] Returns: loss (torch.Tensor) (all of these are of shape (1,)) """ # Loss is specific to Aesthetic Reward function used in AlignProp (https://huggingface.co/papers/2310.03739) loss = 10.0 - (rewards).mean() return loss def loss( self, advantages: torch.Tensor, clip_range: float, ratio: torch.Tensor, ): unclipped_loss = -advantages * ratio clipped_loss = -advantages * torch.clamp( ratio, 1.0 - clip_range, 1.0 + clip_range, ) return torch.mean(torch.maximum(unclipped_loss, clipped_loss)) def _setup_optimizer(self, trainable_layers_parameters): if self.config.train_use_8bit_adam: import bitsandbytes optimizer_cls = bitsandbytes.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW return optimizer_cls( trainable_layers_parameters, lr=self.config.train_learning_rate, betas=(self.config.train_adam_beta1, self.config.train_adam_beta2), weight_decay=self.config.train_adam_weight_decay, eps=self.config.train_adam_epsilon, ) def _save_model_hook(self, models, weights, output_dir): self.sd_pipeline.save_checkpoint(models, weights, output_dir) weights.pop() # ensures that accelerate doesn't try to handle saving of the model def _load_model_hook(self, models, input_dir): self.sd_pipeline.load_checkpoint(models, input_dir) models.pop() # ensures that accelerate doesn't try to handle loading of the model def _generate_samples(self, batch_size, with_grad=True, prompts=None): """ Generate samples from the model Args: batch_size (int): Batch size to use for sampling with_grad (bool): Whether the generated RGBs should have gradients 
attached to it. Returns: prompt_image_pairs (dict[Any]) """ prompt_image_pairs = {} sample_neg_prompt_embeds = self.neg_prompt_embed.repeat(batch_size, 1, 1) if prompts is None: prompts, prompt_metadata = zip(*[self.prompt_fn() for _ in range(batch_size)]) else: prompt_metadata = [{} for _ in range(batch_size)] prompt_ids = self.sd_pipeline.tokenizer( prompts, return_tensors="pt", padding="max_length", truncation=True, max_length=self.sd_pipeline.tokenizer.model_max_length, ).input_ids.to(self.accelerator.device) prompt_embeds = self.sd_pipeline.text_encoder(prompt_ids)[0] if with_grad: sd_output = self.sd_pipeline.rgb_with_grad( prompt_embeds=prompt_embeds, negative_prompt_embeds=sample_neg_prompt_embeds, num_inference_steps=self.config.sample_num_steps, guidance_scale=self.config.sample_guidance_scale, eta=self.config.sample_eta, truncated_backprop_rand=self.config.truncated_backprop_rand, truncated_backprop_timestep=self.config.truncated_backprop_timestep, truncated_rand_backprop_minmax=self.config.truncated_rand_backprop_minmax, output_type="pt", ) else: sd_output = self.sd_pipeline( prompt_embeds=prompt_embeds, negative_prompt_embeds=sample_neg_prompt_embeds, num_inference_steps=self.config.sample_num_steps, guidance_scale=self.config.sample_guidance_scale, eta=self.config.sample_eta, output_type="pt", ) images = sd_output.images prompt_image_pairs["images"] = images prompt_image_pairs["prompts"] = prompts prompt_image_pairs["prompt_metadata"] = prompt_metadata return prompt_image_pairs def train(self, epochs: Optional[int] = None): """ Train the model for a given number of epochs """ global_step = 0 if epochs is None: epochs = self.config.num_epochs for epoch in range(self.first_epoch, epochs): global_step = self.step(epoch, global_step) def _save_pretrained(self, save_directory): self.sd_pipeline.save_pretrained(save_directory) self.create_model_card() def create_model_card( self, model_name: Optional[str] = None, dataset_name: Optional[str] = None, tags: Union[str, list[str], None] = None, ): """ Creates a draft of a model card using the information available to the `Trainer`. Args: model_name (`str` or `None`, *optional*, defaults to `None`): Name of the model. dataset_name (`str` or `None`, *optional*, defaults to `None`): Name of the dataset used for training. tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`): Tags to be associated with the model card. 
""" if not self.is_world_process_zero(): return if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path): base_model = self.model.config._name_or_path else: base_model = None tags = tags or [] if isinstance(tags, str): tags = [tags] if hasattr(self.model.config, "unsloth_version"): tags.append("unsloth") citation = textwrap.dedent("""\ @article{prabhudesai2024aligning, title = {{Aligning Text-to-Image Diffusion Models with Reward Backpropagation}}, author = {Mihir Prabhudesai and Anirudh Goyal and Deepak Pathak and Katerina Fragkiadaki}, year = 2024, eprint = {arXiv:2310.03739} }""") model_card = generate_model_card( base_model=base_model, model_name=model_name, hub_model_id=self.hub_model_id, dataset_name=dataset_name, tags=tags, wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None, comet_url=get_comet_experiment_url(), trainer_name="AlignProp", trainer_citation=citation, paper_title="Aligning Text-to-Image Diffusion Models with Reward Backpropagation", paper_id="2310.03739", ) model_card.save(os.path.join(self.args.output_dir, "README.md"))
trl/trl/trainer/alignprop_trainer.py/0
{ "file_path": "trl/trl/trainer/alignprop_trainer.py", "repo_id": "trl", "token_count": 8255 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Any, Optional from transformers import TrainingArguments @dataclass class KTOConfig(TrainingArguments): r""" Configuration class for the [`KTOTrainer`]. Using [`~transformers.HfArgumentParser`] we can turn this class into [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the command line. Parameters: learning_rate (`float`, *optional*, defaults to `1e-6`): Initial learning rate for [`AdamW`] optimizer. The default value replaces that of [`~transformers.TrainingArguments`]. max_length (`int` or `None`, *optional*, defaults to `1024`): Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want to use the default data collator. max_prompt_length (`int` or `None`, *optional*, defaults to `512`): Maximum length of the prompt. This argument is required if you want to use the default data collator. max_completion_length (`int` or `None`, *optional*, defaults to `None`): Maximum length of the completion. This argument is required if you want to use the default data collator and your model is an encoder-decoder. beta (`float`, *optional*, defaults to `0.1`): Parameter controlling the deviation from the reference model. Higher β means less deviation from the reference model. loss_type (`str`, *optional*, defaults to `"kto"`): Type of loss to use. Possible values are: - `"kto"`: KTO loss from the [KTO](https://huggingface.co/papers/2402.01306) paper. - `"apo_zero_unpaired"`: Unpaired variant of APO-zero loss from the [APO](https://huggingface.co/papers/2408.06266) paper. desirable_weight (`float`, *optional*, defaults to `1.0`): Desirable losses are weighed by this factor to counter unequal number of desirable and undesirable pairs. undesirable_weight (`float`, *optional*, defaults to `1.0`): Undesirable losses are weighed by this factor to counter unequal number of desirable and undesirable pairs. label_pad_token_id (`int`, *optional*, defaults to `-100`): Label pad token id. This argument is required if you want to use the default data collator. padding_value (`int` or `None`, *optional*, defaults to `None`): Padding value to use. If `None`, the padding value of the tokenizer is used. truncation_mode (`str`, *optional*, defaults to `"keep_end"`): Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`. This argument is required if you want to use the default data collator. generate_during_eval (`bool`, *optional*, defaults to `False`): If `True`, generates and logs completions from both the model and the reference model to W&B or Comet during evaluation. 
is_encoder_decoder (`bool` or `None`, *optional*, defaults to `None`): When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument, you need to specify if the model returned by the callable is an encoder-decoder model. precompute_ref_log_probs (`bool`, *optional*, defaults to `False`): Whether to precompute reference model log probabilities for training and evaluation datasets. This is useful when training without the reference model to reduce the total GPU memory needed. model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`): Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a string. ref_model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`): Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the reference model from a string. dataset_num_proc: (`int` or `None`, *optional*, defaults to `None`): Number of processes to use for processing the dataset. disable_dropout (`bool`, *optional*, defaults to `True`): Whether to disable dropout in the model and reference model. """ learning_rate: float = field( default=1e-6, metadata={ "help": "Initial learning rate for `AdamW` optimizer. The default value replaces that of " "`transformers.TrainingArguments`." }, ) max_length: Optional[int] = field( default=1024, metadata={"help": "Maximum length of the sequences (prompt + completion) in the batch."}, ) max_prompt_length: Optional[int] = field( default=512, metadata={ "help": "Maximum length of the prompt. This argument is required if you want to use the default data " "collator and your model is an encoder-decoder." }, ) max_completion_length: Optional[int] = field( default=None, metadata={ "help": "Maximum length of the completion. This argument is required if you want to use the default data " "collator and your model is an encoder-decoder." }, ) beta: float = field( default=0.1, metadata={ "help": "Parameter controlling the deviation from the reference model. Higher β means less deviation from " "the reference model." }, ) loss_type: str = field( default="kto", metadata={ "help": "Type of loss to use.", "choices": ["kto", "apo_zero_unpaired"], }, ) desirable_weight: float = field( default=1.0, metadata={ "help": "Desirable losses are weighed by this factor to counter unequal number of desirable and " "undesirable pairs.", }, ) undesirable_weight: float = field( default=1.0, metadata={ "help": "Undesirable losses are weighed by this factor to counter unequal number of desirable and " "undesirable pairs.", }, ) label_pad_token_id: int = field( default=-100, metadata={ "help": "Label pad token id. This argument is required if you want to use the default data collator." }, ) padding_value: Optional[int] = field( default=None, metadata={"help": "Padding value to use. If `None`, the padding value of the tokenizer is used."}, ) truncation_mode: str = field( default="keep_end", metadata={ "help": "Truncation mode to use when the prompt is too long.", "choices": ["keep_end", "keep_start"], }, ) generate_during_eval: bool = field( default=False, metadata={ "help": "If `True`, generates and logs completions from both the model and the reference model to W&B " "during evaluation." 
}, ) is_encoder_decoder: Optional[bool] = field( default=None, metadata={ "help": "When using the `model_init` argument (callable) to instantiate the model instead of the `model` " "argument, you need to specify if the model returned by the callable is an encoder-decoder model." }, ) disable_dropout: bool = field( default=True, metadata={"help": "Whether to disable dropout in the model."}, ) precompute_ref_log_probs: bool = field( default=False, metadata={ "help": "Whether to precompute reference model log probabilities for training and evaluation datasets. " "This is useful when training without the reference model to reduce the total GPU memory needed." }, ) model_init_kwargs: Optional[dict[str, Any]] = field( default=None, metadata={ "help": "Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model " "from a string." }, ) ref_model_init_kwargs: Optional[dict[str, Any]] = field( default=None, metadata={ "help": "Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the " "reference model from a string." }, ) dataset_num_proc: Optional[int] = field( default=None, metadata={"help": "Number of processes to use for processing the dataset."}, )
trl/trl/trainer/kto_config.py/0
{ "file_path": "trl/trl/trainer/kto_config.py", "repo_id": "trl", "token_count": 3485 }
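To make the options above concrete, here is a minimal usage sketch of `KTOConfig`. It assumes `trl` is installed and exposes `KTOConfig` at the package root; the output directory and hyperparameter values are placeholders, not recommendations.

```python
from trl import KTOConfig

# Placeholder values; anything omitted falls back to the dataclass defaults
# defined above (e.g. loss_type="kto", beta=0.1).
config = KTOConfig(
    output_dir="./kto-output",   # where checkpoints and logs go (TrainingArguments field)
    learning_rate=1e-6,
    max_length=1024,
    max_prompt_length=512,
    desirable_weight=1.0,
    undesirable_weight=1.0,
)
print(config.loss_type, config.beta)  # -> kto 0.1
```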
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import math
import os
import textwrap
import time
from collections import defaultdict
from typing import Callable, Optional, Union

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from accelerate import Accelerator
from accelerate.utils import broadcast, gather_object
from datasets import Dataset
from torch.utils.data import DataLoader
from transformers import (
    BaseImageProcessor,
    DataCollatorWithPadding,
    FeatureExtractionMixin,
    GenerationConfig,
    PreTrainedTokenizerBase,
    ProcessorMixin,
    Trainer,
    TrainerCallback,
    TrainerControl,
    is_wandb_available,
)
from transformers.integrations import get_reporting_integration_callbacks
from transformers.trainer import DEFAULT_CALLBACKS, DEFAULT_PROGRESS_CALLBACK
from transformers.trainer_callback import CallbackHandler, ExportableState, PrinterCallback

from ..models.utils import unwrap_model_for_generation
from ..trainer.utils import (
    OnlineTrainerState,
    batch_generation,
    disable_dropout_in_model,
    exact_div,
    first_true_indices,
    forward,
    get_reward,
    prepare_deepspeed,
    print_rich_table,
    selective_log_softmax,
    truncate_response,
)
from .rloo_config import RLOOConfig
from .utils import generate_model_card, get_comet_experiment_url, log_table_to_comet_experiment


if is_wandb_available():
    import wandb


INVALID_LOGPROB = 1.0


class RLOOTrainer(Trainer):
    _tag_names = ["trl", "rloo"]

    def __init__(
        self,
        config: RLOOConfig,
        processing_class: Optional[
            Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
        ],
        policy: nn.Module,
        ref_policy: nn.Module,
        reward_model: Union[nn.Module, Callable[[list[str]], list[float]]],
        train_dataset: Dataset,
        data_collator: Optional[DataCollatorWithPadding] = None,
        eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
        # less commonly used
        optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
        callbacks: Optional[list[TrainerCallback]] = None,
    ) -> None:
        if ref_policy is policy:
            raise ValueError(
                "`policy` and `ref_policy` cannot be the same object. If you want `ref_policy` to be the "
                "same as `policy`, you must make a copy of it, or `None` if you use peft."
) self.args = config args = config self.processing_class = processing_class self.policy = policy # Define the collator if not provided if data_collator is None: data_collator = DataCollatorWithPadding(self.processing_class) self.policy.generation_config.eos_token_id = ( None # disable `pad_token_id` and `eos_token_id` because we just want to ) self.policy.generation_config.pad_token_id = None # generate tokens without truncation / padding self.ref_policy = ref_policy self.reward_model = reward_model self.train_dataset = train_dataset self.train_dataset_len = len(train_dataset) self.data_collator = data_collator self.eval_dataset = eval_dataset self.optimizer, self.lr_scheduler = optimizers self.optimizer_cls_and_kwargs = None # needed for transformers >= 4.47 ######### # calculate various batch sizes ######### if args.total_episodes is None: # allow the users to define episodes in terms of epochs. args.total_episodes = int(args.num_train_epochs * self.train_dataset_len) accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps) self.accelerator = accelerator args.world_size = accelerator.num_processes args.local_batch_size = ( args.per_device_train_batch_size * args.gradient_accumulation_steps * args.num_mini_batches ) args.micro_batch_size = int(args.per_device_train_batch_size * args.world_size) args.batch_size = int(args.local_batch_size * args.world_size) args.mini_batch_size = exact_div( args.batch_size, args.num_mini_batches, "`batch_size` must be a multiple of `num_mini_batches`" ) args.local_mini_batch_size = exact_div( args.local_batch_size, args.num_mini_batches, "`local_batch_size` must be a multiple of `num_mini_batches`" ) args.num_total_batches = math.ceil( args.total_episodes / args.batch_size ) # we may train for more than `total_episodes` time_tensor = torch.tensor(int(time.time()), device=accelerator.device) time_int = broadcast(time_tensor, 0).item() # avoid different timestamps across processes args.run_name = f"{args.exp_name}__{args.seed}__{time_int}" self.local_seed = args.seed + accelerator.process_index * 100003 # Prime if args.num_sample_generations > 0: self.sample_generations_freq = max(1, args.num_total_batches // args.num_sample_generations) self.local_dataloader_batch_size = exact_div( args.local_batch_size, args.rloo_k, "`local_batch_size` must be a multiple of rloo_k" ) # RLOO logic: needed because RLOO repeats the same prompt args.rloo_k times ######### # setup model, optimizer, and others ######### for module in [policy, ref_policy, reward_model]: if isinstance(module, nn.Module): disable_dropout_in_model(module) if args.stop_token and args.stop_token == "eos": args.stop_token_id = self.processing_class.eos_token_id self.model = policy self.create_optimizer_and_scheduler( num_training_steps=args.num_total_batches ) # note that we are calling `self.lr_scheduler.step()` manually only at the batch level ######### ### trainer specifics ######### default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to) self.callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks self.callback_handler = CallbackHandler( self.callbacks, self.model, self.processing_class, self.optimizer, self.lr_scheduler ) self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK) self.control = TrainerControl() self.state = OnlineTrainerState( is_local_process_zero=self.is_local_process_zero(), is_world_process_zero=self.is_world_process_zero(), 
stateful_callbacks=[ cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState) ], ) self.current_flos = 0 self.hp_search_backend = None self.is_deepspeed_enabled = getattr(self.accelerator.state, "deepspeed_plugin", None) is not None self.is_fsdp_enabled = getattr(self.accelerator.state, "fsdp_plugin", None) is not None # Create distant repo and output directory if needed self.hub_model_id = None if self.args.push_to_hub: self.init_hf_repo() if self.args.should_save: os.makedirs(self.args.output_dir, exist_ok=True) self.backup_model = None # Add tags for models that have been loaded with the correct transformers version if hasattr(self.model, "add_model_tags"): self.model.add_model_tags(self._tag_names) ######### ### setup dataloader ######### self.dataloader = DataLoader( self.train_dataset, batch_size=self.local_dataloader_batch_size, shuffle=True, collate_fn=self.data_collator, drop_last=True, # needed; otherwise the last batch will be of ragged shape ) # sync random states for DataLoader(shuffle=True) before `accelerator.prepare` # see https://gist.github.com/vwxyzjn/2581bff1e48e185e0b85b6dfe1def79c torch.manual_seed(args.seed) self.model, self.optimizer, self.dataloader = accelerator.prepare(self.model, self.optimizer, self.dataloader) torch.manual_seed(self.local_seed) # reset the local seed again self.eval_dataloader = DataLoader( self.eval_dataset, batch_size=args.per_device_eval_batch_size, collate_fn=self.data_collator, drop_last=True, ) # no need to shuffle eval dataset self.eval_dataloader = accelerator.prepare(self.eval_dataloader) if self.is_deepspeed_enabled: if isinstance(self.reward_model, nn.Module): self.reward_model = prepare_deepspeed( self.reward_model, args.per_device_train_batch_size, args.fp16, args.bf16 ) self.ref_policy = prepare_deepspeed( self.ref_policy, args.per_device_train_batch_size, args.fp16, args.bf16 ) self.deepspeed = self.model else: self.ref_policy = self.ref_policy.to(self.accelerator.device) if isinstance(self.reward_model, nn.Module): self.reward_model = self.reward_model.to(self.accelerator.device) def get_train_dataloader(self) -> DataLoader: return self.dataloader def get_eval_dataloader(self) -> DataLoader: return self.eval_dataloader def train(self): args = self.args accelerator = self.accelerator optimizer = self.optimizer model = self.model self.model_wrapped = self.model ref_policy = self.ref_policy reward_model = self.reward_model processing_class = self.processing_class dataloader = self.dataloader device = accelerator.device def repeat_generator(): while True: yield from dataloader iter_dataloader = iter(repeat_generator()) generation_config = GenerationConfig( max_new_tokens=args.response_length, temperature=(args.temperature + 1e-7), top_k=0.0, top_p=1.0, do_sample=True, ) accelerator.print("===training policy===") start_time = time.time() stats_shape = (args.num_ppo_epochs, args.num_mini_batches, args.gradient_accumulation_steps) approxkl_stats = torch.zeros(stats_shape, device=device) pg_clipfrac_stats = torch.zeros(stats_shape, device=device) pg_loss_stats = torch.zeros(stats_shape, device=device) vf_clipfrac_stats = torch.zeros(stats_shape, device=device) entropy_stats = torch.zeros(stats_shape, device=device) ratio_stats = torch.zeros(stats_shape, device=device) model.train() # trainer state initialization self.state.global_step = 0 self.state.episode = 0 self.state.max_steps = (args.num_total_batches * args.num_mini_batches) // 2 self.state.num_train_epochs = args.total_episodes / 
self.train_dataset_len # Compute absolute values for logging, eval, and save if given as ratio if args.logging_steps is not None: if args.logging_steps < 1: self.state.logging_steps = math.ceil(self.state.max_steps * args.logging_steps) else: self.state.logging_steps = args.logging_steps if args.eval_steps is not None: if args.eval_steps < 1: self.state.eval_steps = math.ceil(self.state.max_steps * args.eval_steps) else: self.state.eval_steps = args.eval_steps if args.save_steps is not None: if args.save_steps < 1: self.state.save_steps = math.ceil(self.state.max_steps * args.save_steps) else: self.state.save_steps = args.save_steps self.control = self.callback_handler.on_train_begin(args, self.state, self.control) for update in range(1, args.num_total_batches + 1): self.state.episode += 1 * args.batch_size data = next(iter_dataloader) with torch.no_grad(): queries = data["input_ids"].to(device) queries = queries.repeat(args.rloo_k, 1) context_length = queries.shape[1] responses = [] postprocessed_responses = [] logprobs = [] ref_logprobs = [] scores = [] sequence_lengths = [] # Generate responses and compute logprobs with unwrap_model_for_generation( self.model, self.accelerator, gather_deepspeed3_params=self.args.ds3_gather_for_generation ) as unwrapped_model: query_responses, logitss = batch_generation( unwrapped_model, queries, args.local_rollout_forward_batch_size, processing_class.pad_token_id, generation_config, ) # Process responses in batches for i in range(0, queries.shape[0], args.local_rollout_forward_batch_size): query = queries[i : i + args.local_rollout_forward_batch_size] query_response = query_responses[i : i + args.local_rollout_forward_batch_size] response = query_response[:, context_length:] logits = logitss[i : i + args.local_rollout_forward_batch_size] logprob = selective_log_softmax(logits, response) del logits torch.cuda.empty_cache() ref_output = forward(ref_policy, query_response, processing_class.pad_token_id) ref_logits = ref_output.logits[:, context_length - 1 : -1] ref_logits /= args.temperature + 1e-7 ref_logprob = selective_log_softmax(ref_logits, response) del ref_output, ref_logits torch.cuda.empty_cache() # Response Processing 1. truncate response after the first occurrence of `stop_token_id` postprocessed_response = response if args.stop_token_id is not None: # handle the edge case when stop_token_id exists but is 0 postprocessed_response = truncate_response( args.stop_token_id, processing_class.pad_token_id, response ) # Response Processing 2. 
run reward model on the truncated responses postprocessed_query_response = torch.cat((query, postprocessed_response), 1) sequence_length = first_true_indices(postprocessed_response == processing_class.pad_token_id) - 1 if isinstance(reward_model, nn.Module): _, score, _ = get_reward( reward_model, postprocessed_query_response, processing_class.pad_token_id, context_length ) else: score = torch.tensor( reward_model( processing_class.batch_decode(postprocessed_query_response, skip_special_tokens=True) ), dtype=torch.float, ).to(device) # Store batch results responses.append(response) postprocessed_responses.append(postprocessed_response) logprobs.append(logprob) ref_logprobs.append(ref_logprob) sequence_lengths.append(sequence_length) scores.append(score) # Concatenate all batched results responses = torch.cat(responses, 0) postprocessed_responses = torch.cat(postprocessed_responses, 0) logprobs = torch.cat(logprobs, 0) ref_logprobs = torch.cat(ref_logprobs, 0) sequence_lengths = torch.cat(sequence_lengths, 0) scores = torch.cat(scores, 0) del (logprob, ref_logprob, score) torch.cuda.empty_cache() gc.collect() # Response Processing 3. filter response. Ensure that the sample contains stop_token_id # responses not passing that filter will receive a low (fixed) score # only query humans on responses that pass that filter contain_eos_token = torch.any(postprocessed_responses == processing_class.eos_token_id, dim=-1) if args.missing_eos_penalty is not None: scores[~contain_eos_token] -= self.args.missing_eos_penalty # accelerator.print(f"{scores=}, {(contain_eos_token.sum() / len(contain_eos_token))=}") # be very careful with `padding_mask_p1`; see https://excalidraw.com/#json=LWnzG4w2k5DjF_EOL_xPt,e2w3a-hFJ_gX5vOfeyXGTw response_idxs = torch.arange(responses.shape[1], device=responses.device).repeat(responses.shape[0], 1) padding_mask = response_idxs > sequence_lengths.unsqueeze(1) logprobs = torch.masked_fill(logprobs, padding_mask, INVALID_LOGPROB) ref_logprobs = torch.masked_fill(ref_logprobs, padding_mask, INVALID_LOGPROB) # 4. 
compute rewards # Compute KL divergence kl = logprobs - ref_logprobs # Normalize rewards if args.normalize_reward: scores = (scores - scores.mean()) / (scores.std() + 1e-8) scores = torch.clamp(scores, -args.reward_clip_range, args.reward_clip_range) # Compute total reward with KL penalty if args.token_level_kl: # Token-level KL penalty: apply KL penalty per token kl_reward = -args.kl_coef * kl # Get the index of the last non-padded token for each sequence eos_indices = padding_mask.size(1) - 1 - padding_mask.long().fliplr().argmax(dim=1, keepdim=True) last_reward = torch.zeros_like(kl) # Ensure scores has correct shape and type scores_shaped = scores.reshape(-1, 1).to(kl.dtype) last_reward.scatter_(dim=1, index=eos_indices, src=scores_shaped) # Combine KL reward and last reward non_score_reward = kl_reward.sum(1) # Keep this for logging reward = last_reward + kl_reward rlhf_reward = reward.sum(1) # Sum across sequence length else: # Sequence-level KL penalty: sum KL across tokens first sequence_kl = kl.sum(1) non_score_reward = -args.kl_coef * sequence_kl rlhf_reward = non_score_reward + scores # vectorized RLOO advantages implementation rlhf_reward = rlhf_reward.reshape(args.rloo_k, -1) baseline = (rlhf_reward.sum(0) - rlhf_reward) / (args.rloo_k - 1) advantages = rlhf_reward - baseline advantages = advantages.flatten() # Normalize advantages if args.normalize_advantage: advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) torch.cuda.empty_cache() # Do multiple epochs of PPO training, with a fresh random shuffle in each epoch for ppo_epoch_idx in range(args.num_ppo_epochs): b_inds = np.random.permutation(args.local_batch_size) minibatch_idx = 0 for mini_batch_start in range(0, args.local_batch_size, args.local_mini_batch_size): mini_batch_end = mini_batch_start + args.local_mini_batch_size mini_batch_inds = b_inds[mini_batch_start:mini_batch_end] gradient_accumulation_idx = 0 for micro_batch_start in range(0, args.local_mini_batch_size, args.per_device_train_batch_size): with accelerator.accumulate(model): micro_batch_end = micro_batch_start + args.per_device_train_batch_size micro_batch_inds = mini_batch_inds[micro_batch_start:micro_batch_end] # Get batch data mb_advantage = advantages[micro_batch_inds] mb_responses = responses[micro_batch_inds] mb_query_responses = query_responses[micro_batch_inds] mb_logprobs = logprobs[micro_batch_inds] # Forward pass output = forward(model, mb_query_responses, processing_class.pad_token_id) logits = output.logits[:, context_length - 1 : -1] logits /= args.temperature + 1e-7 # Compute new logprobs new_logprobs = selective_log_softmax(logits, mb_responses) new_logprobs = torch.masked_fill( new_logprobs, padding_mask[micro_batch_inds], INVALID_LOGPROB ) # Compute probability ratios new_ratio = (new_logprobs - mb_logprobs).exp() new_logprobs = new_logprobs.sum(1) mb_logprobs = mb_logprobs.sum(1) logprobs_diff = new_logprobs - mb_logprobs ratio = torch.exp(logprobs_diff) # PPO clipped loss pg_losses = -mb_advantage * ratio pg_losses2 = -mb_advantage * torch.clamp(ratio, 1.0 - args.cliprange, 1.0 + args.cliprange) pg_loss_max = torch.max(pg_losses, pg_losses2) pg_loss = pg_loss_max.mean() # Final loss loss = pg_loss # Optimization step accelerator.backward(loss) optimizer.step() optimizer.zero_grad() with torch.no_grad(): pg_clipfrac = (pg_losses2 > pg_losses).float().mean() prob_dist = torch.nn.functional.softmax(logits, dim=-1) entropy = torch.logsumexp(logits, dim=-1) - torch.sum(prob_dist * logits, dim=-1) approxkl = 0.5 * 
(logprobs_diff**2).mean() approxkl_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = approxkl pg_clipfrac_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = ( pg_clipfrac ) pg_loss_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = pg_loss entropy_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = entropy.mean() ratio_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = new_ratio.mean() gradient_accumulation_idx += 1 minibatch_idx += 1 # del everything and empty cache # fmt: off del ( output, logits, new_logprobs, logprobs_diff, ratio, pg_losses, pg_losses2, pg_loss, loss, pg_clipfrac, prob_dist, entropy, approxkl, mb_advantage, mb_responses, mb_query_responses, mb_logprobs, ) # fmt: on torch.cuda.empty_cache() # Compute metrics with torch.no_grad(): mean_kl = kl.sum(1).mean() mean_entropy = (-logprobs).sum(1).mean() mean_non_score_reward = non_score_reward.mean() eps = int(self.state.episode / (time.time() - start_time)) metrics = {} metrics["eps"] = eps metrics["objective/kl"] = self.accelerator.gather_for_metrics(mean_kl).mean().item() metrics["objective/entropy"] = self.accelerator.gather_for_metrics(mean_entropy).mean().item() metrics["objective/non_score_reward"] = ( self.accelerator.gather_for_metrics(mean_non_score_reward).mean().item() ) metrics["objective/rlhf_reward"] = self.accelerator.gather_for_metrics(rlhf_reward).mean().item() metrics["objective/scores"] = self.accelerator.gather_for_metrics(scores.mean()).mean().item() metrics["policy/approxkl_avg"] = self.accelerator.gather_for_metrics(approxkl_stats).mean().item() metrics["policy/clipfrac_avg"] = self.accelerator.gather_for_metrics(pg_clipfrac_stats).mean().item() metrics["loss/policy_avg"] = self.accelerator.gather_for_metrics(pg_loss_stats).mean().item() metrics["val/clipfrac_avg"] = self.accelerator.gather_for_metrics(vf_clipfrac_stats).mean().item() metrics["policy/entropy_avg"] = self.accelerator.gather_for_metrics(entropy_stats).mean().item() metrics["val/ratio"] = self.accelerator.gather_for_metrics(ratio_stats).mean().item() metrics["val/ratio_var"] = self.accelerator.gather_for_metrics(ratio_stats).var().item() metrics["val/num_eos_tokens"] = (responses == processing_class.eos_token_id).sum().item() metrics["lr"] = self.lr_scheduler.get_last_lr()[0] metrics["episode"] = self.state.episode self.state.epoch = self.state.episode / (args.rloo_k * self.train_dataset_len) # used by self.log self.log(metrics) del kl, mean_kl, mean_entropy, scores self.lr_scheduler.step() self.state.global_step += 1 self.control = self.callback_handler.on_step_end(args, self.state, self.control) if self.control.should_save: self._save_checkpoint(model, trial=None) self.control = self.callback_handler.on_save(self.args, self.state, self.control) torch.cuda.empty_cache() gc.collect() if args.num_sample_generations > 0 and (update - 1) % self.sample_generations_freq == 0: self.generate_completions(sampling=True) # HF trainer specifics self.control = self.callback_handler.on_train_end(args, self.state, self.control) if self.control.should_save: self._save_checkpoint(model, trial=None, metrics=None) self.control = self.callback_handler.on_save(self.args, self.state, self.control) def generate_completions(self, sampling: bool = False): args = self.args processing_class = self.processing_class generation_config = GenerationConfig( max_new_tokens=self.args.response_length, temperature=(0.01 + 1e-7), top_k=0.0, top_p=1.0, do_sample=True, ) table = defaultdict(list) with 
unwrap_model_for_generation( self.model, self.accelerator, gather_deepspeed3_params=self.args.ds3_gather_for_generation ) as unwrapped_model: for batch in self.eval_dataloader: query = batch["input_ids"] with torch.no_grad(): context_length = query.shape[1] query_response, _ = batch_generation( unwrapped_model, query, query.shape[0], processing_class.pad_token_id, generation_config, ) response = query_response[:, context_length:] postprocessed_response = response if args.stop_token_id is not None: # handle the edge case when stop_token_id exists but is 0 postprocessed_response = truncate_response( args.stop_token_id, processing_class.pad_token_id, response ) table["query"].extend( gather_object(processing_class.batch_decode(query, skip_special_tokens=True)) ) table["model response"].extend( gather_object(processing_class.batch_decode(postprocessed_response)) ) postprocessed_query_response = torch.cat((query, postprocessed_response), 1) if isinstance(self.reward_model, nn.Module): _, score, _ = get_reward( self.reward_model, postprocessed_query_response, processing_class.pad_token_id, context_length, ) else: score = torch.tensor( self.reward_model( processing_class.batch_decode(postprocessed_query_response, skip_special_tokens=True) ), dtype=torch.float, ).to(postprocessed_query_response.device) table["score"].extend(self.accelerator.gather_for_metrics(score).float().cpu().numpy()) if sampling: break df = pd.DataFrame(table) if self.accelerator.is_main_process: print_rich_table(df.iloc[0 : 0 + 5]) if "wandb" in args.report_to: import wandb if wandb.run is not None: wandb.log({"completions": wandb.Table(dataframe=df)}) if "comet_ml" in args.report_to: log_table_to_comet_experiment( name="completions.csv", table=df, ) def create_model_card( self, model_name: Optional[str] = None, dataset_name: Optional[str] = None, tags: Union[str, list[str], None] = None, ): """ Creates a draft of a model card using the information available to the `Trainer`. Args: model_name (`str` or `None`, *optional*, defaults to `None`): Name of the model. dataset_name (`str` or `None`, *optional*, defaults to `None`): Name of the dataset used for training. tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`): Tags to be associated with the model card. 
""" if not self.is_world_process_zero(): return if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path): base_model = self.model.config._name_or_path else: base_model = None tags = tags or [] if isinstance(tags, str): tags = [tags] if hasattr(self.model.config, "unsloth_version"): tags.append("unsloth") citation = textwrap.dedent("""\ @inproceedings{ahmadian2024back, title = {{Back to Basics: Revisiting REINFORCE-Style Optimization for Learning from Human Feedback in LLMs}}, author = {Arash Ahmadian and Chris Cremer and Matthias Gall{\'{e}} and Marzieh Fadaee and Julia Kreutzer and Olivier Pietquin and Ahmet {\"{U}}st{\"{u}}n and Sara Hooker}, year = 2024, booktitle = {Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), {ACL} 2024, Bangkok, Thailand, August 11-16, 2024}, publisher = {Association for Computational Linguistics}, pages = {12248--12267}, editor = {Lun{-}Wei Ku and Andre Martins and Vivek Srikumar}, }""") model_card = generate_model_card( base_model=base_model, model_name=model_name, hub_model_id=self.hub_model_id, dataset_name=dataset_name, tags=tags, wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None, comet_url=get_comet_experiment_url(), trainer_name="RLOO", trainer_citation=citation, paper_title="Back to Basics: Revisiting REINFORCE-Style Optimization for Learning from Human Feedback in LLMs", paper_id="2402.14740", ) model_card.save(os.path.join(self.args.output_dir, "README.md"))
trl/trl/trainer/rloo_trainer.py/0
{ "file_path": "trl/trl/trainer/rloo_trainer.py", "repo_id": "trl", "token_count": 16823 }
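The heart of the trainer above is the vectorized leave-one-out (RLOO) baseline. The standalone sketch below reproduces just that step with arbitrary shapes, independent of any model or dataset, to show how each completion's baseline is the mean reward of the other `rloo_k - 1` completions generated for the same prompt.

```python
import torch

rloo_k, num_prompts = 4, 3                       # illustrative sizes only
rlhf_reward = torch.randn(rloo_k * num_prompts)  # flattened per-completion rewards

# Reshape so that column j holds the rloo_k rewards sampled for prompt j
# (the trainer tiles the prompt batch rloo_k times before generation).
rlhf_reward = rlhf_reward.reshape(rloo_k, -1)

# Leave-one-out baseline: exclude each sample's own reward from the per-prompt mean.
baseline = (rlhf_reward.sum(0) - rlhf_reward) / (rloo_k - 1)
advantages = (rlhf_reward - baseline).flatten()

print(advantages.shape)          # torch.Size([12])
print(advantages.mean().item())  # ~0: advantages sum to zero within each prompt group
```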
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script tests to ensure that `accelerate` performs at the same level as raw `MS-AMP`. This particular script verifies this for DeepSpeed training. NOTE: MS-AMP does *not* support ZeRO-3. """ # import msamp.deepspeed as msamp_deepspeed import evaluate import torch from fp8_utils import evaluate_model, get_training_utilities from msamp import deepspeed as msamp_deepspeed from accelerate import Accelerator, DeepSpeedPlugin from accelerate.state import AcceleratorState from accelerate.utils import set_seed MODEL_NAME = "bert-base-cased" METRIC = evaluate.load("glue", "mrpc") def train_baseline(zero_stage: int = 1, opt_level: str = "O1"): set_seed(42) accelerator = Accelerator() model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) import numpy as np config = { "train_batch_size": 32, "train_micro_batch_size_per_gpu": 16, "gradient_accumulation_steps": 1, "zero_optimization": { "stage": zero_stage, "offload_optimizer": {"device": "none", "nvme_path": None}, "offload_param": {"device": "none", "nvme_path": None}, }, "gradient_clipping": 1.0, "steps_per_print": np.inf, "bf16": {"enabled": True}, "fp16": {"enabled": False}, "zero_allow_untested_optimizer": True, "msamp": { "enabled": True, "opt_level": opt_level, }, } ( model, optimizer, _, _, ) = msamp_deepspeed.initialize( model=model, optimizer=optimizer, config_params=config, ) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for _ in range(2): for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss model.backward(loss) model.step() for _ in range(accelerator.num_processes): lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.destroy() torch.cuda.empty_cache() AcceleratorState()._reset_state(True) assert ( trained_model_results["accuracy"] > base_model_results["accuracy"] ), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}' assert ( trained_model_results["f1"] > base_model_results["f1"] ), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}' return base_model_results, trained_model_results def train_integration(zero_stage: int = 1, opt_level: str = "O1"): set_seed(42) deepspeed_plugin = DeepSpeedPlugin( zero_stage=zero_stage, enable_msamp=True, msamp_opt_level=opt_level, ) accelerator = Accelerator(mixed_precision="fp8", deepspeed_plugin=deepspeed_plugin) accelerator.state.deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] = 16 model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler) base_model_results = 
evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for _ in range(2): for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.destroy() torch.cuda.empty_cache() assert ( trained_model_results["accuracy"] > base_model_results["accuracy"] ), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}' assert ( trained_model_results["f1"] > base_model_results["f1"] ), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}' AcceleratorState()._reset_state(True) return base_model_results, trained_model_results if __name__ == "__main__": for zero_stage in [1, 2]: for opt_level in ["O1", "O2", "O3"]: baseline_not_trained, baseline_trained = train_baseline(zero_stage, opt_level) accelerator_not_trained, accelerator_trained = train_integration(zero_stage, opt_level) assert ( baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"] ), f'ZERO stage {zero_stage}, opt_level={opt_level}:\nAccuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}' assert ( baseline_not_trained["f1"] == accelerator_not_trained["f1"] ), f'ZERO stage {zero_stage}, opt_level={opt_level}:\nF1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}' assert ( baseline_trained["accuracy"] == accelerator_trained["accuracy"] ), f'ZERO stage {zero_stage}, opt_level={opt_level}:\nAccuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}' assert ( baseline_trained["f1"] == accelerator_trained["f1"] ), f'ZERO stage {zero_stage}, opt_level={opt_level}:\nF1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}' torch.distributed.destroy_process_group()
accelerate/benchmarks/fp8/ms_amp/distrib_deepspeed.py/0
{ "file_path": "accelerate/benchmarks/fp8/ms_amp/distrib_deepspeed.py", "repo_id": "accelerate", "token_count": 2583 }
- sections: - local: index title: 🤗 Accelerate - local: basic_tutorials/install title: Installation - local: quicktour title: Quicktour title: Getting started - sections: - local: basic_tutorials/overview title: Overview - local: basic_tutorials/migration title: Add Accelerate to your code - local: basic_tutorials/execution title: Execution process - local: basic_tutorials/tpu title: TPU training - local: basic_tutorials/launch title: Launching Accelerate scripts - local: basic_tutorials/notebook title: Launching distributed training from Jupyter Notebooks title: Tutorials - sections: - isExpanded: true sections: - local: usage_guides/explore title: Start Here! - local: usage_guides/model_size_estimator title: Model memory estimator - local: usage_guides/quantization title: Model quantization - local: usage_guides/tracking title: Experiment trackers - local: usage_guides/profiler title: Profiler - local: usage_guides/checkpoint title: Checkpointing - local: basic_tutorials/troubleshooting title: Troubleshoot - local: usage_guides/training_zoo title: Example Zoo title: Accelerate - isExpanded: true sections: - local: usage_guides/gradient_accumulation title: Gradient accumulation - local: usage_guides/local_sgd title: Local SGD - local: usage_guides/low_precision_training title: Low precision (FP8) training - local: usage_guides/deepspeed title: DeepSpeed - local: usage_guides/deepspeed_multiple_model title: Using multiple models with DeepSpeed - local: usage_guides/ddp_comm_hook title: DDP Communication Hooks - local: usage_guides/fsdp title: Fully Sharded Data Parallel - local: usage_guides/megatron_lm title: Megatron-LM - local: usage_guides/sagemaker title: Amazon SageMaker - local: usage_guides/mps title: Apple M1 GPUs - local: usage_guides/ipex title: IPEX training with CPU title: Training - isExpanded: true sections: - local: usage_guides/big_modeling title: Big Model Inference - local: usage_guides/distributed_inference title: Distributed inference title: Inference title: How to guides - sections: - local: concept_guides/internal_mechanism title: Accelerate's internal mechanism - local: concept_guides/big_model_inference title: Loading big models into memory - local: concept_guides/performance title: Comparing performance across distributed setups - local: concept_guides/deferring_execution title: Executing and deferring jobs - local: concept_guides/gradient_synchronization title: Gradient synchronization - local: concept_guides/fsdp_and_deepspeed title: FSDP vs DeepSpeed - local: concept_guides/low_precision_training title: Low precision training methods - local: concept_guides/training_tpu title: Training on TPUs title: Concepts and fundamentals - sections: - local: package_reference/accelerator title: Accelerator - local: package_reference/state title: Stateful classes - local: package_reference/cli title: The Command Line - local: package_reference/torch_wrappers title: DataLoaders, Optimizers, Schedulers - local: package_reference/tracking title: Experiment trackers - local: package_reference/launchers title: Launchers - local: package_reference/deepspeed title: DeepSpeed utilities - local: package_reference/logging title: Logging - local: package_reference/big_modeling title: Working with large models - local: package_reference/inference title: Pipeline parallelism - local: package_reference/kwargs title: Kwargs handlers - local: package_reference/fp8 title: FP8 - local: package_reference/utilities title: Utility functions and classes - local: package_reference/megatron_lm 
title: Megatron-LM utilities - local: package_reference/fsdp title: Fully Sharded Data Parallel utilities title: "Reference"
accelerate/docs/source/_toctree.yml/0
{ "file_path": "accelerate/docs/source/_toctree.yml", "repo_id": "accelerate", "token_count": 1411 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Training on TPUs Training on TPUs can be slightly different from training on multi-gpu, even with Accelerate. This guide aims to show you where you should be careful and why, as well as the best practices in general. ## Training in a Notebook The main carepoint when training on TPUs comes from the [`notebook_launcher`]. As mentioned in the [notebook tutorial](../usage_guides/notebook), you need to restructure your training code into a function that can get passed to the [`notebook_launcher`] function and be careful about not declaring any tensors on the GPU. While on a TPU that last part is not as important, a critical part to understand is that when you launch code from a notebook you do so through a process called **forking**. When launching from the command-line, you perform **spawning**, where a python process is not currently running and you *spawn* a new process in. Since your Jupyter notebook is already utilizing a python process, you need to *fork* a new process from it to launch your code. Where this becomes important is in regard to declaring your model. On forked TPU processes, it is recommended that you instantiate your model *once* and pass this into your training function. This is different than training on GPUs where you create `n` models that have their gradients synced and back-propagated at certain moments. Instead, one model instance is shared between all the nodes and it is passed back and forth. This is important especially when training on low-resource TPUs such as those provided in Kaggle kernels or on Google Colaboratory. Below is an example of a training function passed to the [`notebook_launcher`] if training on CPUs or GPUs: <Tip> This code snippet is based off the one from the `simple_nlp_example` notebook found [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb) with slight modifications for the sake of simplicity </Tip> ```python def training_function(): # Initialize accelerator accelerator = Accelerator() model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2) train_dataloader, eval_dataloader = create_dataloaders( train_batch_size=hyperparameters["train_batch_size"], eval_batch_size=hyperparameters["eval_batch_size"] ) # Instantiate optimizer optimizer = AdamW(params=model.parameters(), lr=hyperparameters["learning_rate"]) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) num_epochs = hyperparameters["num_epochs"] # Now we train the model for epoch in range(num_epochs): model.train() for step, batch in enumerate(train_dataloader): outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() optimizer.zero_grad() ``` ```python from accelerate import notebook_launcher notebook_launcher(training_function) ``` <Tip> The `notebook_launcher` will default to 8 processes if Accelerate has been configured for a TPU </Tip> If you use this example and declare the model *inside* the training loop, then on a low-resource system you will potentially see an error like: ``` ProcessExitedException: process 0 terminated with signal SIGSEGV ``` This error is *extremely* cryptic but the basic explanation is you ran out of system RAM. You can avoid this entirely by reconfiguring the training function to accept a single `model` argument, and declare it in an outside cell: ```python # In another Jupyter cell model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2) ``` ```diff + def training_function(model): # Initialize accelerator accelerator = Accelerator() - model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2) train_dataloader, eval_dataloader = create_dataloaders( train_batch_size=hyperparameters["train_batch_size"], eval_batch_size=hyperparameters["eval_batch_size"] ) ... ``` And finally calling the training function with: ```diff from accelerate import notebook_launcher - notebook_launcher(training_function) + notebook_launcher(training_function, (model,)) ``` <Tip> The above workaround is only needed when launching a TPU instance from a Jupyter Notebook on a low-resource server such as Google Colaboratory or Kaggle. If using a script or launching on a much beefier server declaring the model beforehand is not needed. </Tip> ## Mixed Precision and Global Variables As mentioned in the [mixed precision tutorial](../usage_guides/mixed_precision), Accelerate supports fp16 and bf16, both of which can be used on TPUs. That being said, ideally `bf16` should be utilized as it is extremely efficient to use. There are two "layers" when using `bf16` and Accelerate on TPUs, at the base level and at the operation level. At the base level, this is enabled when passing `mixed_precision="bf16"` to `Accelerator`, such as: ```python accelerator = Accelerator(mixed_precision="bf16") ``` By default, this will cast `torch.float` and `torch.double` to `bfloat16` on TPUs. The specific configuration being set is an environmental variable of `XLA_USE_BF16` is set to `1`. There is a further configuration you can perform which is setting the `XLA_DOWNCAST_BF16` environmental variable. If set to `1`, then `torch.float` is `bfloat16` and `torch.double` is `float32`. This is performed in the `Accelerator` object when passing `downcast_bf16=True`: ```python accelerator = Accelerator(mixed_precision="bf16", downcast_bf16=True) ``` Using downcasting instead of bf16 everywhere is good for when you are trying to calculate metrics, log values, and more where raw bf16 tensors would be unusable. ## Training Times on TPUs As you launch your script, you may notice that training seems exceptionally slow at first. This is because TPUs first run through a few batches of data to see how much memory to allocate before finally utilizing this configured memory allocation extremely efficiently. 
If your evaluation code (the code that calculates your model's metrics) takes longer because it uses a larger batch size than training, the TPU first has to allocate memory for that new batch size. If this is too slow, it is recommended to keep the evaluation batch size the same as the training batch size; otherwise, the memory will simply be reallocated to the new batch size after the first few iterations.

<Tip>

Just because the memory is allocated does not mean it will be used or that the batch size will increase when going back to your training dataloader.

</Tip>
accelerate/docs/source/concept_guides/training_tpu.md/0
{ "file_path": "accelerate/docs/source/concept_guides/training_tpu.md", "repo_id": "accelerate", "token_count": 2196 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Low Precision Training Methods

Accelerate provides integrations to train on lower precision methods using specified supported hardware through the `TransformersEngine` and `MS-AMP` packages. This documentation will help guide you through what hardware is supported, how to configure your [`Accelerator`] to leverage the low precision methods, and what you can expect when training.

## What training on FP8 means

To explore more of the nitty-gritty in training in FP8 with PyTorch and Accelerate, check out the [concept_guide](../concept_guides/low_precision_training) on why this can be difficult. But essentially rather than training in BF16, some (or all) aspects of training a model can be performed using 8 bits instead of 16.

The challenge is doing so without degrading final performance.

This is only enabled on specific NVIDIA hardware, namely:

* Anything after the 3000 series consumer graphics cards (such as the 4090)
* Hopper-based GPU architectures (such as the `H100` and `H200`)

What this will result in is some gain in the memory used (as we've cut the needed memory in half for some parts of training) and an increase in throughput *should* be seen as well for larger models that can replace certain layers with FP8-enabled ones.

## Configuring the Accelerator

Currently two different backends for FP8 are supported (`TransformersEngine` and `MS-AMP`), each with different capabilities and configurations.

To use either, the same core API is used. Just pass `mixed_precision="fp8"` to either the [`Accelerator`], during `accelerate config` when prompted about mixed precision, or as part of your `config.yaml` file in the `mixed_precision` key:

```{python}
from accelerate import Accelerator
accelerator = Accelerator(mixed_precision="fp8")
```

By default, if `MS-AMP` is available in your environment, Accelerate will automatically utilize it as a backend. To specify it yourself (and customize other parts of the FP8 mixed precision setup), you can utilize the [`utils.FP8RecipeKwargs`] or clarify it in your config `yaml`/during `accelerate launch`:

```{python}
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs
kwargs = [FP8RecipeKwargs(backend="msamp")]
# Or to specify the backend as `TransformersEngine` even if MS-AMP is installed
# kwargs = [FP8RecipeKwargs(backend="te")]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
```

```{yaml}
mixed_precision: fp8
fp8_config:
  amax_compute_algorithm: max
  amax_history_length: 1024
  backend: TE
  fp8_format: HYBRID
  interval: 1
  margin: 0
  override_linear_precision: false
  use_autocast_during_eval: false
```

## Configuring MS-AMP

Of the two, `MS-AMP` is traditionally the easier one to configure as there is only a single argument: the optimization level.
Currently two levels of optimization are supported in the Accelerate integration, `"O1"` and `"O2"` (using the letter 'o', not zero).

* `"O1"` will cast the weight gradients and `all_reduce` communications to happen in 8-bit, while the rest are done in 16 bit. This reduces the general GPU memory usage and speeds up communication bandwidths.
* `"O2"` will also cast first-order optimizer states into 8 bit, while the second order states are in FP16. (Currently just the `Adam` optimizer is supported). This tries its best to minimize final accuracy degradation and will save the highest potential memory.

To specify an optimization level, pass it to the [`FP8RecipeKwargs`] handler by setting the `optimization_level` argument:

```{python}
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs
kwargs = [FP8RecipeKwargs(backend="msamp", optimization_level="O2")]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
```

Or during `accelerate launch` via `--fp8_backend=msamp --fp8_opt_level=O2`

Similarly this can be set in your `config.yaml`:

```{yaml}
mixed_precision: fp8
fp8_config:
  backend: MSAMP
  opt_level: O2
```

## Configuring TransformersEngine

TransformersEngine has much more available for customizing how and what FP8 calculations are performed. A full list of supported arguments and what they mean is available in [NVIDIA's documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html), however they are restated as part of the [`FP8RecipeKwargs`] docstring for your convenience.

Accelerate tries to set sensible defaults, but exploring and tweaking the various parameters yourself can potentially lead to better performance.

To use it, specify `backend="te"` and modify any of the arguments you want as part of your kwarg handler:

```{python}
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs
kwargs = [FP8RecipeKwargs(backend="te", ...)]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
```

Or during `accelerate launch` via `--fp8_backend=te ...`. Use `accelerate launch --fp8_backend=te -h` to see relevant arguments.

Similarly this can be set in your `config.yaml`:

```{yaml}
mixed_precision: fp8
fp8_config:
  amax_compute_algorithm: max
  amax_history_length: 1024
  backend: TE
  fp8_format: HYBRID
  interval: 1
  margin: 0
  override_linear_precision: false
  use_autocast_during_eval: false
```

## Example Zoo

We have examples showcasing training with FP8 both with accelerate and its underlying implementation available in the accelerate repo.
Currently we support scripts showcasing:

* Single GPU
* Distributed Data Parallelism (Multi-GPU)
* Fully Sharded Data Parallelism
* DeepSpeed ZeRO 1 through 3

Find out more [here](https://github.com/huggingface/accelerate/tree/main/benchmarks/fp8)

## Further Reading

To learn more about training in FP8 please check out the following resources:

* [Our concept guide](../concept_guides/low_precision_training) detailing more about both TransformersEngine and MS-AMP
* [The `transformers-engine` documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html)
* [The `MS-AMP` documentation](https://azure.github.io/MS-AMP/docs/)
accelerate/docs/source/usage_guides/low_precision_training.md/0
{ "file_path": "accelerate/docs/source/usage_guides/low_precision_training.md", "repo_id": "accelerate", "token_count": 1933 }
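Putting the pieces of the guide above together, here is a minimal end-to-end sketch. It assumes an FP8-capable GPU and that the MS-AMP backend is installed; the tiny model and random data are placeholders, and the handler values mirror the examples in the guide (note the `kwargs_handlers` parameter name of `Accelerator`).

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs

# Assumes FP8-capable hardware (e.g. an H100) with MS-AMP installed.
handlers = [FP8RecipeKwargs(backend="msamp", optimization_level="O2")]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=handlers)

# Toy model and data purely for illustration.
model = torch.nn.Linear(64, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(256, 64), torch.randint(0, 2, (256,)))
dataloader = DataLoader(dataset, batch_size=16)

model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
loss_fn = torch.nn.CrossEntropyLoss()

model.train()
for inputs, labels in dataloader:
    optimizer.zero_grad()
    loss = loss_fn(model(inputs), labels)
    accelerator.backward(loss)
    optimizer.step()
```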
# Since we are doing FSDP (even though it's multi-GPU), we need to specify the distributed type as FSDP distributed_type: FSDP # Can be one of "no", "fp16", or "bf16" (see `transformer_engine.yaml` for `fp8`, but it works for FSDP as well) mixed_precision: 'bf16' # Specify the number of GPUs to use num_processes: 2 # Then we can specify the FSDP config fsdp_config: fsdp_activation_checkpointing: false fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP fsdp_backward_prefetch: BACKWARD_PRE fsdp_cpu_ram_efficient_loading: true fsdp_forward_prefetch: false fsdp_offload_params: false fsdp_sharding_strategy: FULL_SHARD fsdp_state_dict_type: SHARDED_STATE_DICT fsdp_sync_module_states: true fsdp_use_orig_params: true
accelerate/examples/config_yaml_templates/fsdp.yaml/0
{ "file_path": "accelerate/examples/config_yaml_templates/fsdp.yaml", "repo_id": "accelerate", "token_count": 275 }
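The YAML template above is meant to be consumed by `accelerate launch --config_file fsdp.yaml ...`; the same settings can also be constructed in code. Below is a rough sketch that assumes `FullyShardedDataParallelPlugin` accepts the string forms of these options (the keyword names mirror, but do not exactly match, the YAML keys).

```python
from accelerate import Accelerator, FullyShardedDataParallelPlugin

# Only a subset of the YAML options is shown; treat the accepted values as an
# assumption and check the FullyShardedDataParallelPlugin docstring for the exact signature.
fsdp_plugin = FullyShardedDataParallelPlugin(
    sharding_strategy="FULL_SHARD",
    backward_prefetch="BACKWARD_PRE",
    state_dict_type="SHARDED_STATE_DICT",
    use_orig_params=True,
)
accelerator = Accelerator(mixed_precision="bf16", fsdp_plugin=fsdp_plugin)
```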
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from diffusers import DiffusionPipeline from accelerate import PartialState # Can also be Accelerator or AcceleratorState pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) distributed_state = PartialState() pipe.to(distributed_state.device) # Assume two processes # On the first GPU, the prompts will be ["a dog", "a cat"], # and on the second GPU it will be ["a chicken", "a chicken"]. # Make sure to drop the final sample, as it will be a duplicate of the previous one. with distributed_state.split_between_processes(["a dog", "a cat", "a chicken"], apply_padding=True) as prompt: result = pipe(prompt).images
accelerate/examples/inference/distributed/stable_diffusion.py/0
{ "file_path": "accelerate/examples/inference/distributed/stable_diffusion.py", "repo_id": "accelerate", "token_count": 363 }
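Because the splitting logic lives in `PartialState` rather than in the diffusion pipeline, the prompt distribution described in the comments above can be checked without loading any model. A small sketch (run it with e.g. `accelerate launch --num_processes 2 check_split.py`; the script name is arbitrary):

```python
from accelerate import PartialState

state = PartialState()
prompts = ["a dog", "a cat", "a chicken"]

# With apply_padding=True, the last element is repeated so that every process
# receives the same number of items (useful when gathering results later).
with state.split_between_processes(prompts, apply_padding=True) as subset:
    print(f"process {state.process_index}: {subset}")
```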
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from manim import * class Stage2(Scene): def construct(self): mem = Rectangle(height=0.5,width=0.5) fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0) cpu_left_col_base = [mem.copy() for i in range(6)] cpu_right_col_base = [mem.copy() for i in range(6)] cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0) cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0) cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0) cpu_text = Text("CPU", font_size=24) cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) cpu.move_to([-2.5,-.5,0]) self.add(cpu) gpu_base = [mem.copy() for i in range(4)] gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0) gpu_text = Text("GPU", font_size=24) gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) gpu.move_to([-1,-1,0]) self.add(gpu) model_base = [mem.copy() for i in range(6)] model_rect = VGroup(*model_base).arrange(RIGHT,buff=0) model_text = Text("Model", font_size=24) model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) model.move_to([3, -1., 0]) self.add(model) cpu_targs = [] for i,rect in enumerate(model_base): rect.set_stroke(YELLOW) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) cpu_target = Rectangle(height=0.46/4,width=0.46/3).set_stroke(width=0.).set_fill(YELLOW, opacity=0.7) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN+LEFT), buff=0.02, direction=UP) cpu_target.set_x(cpu_target.get_x()+0.1) elif i == 3: cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.) else: cpu_target.next_to(cpu_targs[i-1], direction=RIGHT, buff=0.) 
self.add(cpu_target) cpu_targs.append(cpu_target) checkpoint_base = [mem.copy() for i in range(6)] checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT,buff=0) checkpoint_text = Text("Loaded Checkpoint", font_size=24) checkpoint = Group(checkpoint_rect,checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4) checkpoint.move_to([3, .5, 0]) key = Square(side_length=2.2) key.move_to([-5, 2, 0]) key_text = MarkupText( f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18, ) key_text.move_to([-5, 2.4, 0]) self.add(key_text, key) blue_text = MarkupText( f"<span fgcolor='{BLUE}'>●</span> Checkpoint", font_size=18, ) blue_text.next_to(key_text, DOWN*2.4, aligned_edge=key_text.get_left()) step_2 = MarkupText( f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.', font_size=24 ) step_2.move_to([2, 2, 0]) self.play( Write(step_2), Write(blue_text) ) self.play( Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1) ) first_animations = [] second_animations = [] for i,rect in enumerate(checkpoint_base): target = fill.copy().set_fill(BLUE, opacity=0.7) target.move_to(rect) first_animations.append(GrowFromCenter(target, run_time=1)) cpu_target = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i+1]) else: cpu_target.target.move_to(cpu_right_col_base[i-5]) second_animations.append(MoveToTarget(cpu_target, run_time=1.5)) self.play(*first_animations) self.play(*second_animations) self.wait()
accelerate/manim_animations/big_model_inference/stage_2.py/0
{ "file_path": "accelerate/manim_animations/big_model_inference/stage_2.py", "repo_id": "accelerate", "token_count": 2354 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from contextlib import contextmanager from functools import wraps from typing import Dict, List, Optional, Union import torch import torch.nn as nn from .hooks import ( AlignDevicesHook, CpuOffload, UserCpuOffloadHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks, ) from .utils import ( OffloadedWeightsLoader, check_cuda_p2p_ib_support, check_device_map, extract_submodules_state_dict, find_tied_parameters, get_balanced_memory, infer_auto_device_map, is_bnb_available, is_mlu_available, is_musa_available, is_npu_available, is_xpu_available, load_checkpoint_in_model, offload_state_dict, parse_flag_from_env, retie_parameters, ) from .utils.other import recursive_getattr logger = logging.getLogger(__name__) @contextmanager def init_empty_weights(include_buffers: bool = None): """ A context manager under which models are initialized with all parameters on the meta device, therefore creating an empty model. Useful when just initializing the model would blow the available RAM. Args: include_buffers (`bool`, *optional*): Whether or not to also put all buffers on the meta device while initializing. Example: ```python import torch.nn as nn from accelerate import init_empty_weights # Initialize a model with 100 billions parameters in no time and without using any RAM. with init_empty_weights(): tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)]) ``` <Tip warning={true}> Any model created under this context manager has no weights. As such you can't do something like `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`]. Make sure to overwrite the default device_map param for [`load_checkpoint_and_dispatch`], otherwise dispatch is not called. </Tip> """ if include_buffers is None: include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False) with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f: yield f @contextmanager def init_on_device(device: torch.device, include_buffers: bool = None): """ A context manager under which models are initialized with all parameters on the specified device. Args: device (`torch.device`): Device to initialize all parameters on. include_buffers (`bool`, *optional*): Whether or not to also put all buffers on the meta device while initializing. 
Example: ```python import torch.nn as nn from accelerate import init_on_device with init_on_device(device=torch.device("cuda")): tst = nn.Linear(100, 100) # on `cuda` device ``` """ if include_buffers is None: include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False) if include_buffers: with device: yield return old_register_parameter = nn.Module.register_parameter if include_buffers: old_register_buffer = nn.Module.register_buffer def register_empty_parameter(module, name, param): old_register_parameter(module, name, param) if param is not None: param_cls = type(module._parameters[name]) kwargs = module._parameters[name].__dict__ kwargs["requires_grad"] = param.requires_grad module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs) def register_empty_buffer(module, name, buffer, persistent=True): old_register_buffer(module, name, buffer, persistent=persistent) if buffer is not None: module._buffers[name] = module._buffers[name].to(device) # Patch tensor creation if include_buffers: tensor_constructors_to_patch = { torch_function_name: getattr(torch, torch_function_name) for torch_function_name in ["empty", "zeros", "ones", "full"] } else: tensor_constructors_to_patch = {} def patch_tensor_constructor(fn): def wrapper(*args, **kwargs): kwargs["device"] = device return fn(*args, **kwargs) return wrapper try: nn.Module.register_parameter = register_empty_parameter if include_buffers: nn.Module.register_buffer = register_empty_buffer for torch_function_name in tensor_constructors_to_patch.keys(): setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name))) yield finally: nn.Module.register_parameter = old_register_parameter if include_buffers: nn.Module.register_buffer = old_register_buffer for torch_function_name, old_torch_function in tensor_constructors_to_patch.items(): setattr(torch, torch_function_name, old_torch_function) def cpu_offload( model: nn.Module, execution_device: Optional[torch.device] = None, offload_buffers: bool = False, state_dict: Optional[Dict[str, torch.Tensor]] = None, preload_module_classes: Optional[List[str]] = None, ): """ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that state dict and put on the execution device passed as they are needed, then offloaded again. Args: model (`torch.nn.Module`): The model to offload. execution_device (`torch.device`, *optional*): The device on which the forward pass of the model will be executed (should be a GPU). Will default to the model first parameter device. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to offload the buffers with the model parameters. state_dict (`Dict[str, torch.Tensor]`, *optional*): The state dict of the model that will be kept on CPU. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. 
""" if execution_device is None: execution_device = next(iter(model.parameters())).device if state_dict is None: state_dict = {n: p.to("cpu") for n, p in model.state_dict().items()} add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True) attach_align_device_hook( model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=state_dict, preload_module_classes=preload_module_classes, ) return model def cpu_offload_with_hook( model: torch.nn.Module, execution_device: Optional[Union[int, str, torch.device]] = None, prev_module_hook: Optional[UserCpuOffloadHook] = None, ): """ Offloads a model on the CPU and puts it back to an execution device when executed. The difference with [`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when the `offload` method of the returned `hook` is called. Useful for pipelines running a model in a loop. Args: model (`torch.nn.Module`): The model to offload. execution_device(`str`, `int` or `torch.device`, *optional*): The device on which the model should be executed. Will default to the MPS device if it's available, then GPU 0 if there is a GPU, and finally to the CPU. prev_module_hook (`UserCpuOffloadHook`, *optional*): The hook sent back by this function for a previous model in the pipeline you are running. If passed, its offload method will be called just before the forward of the model to which this hook is attached. Example: ```py model_1, hook_1 = cpu_offload_with_hook(model_1, cuda_device) model_2, hook_2 = cpu_offload_with_hook(model_2, cuda_device, prev_module_hook=hook_1) model_3, hook_3 = cpu_offload_with_hook(model_3, cuda_device, prev_module_hook=hook_2) hid_1 = model_1(input) for i in range(50): # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop. hid_2 = model_2(hid_1) # model2 is offloaded to the CPU just before this forward. hid_3 = model_3(hid_3) # For model3, you need to manually call the hook offload method. hook_3.offload() ``` """ hook = CpuOffload(execution_device=execution_device, prev_module_hook=prev_module_hook) add_hook_to_module(model, hook, append=True) user_hook = UserCpuOffloadHook(model, hook) return model, user_hook def disk_offload( model: nn.Module, offload_dir: Union[str, os.PathLike], execution_device: Optional[torch.device] = None, offload_buffers: bool = False, preload_module_classes: Optional[List[str]] = None, ): """ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and put on the execution device passed as they are needed, then offloaded again. Args: model (`torch.nn.Module`): The model to offload. offload_dir (`str` or `os.PathLike`): The folder in which to offload the model weights (or where the model weights are already offloaded). execution_device (`torch.device`, *optional*): The device on which the forward pass of the model will be executed (should be a GPU). Will default to the model's first parameter device. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to offload the buffers with the model parameters. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. 
This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. """ if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")): offload_state_dict(offload_dir, model.state_dict()) if execution_device is None: execution_device = next(iter(model.parameters())).device weights_map = OffloadedWeightsLoader(save_folder=offload_dir) add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True) attach_align_device_hook( model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=weights_map, preload_module_classes=preload_module_classes, ) return model def dispatch_model( model: nn.Module, device_map: Dict[str, Union[str, int, torch.device]], main_device: Optional[torch.device] = None, state_dict: Optional[Dict[str, torch.Tensor]] = None, offload_dir: Optional[Union[str, os.PathLike]] = None, offload_index: Optional[Dict[str, str]] = None, offload_buffers: bool = False, skip_keys: Optional[Union[str, List[str]]] = None, preload_module_classes: Optional[List[str]] = None, force_hooks: bool = False, ): """ Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on the CPU or even the disk. Args: model (`torch.nn.Module`): The model to dispatch. device_map (`Dict[str, Union[str, int, torch.device]]`): A dictionary mapping module names in the models `state_dict` to the device they should go to. Note that `"disk"` is accepted even if it's not a proper value for `torch.device`. main_device (`str`, `int` or `torch.device`, *optional*): The main execution device. Will default to the first device in the `device_map` different from `"cpu"` or `"disk"`. state_dict (`Dict[str, torch.Tensor]`, *optional*): The state dict of the part of the model that will be kept on CPU. offload_dir (`str` or `os.PathLike`): The folder in which to offload the model weights (or where the model weights are already offloaded). offload_index (`Dict`, *optional*): A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default to the index saved in `save_folder`. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to offload the buffers with the model parameters. skip_keys (`str` or `List[str]`, *optional*): A list of keys to ignore when moving inputs or outputs between devices. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. force_hooks (`bool`, *optional*, defaults to `False`): Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a single device. """ # Error early if the device map is incomplete. 
check_device_map(model, device_map) # We need to force hook for quantized model that can't be moved with to() if getattr(model, "quantization_method", "bitsandbytes") == "bitsandbytes": # since bnb 0.43.2, we can move 4-bit model if getattr(model, "is_loaded_in_8bit", False) or ( getattr(model, "is_loaded_in_4bit", False) and not is_bnb_available(min_version="0.43.2") ): force_hooks = True # We attach hooks if the device_map has at least 2 different devices or if # force_hooks is set to `True`. Otherwise, the model in already loaded # in the unique device and the user can decide where to dispatch the model. # If the model is quantized, we always force-dispatch the model if (len(set(device_map.values())) > 1) or force_hooks: if main_device is None: if set(device_map.values()) == {"cpu"} or set(device_map.values()) == {"cpu", "disk"}: main_device = "cpu" else: main_device = [d for d in device_map.values() if d not in ["cpu", "disk"]][0] if main_device != "cpu": cpu_modules = [name for name, device in device_map.items() if device == "cpu"] if state_dict is None and len(cpu_modules) > 0: state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules) disk_modules = [name for name, device in device_map.items() if device == "disk"] if offload_dir is None and offload_index is None and len(disk_modules) > 0: raise ValueError( "We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules " f"need to be offloaded: {', '.join(disk_modules)}." ) if ( len(disk_modules) > 0 and offload_index is None and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json"))) ): disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules) offload_state_dict(offload_dir, disk_state_dict) execution_device = { name: main_device if device in ["cpu", "disk"] else device for name, device in device_map.items() } execution_device[""] = main_device offloaded_devices = ["disk"] if main_device == "cpu" or main_device == "mps" else ["cpu", "disk"] offload = {name: device in offloaded_devices for name, device in device_map.items()} save_folder = offload_dir if len(disk_modules) > 0 else None if state_dict is not None or save_folder is not None or offload_index is not None: device = main_device if offload_index is not None else None weights_map = OffloadedWeightsLoader( state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device ) else: weights_map = None # When dispatching the model's parameters to the devices specified in device_map, we want to avoid allocating memory several times for the # tied parameters. The dictionary tied_params_map keeps track of the already allocated data for a given tied parameter (represented by its # original pointer) on each devices. tied_params = find_tied_parameters(model) tied_params_map = {} for group in tied_params: for param_name in group: # data_ptr() is enough here, as `find_tied_parameters` finds tied params simply by comparing `param1 is param2`, so we don't need # to care about views of tensors through storage_offset. data_ptr = recursive_getattr(model, param_name).data_ptr() tied_params_map[data_ptr] = {} # Note: To handle the disk offloading case, we can not simply use weights_map[param_name].data_ptr() as the reference pointer, # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer. 
attach_align_device_hook_on_blocks( model, execution_device=execution_device, offload=offload, offload_buffers=offload_buffers, weights_map=weights_map, skip_keys=skip_keys, preload_module_classes=preload_module_classes, tied_params_map=tied_params_map, ) # warn if there is any params on the meta device offloaded_devices_str = " and ".join( [device for device in set(device_map.values()) if device in ("cpu", "disk")] ) if len(offloaded_devices_str) > 0: logger.warning( f"Some parameters are on the meta device because they were offloaded to the {offloaded_devices_str}." ) # Attaching the hook may break tied weights, so we retie them retie_parameters(model, tied_params) # add warning to cuda and to method def add_warning(fn, model): @wraps(fn) def wrapper(*args, **kwargs): warning_msg = "You shouldn't move a model that is dispatched using accelerate hooks." if str(fn.__name__) == "to": to_device = torch._C._nn._parse_to(*args, **kwargs)[0] if to_device is not None: logger.warning(warning_msg) else: logger.warning(warning_msg) for param in model.parameters(): if param.device == torch.device("meta"): raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.") return fn(*args, **kwargs) return wrapper # Make sure to update _accelerate_added_attributes in hooks.py if you add any hook model.to = add_warning(model.to, model) if is_npu_available(): model.npu = add_warning(model.npu, model) elif is_mlu_available(): model.mlu = add_warning(model.mlu, model) elif is_musa_available(): model.musa = add_warning(model.musa, model) elif is_xpu_available(): model.xpu = add_warning(model.xpu, model) else: model.cuda = add_warning(model.cuda, model) # Check if we are using multi-gpus with RTX 4000 series use_multi_gpu = len([device for device in set(device_map.values()) if device not in ("cpu", "disk")]) > 1 if use_multi_gpu and not check_cuda_p2p_ib_support(): logger.warning( "We've detected an older driver with an RTX 4000 series GPU. These drivers have issues with P2P. " "This can affect the multi-gpu inference when using accelerate device_map." "Please make sure to update your driver to the latest version which resolves this." ) else: device = list(device_map.values())[0] # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)). if is_npu_available() and isinstance(device, int): device = f"npu:{device}" elif is_mlu_available() and isinstance(device, int): device = f"mlu:{device}" elif is_musa_available() and isinstance(device, int): device = f"musa:{device}" elif is_xpu_available() and isinstance(device, int): device = f"xpu:{device}" if device != "disk": model.to(device) else: raise ValueError( "You are trying to offload the whole model to the disk. Please use the `disk_offload` function instead." 
) # Convert OrderedDict back to dict for easier usage model.hf_device_map = dict(device_map) return model def load_checkpoint_and_dispatch( model: nn.Module, checkpoint: Union[str, os.PathLike], device_map: Optional[Union[str, Dict[str, Union[int, str, torch.device]]]] = None, max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, no_split_module_classes: Optional[List[str]] = None, offload_folder: Optional[Union[str, os.PathLike]] = None, offload_buffers: bool = False, dtype: Optional[Union[str, torch.dtype]] = None, offload_state_dict: Optional[bool] = None, skip_keys: Optional[Union[str, List[str]]] = None, preload_module_classes: Optional[List[str]] = None, force_hooks: bool = False, strict: bool = False, ): """ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are loaded and adds the various hooks that will make this model run properly (even if split across devices). Args: model (`torch.nn.Module`): The model in which we want to load a checkpoint. checkpoint (`str` or `os.PathLike`): The folder checkpoint to load. It can be: - a path to a file containing a whole model state dict - a path to a `.json` file containing the index to a sharded checkpoint - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [here](../concept_guides/big_model_inference#designing-a-device-map). Defaults to None, which means [`dispatch_model`] will not be called. max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. no_split_module_classes (`List[str]`, *optional*): A list of layer class names that should never be split across device (for instance any layer that has a residual connection). offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_buffers (`bool`, *optional*, defaults to `False`): In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as well as the parameters. dtype (`str` or `torch.dtype`, *optional*): If provided, the weights will be converted to that type when loaded. offload_state_dict (`bool`, *optional*): If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard does not fit. Will default to `True` if the device map picked contains `"disk"` values. skip_keys (`str` or `List[str]`, *optional*): A list of keys to ignore when moving inputs or outputs between devices. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. 
This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. force_hooks (`bool`, *optional*, defaults to `False`): Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a single device. strict (`bool`, *optional*, defaults to `False`): Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's state_dict. Example: ```python >>> from accelerate import init_empty_weights, load_checkpoint_and_dispatch >>> from huggingface_hub import hf_hub_download >>> from transformers import AutoConfig, AutoModelForCausalLM >>> # Download the Weights >>> checkpoint = "EleutherAI/gpt-j-6B" >>> weights_location = hf_hub_download(checkpoint, "pytorch_model.bin") >>> # Create a model and initialize it with empty weights >>> config = AutoConfig.from_pretrained(checkpoint) >>> with init_empty_weights(): ... model = AutoModelForCausalLM.from_config(config) >>> # Load the checkpoint and dispatch it to the right devices >>> model = load_checkpoint_and_dispatch( ... model, weights_location, device_map="auto", no_split_module_classes=["GPTJBlock"] ... ) ``` """ if isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " "'sequential'." ) if isinstance(device_map, str): if device_map != "sequential": max_memory = get_balanced_memory( model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, dtype=dtype, low_zero=(device_map == "balanced_low_0"), ) device_map = infer_auto_device_map( model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, dtype=dtype, offload_buffers=offload_buffers, ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): offload_state_dict = True load_checkpoint_in_model( model, checkpoint, device_map=device_map, offload_folder=offload_folder, dtype=dtype, offload_state_dict=offload_state_dict, offload_buffers=offload_buffers, strict=strict, ) if device_map is None: return model return dispatch_model( model, device_map=device_map, offload_dir=offload_folder, offload_buffers=offload_buffers, skip_keys=skip_keys, preload_module_classes=preload_module_classes, force_hooks=force_hooks, )
accelerate/src/accelerate/big_modeling.py/0
{ "file_path": "accelerate/src/accelerate/big_modeling.py", "repo_id": "accelerate", "token_count": 11505 }
# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet """ import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class CursorInfo(ctypes.Structure): # _fields is a specific attr expected by ctypes _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] def hide_cursor(): if os.name == "nt": ci = CursorInfo() handle = ctypes.windll.kernel32.GetStdHandle(-11) ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci)) ci.visible = False ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci)) elif os.name == "posix": sys.stdout.write("\033[?25l") sys.stdout.flush() def show_cursor(): if os.name == "nt": ci = CursorInfo() handle = ctypes.windll.kernel32.GetStdHandle(-11) ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci)) ci.visible = True ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci)) elif os.name == "posix": sys.stdout.write("\033[?25h") sys.stdout.flush() @contextmanager def hide(): "Context manager to hide the terminal cursor" try: hide_cursor() yield finally: show_cursor()
accelerate/src/accelerate/commands/menu/cursor.py/0
{ "file_path": "accelerate/src/accelerate/commands/menu/cursor.py", "repo_id": "accelerate", "token_count": 763 }
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import torch from .state import AcceleratorState, GradientState from .utils import DistributedType, honor_type, is_lomo_available, is_torch_xla_available if is_torch_xla_available(): import torch_xla.core.xla_model as xm def move_to_device(state, device): if isinstance(state, (list, tuple)): return honor_type(state, (move_to_device(t, device) for t in state)) elif isinstance(state, dict): return type(state)({k: move_to_device(v, device) for k, v in state.items()}) elif isinstance(state, torch.Tensor): return state.to(device) return state class AcceleratedOptimizer(torch.optim.Optimizer): """ Internal wrapper around a torch optimizer. Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient accumulation. Args: optimizer (`torch.optim.optimizer.Optimizer`): The optimizer to wrap. device_placement (`bool`, *optional*, defaults to `True`): Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of `optimizer` on the right device. scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*): The scaler to use in the step function if training with mixed precision. 
""" def __init__(self, optimizer, device_placement=True, scaler=None): self.optimizer = optimizer self.scaler = scaler self.accelerator_state = AcceleratorState() self.gradient_state = GradientState() self.device_placement = device_placement self._is_overflow = False if self.scaler is not None: self._accelerate_step_called = False self._optimizer_original_step_method = self.optimizer.step self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step) # Handle device placement if device_placement: state_dict = self.optimizer.state_dict() if self.accelerator_state.distributed_type == DistributedType.XLA: xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) else: state_dict = move_to_device(state_dict, self.accelerator_state.device) self.optimizer.load_state_dict(state_dict) @property def state(self): return self.optimizer.state @state.setter def state(self, state): self.optimizer.state = state @property def param_groups(self): return self.optimizer.param_groups @param_groups.setter def param_groups(self, param_groups): self.optimizer.param_groups = param_groups @property def defaults(self): return self.optimizer.defaults @defaults.setter def defaults(self, defaults): self.optimizer.defaults = defaults def add_param_group(self, param_group): self.optimizer.add_param_group(param_group) def load_state_dict(self, state_dict): if self.accelerator_state.distributed_type == DistributedType.XLA and self.device_placement: xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) self.optimizer.load_state_dict(state_dict) def state_dict(self): return self.optimizer.state_dict() def zero_grad(self, set_to_none=None): if self.gradient_state.sync_gradients: accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters if accept_arg: if set_to_none is None: set_to_none = True self.optimizer.zero_grad(set_to_none=set_to_none) else: if set_to_none is not None: raise ValueError("`set_to_none` for Optimizer.zero_grad` is not supported by this optimizer.") self.optimizer.zero_grad() def train(self): """ Sets the optimizer to "train" mode. Useful for optimizers like `schedule_free` """ if hasattr(self.optimizer, "train") and callable(self.optimizer.train): self.optimizer.train() elif ( hasattr(self.optimizer, "optimizer") and hasattr(self.optimizer.optimizer, "train") and callable(self.optimizer.optimizer.train) ): # the deepspeed optimizer further wraps the optimizer self.optimizer.optimizer.train() def eval(self): """ Sets the optimizer to "eval" mode. Useful for optimizers like `schedule_free` """ if hasattr(self.optimizer, "eval") and callable(self.optimizer.eval): self.optimizer.eval() def step(self, closure=None): if is_lomo_available(): from lomo_optim import AdaLomo, Lomo if ( not self.gradient_state.is_xla_gradients_synced and self.accelerator_state.distributed_type == DistributedType.XLA ): gradients = xm._fetch_gradients(self.optimizer) xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size()) self.gradient_state.is_xla_gradients_synced = True if is_lomo_available(): # `step` should be a no-op for LOMO optimizers. if isinstance(self.optimizer, (Lomo, AdaLomo)): return if self.gradient_state.sync_gradients: if self.scaler is not None: self.optimizer.step = self._optimizer_patched_step_method self.scaler.step(self.optimizer, closure) self.scaler.update() if not self._accelerate_step_called: # If the optimizer step was skipped, gradient overflow was detected. 
self._is_overflow = True else: self._is_overflow = False # Reset the step method to the original one self.optimizer.step = self._optimizer_original_step_method # Reset the indicator self._accelerate_step_called = False else: self.optimizer.step(closure) if self.accelerator_state.distributed_type == DistributedType.XLA: self.gradient_state.is_xla_gradients_synced = False def _switch_parameters(self, parameters_map): for param_group in self.optimizer.param_groups: param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]] @property def step_was_skipped(self): """Whether or not the optimizer step was skipped.""" return self._is_overflow def __getstate__(self): _ignored_keys = [ "_accelerate_step_called", "_optimizer_original_step_method", "_optimizer_patched_step_method", ] return {k: v for k, v in self.__dict__.items() if k not in _ignored_keys} def __setstate__(self, state): self.__dict__.update(state) if self.scaler is not None: self._accelerate_step_called = False self._optimizer_original_step_method = self.optimizer.step self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step) def patch_optimizer_step(accelerated_optimizer: AcceleratedOptimizer, method): def patched_step(*args, **kwargs): accelerated_optimizer._accelerate_step_called = True return method(*args, **kwargs) return patched_step
accelerate/src/accelerate/optimizer.py/0
{ "file_path": "accelerate/src/accelerate/optimizer.py", "repo_id": "accelerate", "token_count": 3435 }
#!/usr/bin/env python # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pickle import tempfile import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import ( BatchSampler, DataLoader, Dataset, IterableDataset, RandomSampler, TensorDataset, default_collate, ) from accelerate.accelerator import Accelerator, DataLoaderConfiguration from accelerate.utils.dataclasses import DistributedType NUM_ELEMENTS = 22 NUM_WORKERS = 4 BATCH_SIZE = 4 class DummyDataset(Dataset): def __len__(self): return NUM_ELEMENTS def __getitem__(self, index): squeeze = False if isinstance(index, int): index = [index] squeeze = True elif isinstance(index, slice): index = list(range(*index.indices(self.size))) else: index = list(index) batch = [{"index": i, "label": i % 2, "random_augmentation": torch.rand(1).item()} for i in index] if squeeze: batch = batch[0] return batch class DummyIterableDataset(IterableDataset): def __init__(self, data): self.data = data def __iter__(self): yield from self.data def create_accelerator(even_batches=True): dataloader_config = DataLoaderConfiguration(even_batches=even_batches) accelerator = Accelerator(dataloader_config=dataloader_config) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def create_dataloader( accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False, shuffle: bool = False ): """ Create a simple DataLoader to use during the test cases """ values = torch.as_tensor(range(dataset_size)) if shuffle: values = values[torch.randperm(values.size(0))] if iterable: dataset = DummyIterableDataset(values) else: dataset = TensorDataset(torch.as_tensor(range(dataset_size))) dl = DataLoader(dataset, batch_size=batch_size) dl = accelerator.prepare(dl) return dl def verify_dataloader_batch_sizes( accelerator: Accelerator, dataset_size: int, batch_size: int, process_0_expected_batch_sizes: List[int], process_1_expected_batch_sizes: List[int], ): """ A helper function for verifying the batch sizes coming from a prepared dataloader in each process """ dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size) batch_sizes = [len(batch[0]) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def test_default_ensures_even_batch_sizes(): accelerator = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1], ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2], ) def 
test_can_disable_even_batches(): accelerator = create_accelerator(even_batches=False) verify_dataloader_batch_sizes( accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1], ) verify_dataloader_batch_sizes( accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1], ) def test_can_join_uneven_inputs(): accelerator = create_accelerator(even_batches=False) model = torch.nn.Linear(1, 1) ddp_model = accelerator.prepare(model) dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) batch_idxs = [] with accelerator.join_uneven_inputs([ddp_model]): for batch_idx, batch in enumerate(dl): output = ddp_model(batch[0].float()) loss = output.sum() loss.backward() batch_idxs.append(batch_idx) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def test_join_raises_warning_for_non_ddp_distributed(accelerator): with warnings.catch_warnings(record=True) as w: with accelerator.join_uneven_inputs([Mock()]): pass assert issubclass(w[-1].category, UserWarning) assert "only supported for multi-GPU" in str(w[-1].message) def test_join_can_override_even_batches(): default_even_batches = True overridden_even_batches = False accelerator = create_accelerator(even_batches=default_even_batches) model = torch.nn.Linear(1, 1) ddp_model = accelerator.prepare(model) train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches): train_dl_overridden_value = train_dl.batch_sampler.even_batches valid_dl_overridden_value = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def test_join_can_override_for_mixed_type_dataloaders(): default_even_batches = True overridden_even_batches = False accelerator = create_accelerator(even_batches=default_even_batches) model = torch.nn.Linear(1, 1) ddp_model = accelerator.prepare(model) create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True) batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) with warnings.catch_warnings(): warnings.filterwarnings("ignore") try: with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches): batch_dl_overridden_value = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def test_join_raises_warning_for_iterable_when_overriding_even_batches(): accelerator = create_accelerator() model = torch.nn.Linear(1, 1) ddp_model = accelerator.prepare(model) create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True) with warnings.catch_warnings(record=True) as w: with accelerator.join_uneven_inputs([ddp_model], even_batches=False): pass assert issubclass(w[-1].category, UserWarning) assert "only supported for map-style datasets" in str(w[-1].message) def test_pickle_accelerator(): accelerator = create_accelerator() data_loader = 
create_dataloader(accelerator, dataset_size=32, batch_size=4) _ = accelerator.prepare(data_loader) pickled_accelerator = pickle.dumps(accelerator) unpickled_accelerator = pickle.loads(pickled_accelerator) # TODO: Maybe this should be implemented as __eq__ for AcceleratorState? assert accelerator.state.__dict__ == unpickled_accelerator.state.__dict__ def test_data_loader(data_loader, accelerator): # Prepare the DataLoader data_loader = accelerator.prepare(data_loader) all_examples = [] for i, batch in enumerate(data_loader): index, _ = accelerator.gather_for_metrics((batch["index"], batch["label"])) all_examples.extend(index.detach().cpu().numpy().tolist()) # Sort the examples sorted_all_examples = sorted(all_examples) # Check if all elements are present in the sorted list of iterated samples assert ( len(set(sorted_all_examples)) == NUM_ELEMENTS ), "Not all the dataset elements have been iterated in an epoch due to duplication of samples across processes." def test_stateful_dataloader(accelerator): """ Tests that a stateful dataloader can be iterated over, saved after a few batches using `load_state_dict`, and then resumed from the saved state. The result should be the same as the rest of the data that iterated over after saving. """ old_dataloader_config = accelerator.dataloader_config try: accelerator.dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=True) prepared_dl = create_dataloader( accelerator, dataset_size=32 * accelerator.num_processes, batch_size=4, iterable=True, shuffle=True ) untrained_batches = [] # Calculate what step that will be total_batches = 32 * accelerator.num_processes // (4 * accelerator.num_processes) last_batch_num = total_batches - 1 for step, batch in enumerate(prepared_dl): # Step just before if step == last_batch_num - 1: state_dict = prepared_dl.state_dict() if step >= last_batch_num: # Otherwise grab the "unseen" batches untrained_batches.append(batch) not_skipped_batches = accelerator.gather(untrained_batches) prepared_dl.load_state_dict(state_dict) resumed_batches = [] for batch in prepared_dl: resumed_batches.append(batch) resumed_batches = accelerator.gather(resumed_batches) for b1, b2 in zip(not_skipped_batches, resumed_batches): for v1, v2 in zip(b1, b2): assert torch.equal(v1, v2), f"Batch {b1} and {b2} are not equal" finally: accelerator.dataloader_config = old_dataloader_config def test_stateful_dataloader_save_state(accelerator): """ Tests that a stateful dataloader can be iterated over, saved after a few batches using `Accelerator.save_state`, and then resumed from the saved state. The result should be the same as the rest of the data that iterated over after saving. 
""" old_dataloader_config = accelerator.dataloader_config try: with tempfile.TemporaryDirectory() as tmpdir: accelerator.dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=True) prepared_dl = create_dataloader( accelerator, dataset_size=32 * accelerator.num_processes, batch_size=4, iterable=True, shuffle=True ) untrained_batches = [] # Calculate what step that will be total_batches = 32 * accelerator.num_processes // (4 * accelerator.num_processes) last_batch_num = total_batches - 1 for step, batch in enumerate(prepared_dl): # Step just before if step == last_batch_num - 1: accelerator.save_state(tmpdir) if step >= last_batch_num: # Otherwise grab the "unseen" batches untrained_batches.append(batch) not_skipped_batches = accelerator.gather(untrained_batches) accelerator.load_state(tmpdir) resumed_batches = [] for batch in prepared_dl: resumed_batches.append(batch) resumed_batches = accelerator.gather(resumed_batches) for b1, b2 in zip(not_skipped_batches, resumed_batches): for v1, v2 in zip(b1, b2): assert torch.equal(v1, v2), f"Batch {b1} and {b2} are not equal" finally: accelerator.dataloader_config = old_dataloader_config def main(): accelerator = create_accelerator() torch.manual_seed(accelerator.process_index) accelerator.print("Test that even_batches variable ensures uniform batches across processes") test_default_ensures_even_batch_sizes() accelerator.print("Run tests with even_batches disabled") test_can_disable_even_batches() accelerator.print("Test joining uneven inputs") test_can_join_uneven_inputs() accelerator.print("Test overriding even_batches when joining uneven inputs") test_join_can_override_even_batches() accelerator.print("Test overriding even_batches for mixed dataloader types") test_join_can_override_for_mixed_type_dataloaders() accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders") test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("Test join with non DDP distributed raises warning") original_state = accelerator.state.distributed_type accelerator.state.distributed_type = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(accelerator) accelerator.state.distributed_type = original_state accelerator.print("Test pickling an accelerator") test_pickle_accelerator() dataset = DummyDataset() # Conventional Dataloader with shuffle=False loader = DataLoader(dataset, shuffle=False, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS) test_data_loader(loader, accelerator) # Conventional Dataloader with shuffle=True loader = DataLoader(dataset, shuffle=True, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS) test_data_loader(loader, accelerator) # Dataloader with batch_sampler sampler = BatchSampler(RandomSampler(dataset), batch_size=BATCH_SIZE, drop_last=False) loader = DataLoader(dataset, batch_sampler=sampler, num_workers=NUM_WORKERS) test_data_loader(loader, accelerator) # Dataloader with sampler as an instance of `BatchSampler` sampler = BatchSampler(RandomSampler(dataset), batch_size=BATCH_SIZE, drop_last=False) loader = DataLoader(dataset, sampler=sampler, batch_size=None, collate_fn=default_collate, num_workers=NUM_WORKERS) test_data_loader(loader, accelerator) test_stateful_dataloader(accelerator) test_stateful_dataloader_save_state(accelerator) accelerator.end_training() if __name__ == "__main__": main()
accelerate/src/accelerate/test_utils/scripts/test_distributed_data_loop.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/test_distributed_data_loop.py", "repo_id": "accelerate", "token_count": 5904 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import importlib.metadata import os import warnings from functools import lru_cache, wraps import torch from packaging import version from packaging.version import parse from .environment import parse_flag_from_env, patch_environment, str_to_bool from .versions import compare_versions, is_torch_version # Try to run Torch native job in an environment with TorchXLA installed by setting this value to 0. USE_TORCH_XLA = parse_flag_from_env("USE_TORCH_XLA", default=True) _torch_xla_available = False if USE_TORCH_XLA: try: import torch_xla.core.xla_model as xm # noqa: F401 import torch_xla.runtime _torch_xla_available = True except ImportError: pass # Keep it for is_tpu_available. It will be removed along with is_tpu_available. _tpu_available = _torch_xla_available # Cache this result has it's a C FFI call which can be pretty time-consuming _torch_distributed_available = torch.distributed.is_available() def _is_package_available(pkg_name, metadata_name=None): # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version package_exists = importlib.util.find_spec(pkg_name) is not None if package_exists: try: # Some libraries have different names in the metadata _ = importlib.metadata.metadata(pkg_name if metadata_name is None else metadata_name) return True except importlib.metadata.PackageNotFoundError: return False def is_torch_distributed_available() -> bool: return _torch_distributed_available def is_ccl_available(): try: pass except ImportError: print( "Intel(R) oneCCL Bindings for PyTorch* is required to run DDP on Intel(R) GPUs, but it is not" " detected. If you see \"ValueError: Invalid backend: 'ccl'\" error, please install Intel(R) oneCCL" " Bindings for PyTorch*." ) return ( importlib.util.find_spec("torch_ccl") is not None or importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None ) def get_ccl_version(): return importlib.metadata.version("oneccl_bind_pt") def is_import_timer_available(): return _is_package_available("import_timer") def is_pynvml_available(): return _is_package_available("pynvml") or _is_package_available("pynvml", "nvidia-ml-py") def is_pytest_available(): return _is_package_available("pytest") def is_msamp_available(): return _is_package_available("msamp", "ms-amp") def is_schedulefree_available(): return _is_package_available("schedulefree") def is_transformer_engine_available(): return _is_package_available("transformer_engine", "transformer-engine") def is_lomo_available(): return _is_package_available("lomo_optim") def is_fp8_available(): return is_msamp_available() or is_transformer_engine_available() def is_cuda_available(): """ Checks if `cuda` is available via an `nvml-based` check which won't trigger the drivers and leave cuda uninitialized. 
""" with patch_environment(PYTORCH_NVML_BASED_CUDA_CHECK="1"): available = torch.cuda.is_available() return available @lru_cache def is_torch_xla_available(check_is_tpu=False, check_is_gpu=False): """ Check if `torch_xla` is available. To train a native pytorch job in an environment with torch xla installed, set the USE_TORCH_XLA to false. """ assert not (check_is_tpu and check_is_gpu), "The check_is_tpu and check_is_gpu cannot both be true." if not _torch_xla_available: return False elif check_is_gpu: return torch_xla.runtime.device_type() in ["GPU", "CUDA"] elif check_is_tpu: return torch_xla.runtime.device_type() == "TPU" return True def is_deepspeed_available(): if is_mlu_available(): return _is_package_available("deepspeed", metadata_name="deepspeed-mlu") return _is_package_available("deepspeed") def is_pippy_available(): return is_torch_version(">=", "2.4.0") def is_bf16_available(ignore_tpu=False): "Checks if bf16 is supported, optionally ignoring the TPU" if is_torch_xla_available(check_is_tpu=True): return not ignore_tpu if is_cuda_available(): return torch.cuda.is_bf16_supported() if is_mps_available(): return False return True def is_4bit_bnb_available(): package_exists = _is_package_available("bitsandbytes") if package_exists: bnb_version = version.parse(importlib.metadata.version("bitsandbytes")) return compare_versions(bnb_version, ">=", "0.39.0") return False def is_8bit_bnb_available(): package_exists = _is_package_available("bitsandbytes") if package_exists: bnb_version = version.parse(importlib.metadata.version("bitsandbytes")) return compare_versions(bnb_version, ">=", "0.37.2") return False def is_bnb_available(min_version=None): package_exists = _is_package_available("bitsandbytes") if package_exists and min_version is not None: bnb_version = version.parse(importlib.metadata.version("bitsandbytes")) return compare_versions(bnb_version, ">=", min_version) else: return package_exists def is_bitsandbytes_multi_backend_available(): if not is_bnb_available(): return False import bitsandbytes as bnb return "multi_backend" in getattr(bnb, "features", set()) def is_torchvision_available(): return _is_package_available("torchvision") def is_megatron_lm_available(): if str_to_bool(os.environ.get("ACCELERATE_USE_MEGATRON_LM", "False")) == 1: if importlib.util.find_spec("megatron") is not None: try: megatron_version = parse(importlib.metadata.version("megatron-core")) if compare_versions(megatron_version, ">=", "0.8.0"): return importlib.util.find_spec(".training", "megatron") except Exception as e: warnings.warn(f"Parse Megatron version failed. 
Exception:{e}") return False def is_transformers_available(): return _is_package_available("transformers") def is_datasets_available(): return _is_package_available("datasets") def is_peft_available(): return _is_package_available("peft") def is_timm_available(): return _is_package_available("timm") def is_triton_available(): if is_xpu_available(): return _is_package_available("triton", "pytorch-triton-xpu") return _is_package_available("triton") def is_aim_available(): package_exists = _is_package_available("aim") if package_exists: aim_version = version.parse(importlib.metadata.version("aim")) return compare_versions(aim_version, "<", "4.0.0") return False def is_tensorboard_available(): return _is_package_available("tensorboard") or _is_package_available("tensorboardX") def is_wandb_available(): return _is_package_available("wandb") def is_comet_ml_available(): return _is_package_available("comet_ml") def is_boto3_available(): return _is_package_available("boto3") def is_rich_available(): if _is_package_available("rich"): return parse_flag_from_env("ACCELERATE_ENABLE_RICH", False) return False def is_sagemaker_available(): return _is_package_available("sagemaker") def is_tqdm_available(): return _is_package_available("tqdm") def is_clearml_available(): return _is_package_available("clearml") def is_pandas_available(): return _is_package_available("pandas") def is_mlflow_available(): if _is_package_available("mlflow"): return True if importlib.util.find_spec("mlflow") is not None: try: _ = importlib.metadata.metadata("mlflow-skinny") return True except importlib.metadata.PackageNotFoundError: return False return False def is_mps_available(min_version="1.12"): "Checks if MPS device is available. The minimum version required is 1.12." # With torch 1.12, you can use torch.backends.mps # With torch 2.0.0, you can use torch.mps return is_torch_version(">=", min_version) and torch.backends.mps.is_available() and torch.backends.mps.is_built() def is_ipex_available(): "Checks if ipex is installed." def get_major_and_minor_from_version(full_version): return str(version.parse(full_version).major) + "." + str(version.parse(full_version).minor) _torch_version = importlib.metadata.version("torch") if importlib.util.find_spec("intel_extension_for_pytorch") is None: return False _ipex_version = "N/A" try: _ipex_version = importlib.metadata.version("intel_extension_for_pytorch") except importlib.metadata.PackageNotFoundError: return False torch_major_and_minor = get_major_and_minor_from_version(_torch_version) ipex_major_and_minor = get_major_and_minor_from_version(_ipex_version) if torch_major_and_minor != ipex_major_and_minor: warnings.warn( f"Intel Extension for PyTorch {ipex_major_and_minor} needs to work with PyTorch {ipex_major_and_minor}.*," f" but PyTorch {_torch_version} is found. Please switch to the matching version and run again." ) return False return True @lru_cache def is_mlu_available(check_device=False): """ Checks if `mlu` is available via an `cndev-based` check which won't trigger the drivers and leave mlu uninitialized. 
""" if importlib.util.find_spec("torch_mlu") is None: return False import torch_mlu # noqa: F401 with patch_environment(PYTORCH_CNDEV_BASED_MLU_CHECK="1"): available = torch.mlu.is_available() return available @lru_cache def is_musa_available(check_device=False): "Checks if `torch_musa` is installed and potentially if a MUSA is in the environment" if importlib.util.find_spec("torch_musa") is None: return False import torch_musa # noqa: F401 if check_device: try: # Will raise a RuntimeError if no MUSA is found _ = torch.musa.device_count() return torch.musa.is_available() except RuntimeError: return False return hasattr(torch, "musa") and torch.musa.is_available() @lru_cache def is_npu_available(check_device=False): "Checks if `torch_npu` is installed and potentially if a NPU is in the environment" if importlib.util.find_spec("torch_npu") is None: return False import torch_npu # noqa: F401 if check_device: try: # Will raise a RuntimeError if no NPU is found _ = torch.npu.device_count() return torch.npu.is_available() except RuntimeError: return False return hasattr(torch, "npu") and torch.npu.is_available() @lru_cache def is_xpu_available(check_device=False): """ Checks if XPU acceleration is available either via `intel_extension_for_pytorch` or via stock PyTorch (>=2.4) and potentially if a XPU is in the environment """ "check if user disables it explicitly" if not parse_flag_from_env("ACCELERATE_USE_XPU", default=True): return False if is_ipex_available(): import intel_extension_for_pytorch # noqa: F401 else: if is_torch_version("<=", "2.3"): return False if check_device: try: # Will raise a RuntimeError if no XPU is found _ = torch.xpu.device_count() return torch.xpu.is_available() except RuntimeError: return False return hasattr(torch, "xpu") and torch.xpu.is_available() def is_dvclive_available(): return _is_package_available("dvclive") def is_torchdata_available(): return _is_package_available("torchdata") # TODO: Remove this function once stateful_dataloader is a stable feature in torchdata. def is_torchdata_stateful_dataloader_available(): package_exists = _is_package_available("torchdata") if package_exists: torchdata_version = version.parse(importlib.metadata.version("torchdata")) return compare_versions(torchdata_version, ">=", "0.8.0") return False # TODO: Rework this into `utils.deepspeed` and migrate the "core" chunks into `accelerate.deepspeed` def deepspeed_required(func): """ A decorator that ensures the decorated function is only called when deepspeed is enabled. """ @wraps(func) def wrapper(*args, **kwargs): from accelerate.state import AcceleratorState from accelerate.utils.dataclasses import DistributedType if AcceleratorState._shared_state != {} and AcceleratorState().distributed_type != DistributedType.DEEPSPEED: raise ValueError( "DeepSpeed is not enabled, please make sure that an `Accelerator` is configured for `deepspeed` " "before calling this function." ) return func(*args, **kwargs) return wrapper def is_weights_only_available(): # Weights only with allowlist was added in 2.4.0 # ref: https://github.com/pytorch/pytorch/pull/124331 return is_torch_version(">=", "2.4.0") def is_numpy_available(min_version="1.25.0"): numpy_version = parse(importlib.metadata.version("numpy")) return compare_versions(numpy_version, ">=", min_version)
accelerate/src/accelerate/utils/imports.py/0
{ "file_path": "accelerate/src/accelerate/utils/imports.py", "repo_id": "accelerate", "token_count": 5400 }
{ "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "bf16": { "enabled": "auto" }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "weight_decay": "auto", "torch_adam": true, "adam_w_mode": true } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": "auto" }, "gradient_accumulation_steps": 1, "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false }
accelerate/tests/deepspeed/ds_config_zero3.json/0
{ "file_path": "accelerate/tests/deepspeed/ds_config_zero3.json", "repo_id": "accelerate", "token_count": 825 }
use crate::benchmarks::{BenchDevice, BenchDeviceHandler}; use candle_core::{ quantized::{self, GgmlDType, QMatMul}, Device, Module, Tensor, }; use criterion::{black_box, criterion_group, Criterion, Throughput}; use std::time::Instant; fn run(matmul: &QMatMul, x: &Tensor) { matmul.forward(x).unwrap(); } fn run_bench(c: &mut Criterion, device: &Device, dtype: GgmlDType) { let b = 1; let m = 1; let n = 1024; let k = 1024; let lhs = (0..(m * k)) .map(|v| v as f32 / (m * k) as f32) .collect::<Vec<_>>(); let rhs = (0..(k * n)) .map(|v| v as f32 / (n * k) as f32) .collect::<Vec<_>>(); let lhs = Tensor::from_slice(&lhs, (m, k), device).unwrap(); let rhs = Tensor::from_slice(&rhs, (k, n), device).unwrap(); let qtensor = quantized::QTensor::quantize(&rhs.t().unwrap(), dtype).unwrap(); let matmul = quantized::QMatMul::from_qtensor(qtensor).unwrap(); let flops = b * m * n * k; let mut group = c.benchmark_group(device.bench_name(format!("qmatmul_{:?}", dtype))); group.sample_size(200); group.throughput(Throughput::Bytes(flops as u64)); group.bench_function("iter", move |b| { b.iter_custom(|iters| { let start = Instant::now(); for _i in 0..iters { run(black_box(&matmul), black_box(&lhs)); } device.sync().unwrap(); start.elapsed() }) }); group.finish(); } fn criterion_benchmark(c: &mut Criterion) { let handler = BenchDeviceHandler::new().unwrap(); for device in handler.devices { for dtype in [ GgmlDType::F32, GgmlDType::F16, GgmlDType::Q4_0, GgmlDType::Q4_1, GgmlDType::Q5_0, GgmlDType::Q5_1, GgmlDType::Q8_0, GgmlDType::Q2K, GgmlDType::Q3K, GgmlDType::Q4K, GgmlDType::Q5K, GgmlDType::Q6K, ] { run_bench(c, &device, dtype); } } } criterion_group!(benches, criterion_benchmark);
candle/candle-core/benches/benchmarks/qmatmul.rs/0
{ "file_path": "candle/candle-core/benches/benchmarks/qmatmul.rs", "repo_id": "candle", "token_count": 1085 }
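The benchmark above exercises candle's quantized matmul through Criterion. As a quick orientation, here is a minimal sketch of the same path outside the benchmark harness; it only reuses APIs that already appear in the file above (`Tensor::from_slice`, `QTensor::quantize`, `QMatMul::from_qtensor`, `Module::forward`) and assumes a small binary crate that depends on `candle-core`:

```rust
// Minimal sketch (assumed binary crate depending on candle-core): quantize a
// weight matrix and run a single quantized matmul, mirroring the benchmark.
use candle_core::{
    quantized::{self, GgmlDType, QMatMul},
    Device, Module, Result, Tensor,
};

fn main() -> Result<()> {
    let device = Device::Cpu;
    let (m, k, n) = (1usize, 1024usize, 1024usize);

    // Same ramp-valued inputs as the benchmark: f32 activations (lhs) and an
    // f32 weight matrix (rhs) that is quantized to Q4_0 below.
    let lhs: Vec<f32> = (0..m * k).map(|v| v as f32 / (m * k) as f32).collect();
    let rhs: Vec<f32> = (0..k * n).map(|v| v as f32 / (k * n) as f32).collect();
    let lhs = Tensor::from_slice(&lhs, (m, k), &device)?;
    let rhs = Tensor::from_slice(&rhs, (k, n), &device)?;

    // As in the benchmark, the weight is quantized in transposed (n, k) layout
    // before being wrapped in a QMatMul.
    let qtensor = quantized::QTensor::quantize(&rhs.t()?, GgmlDType::Q4_0)?;
    let matmul = QMatMul::from_qtensor(qtensor)?;

    // (m, k) x (k, n) -> (m, n)
    let out = matmul.forward(&lhs)?;
    println!("output shape: {:?}", out.shape());
    Ok(())
}
```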
pub trait VecOps: num_traits::NumAssign + Copy { fn min(self, rhs: Self) -> Self; fn max(self, rhs: Self) -> Self; /// Dot-product of two vectors. /// /// # Safety /// /// The length of `lhs` and `rhs` have to be at least `len`. `res` has to point to a valid /// element. #[inline(always)] unsafe fn vec_dot(lhs: *const Self, rhs: *const Self, res: *mut Self, len: usize) { *res = Self::zero(); for i in 0..len { *res += *lhs.add(i) * *rhs.add(i) } } /// Sum of all elements in a vector. /// /// # Safety /// /// The length of `xs` must be at least `len`. `res` has to point to a valid /// element. #[inline(always)] unsafe fn vec_reduce_sum(xs: *const Self, res: *mut Self, len: usize) { *res = Self::zero(); for i in 0..len { *res += *xs.add(i) } } /// Maximum element in a non-empty vector. /// /// # Safety /// /// The length of `xs` must be at least `len` and positive. `res` has to point to a valid /// element. #[inline(always)] unsafe fn vec_reduce_max(xs: *const Self, res: *mut Self, len: usize) { *res = *xs; for i in 1..len { *res = (*res).max(*xs.add(i)) } } /// Minimum element in a non-empty vector. /// /// # Safety /// /// The length of `xs` must be at least `len` and positive. `res` has to point to a valid /// element. #[inline(always)] unsafe fn vec_reduce_min(xs: *const Self, res: *mut Self, len: usize) { *res = *xs; for i in 1..len { *res = (*res).min(*xs.add(i)) } } } impl VecOps for f32 { #[inline(always)] fn min(self, other: Self) -> Self { Self::min(self, other) } #[inline(always)] fn max(self, other: Self) -> Self { Self::max(self, other) } #[inline(always)] unsafe fn vec_dot(lhs: *const Self, rhs: *const Self, res: *mut Self, len: usize) { super::vec_dot_f32(lhs, rhs, res, len) } #[inline(always)] unsafe fn vec_reduce_sum(xs: *const Self, res: *mut Self, len: usize) { super::vec_sum(xs, res, len) } } impl VecOps for half::f16 { #[inline(always)] fn min(self, other: Self) -> Self { Self::min(self, other) } #[inline(always)] fn max(self, other: Self) -> Self { Self::max(self, other) } #[inline(always)] unsafe fn vec_dot(lhs: *const Self, rhs: *const Self, res: *mut Self, len: usize) { let mut res_f32 = 0f32; super::vec_dot_f16(lhs, rhs, &mut res_f32, len); *res = half::f16::from_f32(res_f32); } } impl VecOps for f64 { #[inline(always)] fn min(self, other: Self) -> Self { Self::min(self, other) } #[inline(always)] fn max(self, other: Self) -> Self { Self::max(self, other) } } impl VecOps for half::bf16 { #[inline(always)] fn min(self, other: Self) -> Self { Self::min(self, other) } #[inline(always)] fn max(self, other: Self) -> Self { Self::max(self, other) } } impl VecOps for u8 { #[inline(always)] fn min(self, other: Self) -> Self { <Self as Ord>::min(self, other) } #[inline(always)] fn max(self, other: Self) -> Self { <Self as Ord>::max(self, other) } } impl VecOps for u32 { #[inline(always)] fn min(self, other: Self) -> Self { <Self as Ord>::min(self, other) } #[inline(always)] fn max(self, other: Self) -> Self { <Self as Ord>::max(self, other) } } impl VecOps for i64 { #[inline(always)] fn min(self, other: Self) -> Self { <Self as Ord>::min(self, other) } #[inline(always)] fn max(self, other: Self) -> Self { <Self as Ord>::max(self, other) } } #[inline(always)] pub fn par_for_each(n_threads: usize, func: impl Fn(usize) + Send + Sync) { if n_threads == 1 { func(0) } else { rayon::scope(|s| { for thread_idx in 0..n_threads { let func = &func; s.spawn(move |_| func(thread_idx)); } }) } } #[inline(always)] pub fn par_range(lo: usize, up: usize, n_threads: usize, func: impl Fn(usize) 
+ Send + Sync) { if n_threads == 1 { for i in lo..up { func(i) } } else { rayon::scope(|s| { for thread_idx in 0..n_threads { let func = &func; s.spawn(move |_| { for i in (thread_idx..up).step_by(n_threads) { func(i) } }); } }) } }
candle/candle-core/src/cpu/kernels.rs/0
{ "file_path": "candle/candle-core/src/cpu/kernels.rs", "repo_id": "candle", "token_count": 2328 }
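The `par_for_each`/`par_range` helpers at the end of `kernels.rs` distribute a loop over threads by striding: thread `t` handles indices `t, t + n_threads, t + 2 * n_threads, ...`. The sketch below is a standalone illustration of that scheme with `lo` fixed to 0; `par_range_like` is a hypothetical name, and it uses `std::thread::scope` instead of `rayon::scope` so it compiles with no extra dependencies:

```rust
// Standalone illustration of the striding scheme used by `par_range` above
// (with `lo` fixed to 0). `par_range_like` is a hypothetical name; it uses
// std::thread::scope instead of rayon::scope so it has no extra dependencies.
use std::sync::atomic::{AtomicUsize, Ordering};

fn par_range_like(up: usize, n_threads: usize, func: impl Fn(usize) + Send + Sync) {
    if n_threads == 1 {
        for i in 0..up {
            func(i)
        }
    } else {
        std::thread::scope(|s| {
            for thread_idx in 0..n_threads {
                let func = &func;
                // Thread `t` handles indices t, t + n_threads, t + 2 * n_threads, ...
                s.spawn(move || {
                    for i in (thread_idx..up).step_by(n_threads) {
                        func(i)
                    }
                });
            }
        });
    }
}

fn main() {
    // Every index in 0..1000 should be visited exactly once across 4 threads.
    let visited = AtomicUsize::new(0);
    par_range_like(1000, 4, |_i| {
        visited.fetch_add(1, Ordering::Relaxed);
    });
    assert_eq!(visited.load(Ordering::Relaxed), 1000);
    println!("visited {} indices", visited.load(Ordering::Relaxed));
}
```

Striding rather than chunking keeps the work assignment identical to the original helper: each index is handled by exactly one thread.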
#![allow(dead_code)] use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT}; use crate::{CpuStorage, DType, Error, Layout, Result, Shape}; #[derive(Debug, Clone)] pub struct MetalDevice; #[derive(Debug)] pub struct MetalStorage; #[derive(thiserror::Error, Debug)] pub enum MetalError { #[error("{0}")] Message(String), } impl From<String> for MetalError { fn from(e: String) -> Self { MetalError::Message(e) } } macro_rules! fail { () => { unimplemented!("metal support has not been enabled, add `metal` feature to enable.") }; } impl crate::backend::BackendStorage for MetalStorage { type Device = MetalDevice; fn try_clone(&self, _: &Layout) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn dtype(&self) -> DType { fail!() } fn device(&self) -> &Self::Device { fail!() } fn to_cpu_storage(&self) -> Result<CpuStorage> { Err(Error::NotCompiledWithMetalSupport) } fn affine(&self, _: &Layout, _: f64, _: f64) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn powf(&self, _: &Layout, _: f64) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn elu(&self, _: &Layout, _: f64) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn reduce_op(&self, _: ReduceOp, _: &Layout, _: &[usize]) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn cmp(&self, _: CmpOp, _: &Self, _: &Layout, _: &Layout) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn to_dtype(&self, _: &Layout, _: DType) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn unary_impl<B: UnaryOpT>(&self, _: &Layout) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn binary_impl<B: BinaryOpT>(&self, _: &Self, _: &Layout, _: &Layout) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn where_cond(&self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn conv1d( &self, _: &Layout, _: &Self, _: &Layout, _: &crate::conv::ParamsConv1D, ) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn conv_transpose1d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &crate::conv::ParamsConvTranspose1D, ) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn conv2d( &self, _: &Layout, _: &Self, _: &Layout, _: &crate::conv::ParamsConv2D, ) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn conv_transpose2d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &crate::conv::ParamsConvTranspose2D, ) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn index_select(&self, _: &Self, _: &Layout, _: &Layout, _: usize) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn gather(&self, _: &Layout, _: &Self, _: &Layout, _: usize) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn scatter_add( &self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout, _: usize, ) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn index_add( &self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout, _: usize, ) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn matmul( &self, _: &Self, _: (usize, usize, usize, usize), _: &Layout, _: &Layout, ) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn copy_strided_src(&self, _: &mut Self, _: usize, _: &Layout) -> Result<()> { Err(Error::NotCompiledWithMetalSupport) } fn copy2d( &self, _: &mut Self, _: usize, _: usize, _: usize, _: usize, _: usize, _: usize, ) -> Result<()> { Err(Error::NotCompiledWithMetalSupport) } fn avg_pool2d(&self, _: &Layout, _: (usize, 
usize), _: (usize, usize)) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn max_pool2d(&self, _: &Layout, _: (usize, usize), _: (usize, usize)) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn upsample_nearest1d(&self, _: &Layout, _: usize) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn upsample_nearest2d(&self, _: &Layout, _: usize, _: usize) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } } impl crate::backend::BackendDevice for MetalDevice { type Storage = MetalStorage; fn new(_: usize) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn set_seed(&self, _: u64) -> Result<()> { Err(Error::NotCompiledWithMetalSupport) } fn location(&self) -> crate::DeviceLocation { fail!() } fn same_device(&self, _: &Self) -> bool { fail!() } fn zeros_impl(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage> { Err(Error::NotCompiledWithMetalSupport) } fn ones_impl(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage> { Err(Error::NotCompiledWithMetalSupport) } unsafe fn alloc_uninit(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage> { Err(Error::NotCompiledWithMetalSupport) } fn storage_from_slice<T: crate::WithDType>(&self, _: &[T]) -> Result<Self::Storage> { Err(Error::NotCompiledWithMetalSupport) } fn storage_from_cpu_storage(&self, _: &CpuStorage) -> Result<Self::Storage> { Err(Error::NotCompiledWithMetalSupport) } fn storage_from_cpu_storage_owned(&self, _: CpuStorage) -> Result<Self::Storage> { Err(Error::NotCompiledWithMetalSupport) } fn rand_uniform(&self, _: &Shape, _: DType, _: f64, _: f64) -> Result<Self::Storage> { Err(Error::NotCompiledWithMetalSupport) } fn rand_normal(&self, _: &Shape, _: DType, _: f64, _: f64) -> Result<Self::Storage> { Err(Error::NotCompiledWithMetalSupport) } fn synchronize(&self) -> Result<()> { Ok(()) } }
candle/candle-core/src/dummy_metal_backend.rs/0
{ "file_path": "candle/candle-core/src/dummy_metal_backend.rs", "repo_id": "candle", "token_count": 3042 }
//! Support for the [GGUF file format](https://github.com/philpax/ggml/blob/gguf-spec/docs/gguf.md). //! use super::{GgmlDType, QTensor}; use crate::{Context, Device, Result}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use std::collections::HashMap; pub const DEFAULT_ALIGNMENT: u64 = 32; #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum Magic { Gguf, } impl TryFrom<u32> for Magic { type Error = crate::Error; fn try_from(value: u32) -> Result<Self> { let magic = match value { 0x46554747 | 0x47475546 => Self::Gguf, _ => crate::bail!("unknown magic 0x{value:08x}"), }; Ok(magic) } } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum VersionedMagic { GgufV1, GgufV2, GgufV3, } impl VersionedMagic { fn read<R: std::io::Read>(reader: &mut R) -> Result<Self> { let magic = reader.read_u32::<LittleEndian>()?; let magic = Magic::try_from(magic)?; let version = reader.read_u32::<LittleEndian>()?; let versioned_magic = match (magic, version) { (Magic::Gguf, 1) => Self::GgufV1, (Magic::Gguf, 2) => Self::GgufV2, (Magic::Gguf, 3) => Self::GgufV3, _ => crate::bail!("gguf: unsupported magic/version {magic:?}/{version}"), }; Ok(versioned_magic) } } #[derive(Debug)] pub struct TensorInfo { pub ggml_dtype: GgmlDType, pub shape: crate::Shape, pub offset: u64, } impl TensorInfo { pub fn read<R: std::io::Seek + std::io::Read>( &self, reader: &mut R, tensor_data_offset: u64, device: &Device, ) -> Result<QTensor> { let tensor_elems = self.shape.elem_count(); let block_size = self.ggml_dtype.block_size(); if tensor_elems % block_size != 0 { crate::bail!( "the number of elements {tensor_elems} is not divisible by the block size {block_size}" ) } let size_in_bytes = tensor_elems / block_size * self.ggml_dtype.type_size(); let mut raw_data = vec![0u8; size_in_bytes]; reader.seek(std::io::SeekFrom::Start(tensor_data_offset + self.offset))?; reader.read_exact(&mut raw_data)?; super::ggml_file::qtensor_from_ggml( self.ggml_dtype, &raw_data, self.shape.dims().to_vec(), device, ) } } #[derive(Debug)] pub struct Content { pub magic: VersionedMagic, pub metadata: HashMap<String, Value>, pub tensor_infos: HashMap<String, TensorInfo>, pub tensor_data_offset: u64, } fn read_string<R: std::io::Read>(reader: &mut R, magic: &VersionedMagic) -> Result<String> { let len = match magic { VersionedMagic::GgufV1 => reader.read_u32::<LittleEndian>()? as usize, VersionedMagic::GgufV2 | VersionedMagic::GgufV3 => { reader.read_u64::<LittleEndian>()? as usize } }; let mut v = vec![0u8; len]; reader.read_exact(&mut v)?; // GGUF strings are supposed to be non-null terminated but in practice this happens. while let Some(0) = v.last() { v.pop(); } // GGUF strings are utf8 encoded but there are cases that don't seem to be valid. Ok(String::from_utf8_lossy(&v).into_owned()) } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum ValueType { // The value is a 8-bit unsigned integer. U8, // The value is a 8-bit signed integer. I8, // The value is a 16-bit unsigned little-endian integer. U16, // The value is a 16-bit signed little-endian integer. I16, // The value is a 32-bit unsigned little-endian integer. U32, // The value is a 32-bit signed little-endian integer. I32, // The value is a 64-bit unsigned little-endian integer. U64, // The value is a 64-bit signed little-endian integer. I64, // The value is a 32-bit IEEE754 floating point number. F32, // The value is a 64-bit IEEE754 floating point number. F64, // The value is a boolean. // 1-byte value where 0 is false and 1 is true. 
// Anything else is invalid, and should be treated as either the model being invalid or the reader being buggy. Bool, // The value is a UTF-8 non-null-terminated string, with length prepended. String, // The value is an array of other values, with the length and type prepended. // Arrays can be nested, and the length of the array is the number of elements in the array, not the number of bytes. Array, } #[derive(Debug, Clone)] pub enum Value { U8(u8), I8(i8), U16(u16), I16(i16), U32(u32), I32(i32), U64(u64), I64(i64), F32(f32), F64(f64), Bool(bool), String(String), Array(Vec<Value>), } impl Value { pub fn value_type(&self) -> ValueType { match self { Self::U8(_) => ValueType::U8, Self::I8(_) => ValueType::I8, Self::U16(_) => ValueType::U16, Self::I16(_) => ValueType::I16, Self::U32(_) => ValueType::U32, Self::I32(_) => ValueType::I32, Self::U64(_) => ValueType::U64, Self::I64(_) => ValueType::I64, Self::F32(_) => ValueType::F32, Self::F64(_) => ValueType::F64, Self::Bool(_) => ValueType::Bool, Self::String(_) => ValueType::String, Self::Array(_) => ValueType::Array, } } pub fn to_u8(&self) -> Result<u8> { match self { Self::U8(v) => Ok(*v), v => crate::bail!("not a u8 {v:?}"), } } pub fn to_i8(&self) -> Result<i8> { match self { Self::I8(v) => Ok(*v), v => crate::bail!("not a i8 {v:?}"), } } pub fn to_u16(&self) -> Result<u16> { match self { Self::U16(v) => Ok(*v), v => crate::bail!("not a u16 {v:?}"), } } pub fn to_i16(&self) -> Result<i16> { match self { Self::I16(v) => Ok(*v), v => crate::bail!("not a i16 {v:?}"), } } pub fn to_u32(&self) -> Result<u32> { match self { Self::U32(v) => Ok(*v), v => crate::bail!("not a u32 {v:?}"), } } pub fn to_i32(&self) -> Result<i32> { match self { Self::I32(v) => Ok(*v), v => crate::bail!("not a i32 {v:?}"), } } /// This will also automatically upcast any integral types which will not truncate. 
pub fn to_u64(&self) -> Result<u64> { match self { Self::U64(v) => Ok(*v), // Autoupcast cases here Self::U8(v) => Ok(*v as u64), Self::U16(v) => Ok(*v as u64), Self::U32(v) => Ok(*v as u64), Self::Bool(v) => Ok(*v as u64), v => crate::bail!("not a u64 or upcastable to u64 {v:?}"), } } pub fn to_i64(&self) -> Result<i64> { match self { Self::I64(v) => Ok(*v), v => crate::bail!("not a i64 {v:?}"), } } pub fn to_f32(&self) -> Result<f32> { match self { Self::F32(v) => Ok(*v), v => crate::bail!("not a f32 {v:?}"), } } pub fn to_f64(&self) -> Result<f64> { match self { Self::F64(v) => Ok(*v), v => crate::bail!("not a f64 {v:?}"), } } pub fn to_bool(&self) -> Result<bool> { match self { Self::Bool(v) => Ok(*v), v => crate::bail!("not a bool {v:?}"), } } pub fn to_vec(&self) -> Result<&Vec<Value>> { match self { Self::Array(v) => Ok(v), v => crate::bail!("not a vec {v:?}"), } } pub fn to_string(&self) -> Result<&String> { match self { Self::String(v) => Ok(v), v => crate::bail!("not a string {v:?}"), } } fn read<R: std::io::Read>( reader: &mut R, value_type: ValueType, magic: &VersionedMagic, ) -> Result<Self> { let v = match value_type { ValueType::U8 => Self::U8(reader.read_u8()?), ValueType::I8 => Self::I8(reader.read_i8()?), ValueType::U16 => Self::U16(reader.read_u16::<LittleEndian>()?), ValueType::I16 => Self::I16(reader.read_i16::<LittleEndian>()?), ValueType::U32 => Self::U32(reader.read_u32::<LittleEndian>()?), ValueType::I32 => Self::I32(reader.read_i32::<LittleEndian>()?), ValueType::U64 => Self::U64(reader.read_u64::<LittleEndian>()?), ValueType::I64 => Self::I64(reader.read_i64::<LittleEndian>()?), ValueType::F32 => Self::F32(reader.read_f32::<LittleEndian>()?), ValueType::F64 => Self::F64(reader.read_f64::<LittleEndian>()?), ValueType::Bool => match reader.read_u8()? { 0 => Self::Bool(false), 1 => Self::Bool(true), b => crate::bail!("unexpected bool value {b}"), }, ValueType::String => Self::String(read_string(reader, magic)?), ValueType::Array => { let value_type = reader.read_u32::<LittleEndian>()?; let value_type = ValueType::from_u32(value_type)?; let len = match magic { VersionedMagic::GgufV1 => reader.read_u32::<LittleEndian>()? as usize, VersionedMagic::GgufV2 | VersionedMagic::GgufV3 => { reader.read_u64::<LittleEndian>()? as usize } }; let mut vs = Vec::with_capacity(len); for _ in 0..len { vs.push(Value::read(reader, value_type, magic)?) } Self::Array(vs) } }; Ok(v) } fn write<W: std::io::Write>(&self, w: &mut W) -> Result<()> { match self { &Self::U8(v) => w.write_u8(v)?, &Self::I8(v) => w.write_i8(v)?, &Self::U16(v) => w.write_u16::<LittleEndian>(v)?, &Self::I16(v) => w.write_i16::<LittleEndian>(v)?, &Self::U32(v) => w.write_u32::<LittleEndian>(v)?, &Self::I32(v) => w.write_i32::<LittleEndian>(v)?, &Self::U64(v) => w.write_u64::<LittleEndian>(v)?, &Self::I64(v) => w.write_i64::<LittleEndian>(v)?, &Self::F32(v) => w.write_f32::<LittleEndian>(v)?, &Self::F64(v) => w.write_f64::<LittleEndian>(v)?, &Self::Bool(v) => w.write_u8(u8::from(v))?, Self::String(v) => write_string(w, v.as_str())?, Self::Array(v) => { // The `Value` type does not enforce that all the values in an Array have the same // type. let value_type = if v.is_empty() { // Doesn't matter, the array is empty. ValueType::U32 } else { let value_type: std::collections::HashSet<_> = v.iter().map(|elem| elem.value_type()).collect(); if value_type.len() != 1 { crate::bail!("multiple value-types in the same array {value_type:?}") } value_type.into_iter().next().context("empty value_type")? 
}; w.write_u32::<LittleEndian>(value_type.to_u32())?; w.write_u64::<LittleEndian>(v.len() as u64)?; for elem in v.iter() { elem.write(w)? } } } Ok(()) } } impl ValueType { fn from_u32(v: u32) -> Result<Self> { let v = match v { 0 => Self::U8, 1 => Self::I8, 2 => Self::U16, 3 => Self::I16, 4 => Self::U32, 5 => Self::I32, 6 => Self::F32, 7 => Self::Bool, 8 => Self::String, 9 => Self::Array, 10 => Self::U64, 11 => Self::I64, 12 => Self::F64, v => crate::bail!("unrecognized value-type {v:#08x}"), }; Ok(v) } fn to_u32(self) -> u32 { match self { Self::U8 => 0, Self::I8 => 1, Self::U16 => 2, Self::I16 => 3, Self::U32 => 4, Self::I32 => 5, Self::F32 => 6, Self::Bool => 7, Self::String => 8, Self::Array => 9, Self::U64 => 10, Self::I64 => 11, Self::F64 => 12, } } } impl Content { pub fn read<R: std::io::Seek + std::io::Read>(reader: &mut R) -> Result<Self> { let magic = VersionedMagic::read(reader)?; let tensor_count = match magic { VersionedMagic::GgufV1 => reader.read_u32::<LittleEndian>()? as usize, VersionedMagic::GgufV2 | VersionedMagic::GgufV3 => { reader.read_u64::<LittleEndian>()? as usize } }; let metadata_kv_count = match magic { VersionedMagic::GgufV1 => reader.read_u32::<LittleEndian>()? as usize, VersionedMagic::GgufV2 | VersionedMagic::GgufV3 => { reader.read_u64::<LittleEndian>()? as usize } }; let mut metadata = HashMap::new(); for _idx in 0..metadata_kv_count { let key = read_string(reader, &magic)?; let value_type = reader.read_u32::<LittleEndian>()?; let value_type = ValueType::from_u32(value_type)?; let value = Value::read(reader, value_type, &magic)?; metadata.insert(key, value); } let mut tensor_infos = HashMap::new(); for _idx in 0..tensor_count { let tensor_name = read_string(reader, &magic)?; let n_dimensions = reader.read_u32::<LittleEndian>()?; let mut dimensions: Vec<usize> = match magic { VersionedMagic::GgufV1 => { let mut dimensions = vec![0; n_dimensions as usize]; reader.read_u32_into::<LittleEndian>(&mut dimensions)?; dimensions.into_iter().map(|c| c as usize).collect() } VersionedMagic::GgufV2 | VersionedMagic::GgufV3 => { let mut dimensions = vec![0; n_dimensions as usize]; reader.read_u64_into::<LittleEndian>(&mut dimensions)?; dimensions.into_iter().map(|c| c as usize).collect() } }; dimensions.reverse(); let ggml_dtype = reader.read_u32::<LittleEndian>()?; let ggml_dtype = GgmlDType::from_u32(ggml_dtype)?; let offset = reader.read_u64::<LittleEndian>()?; tensor_infos.insert( tensor_name, TensorInfo { shape: crate::Shape::from(dimensions), offset, ggml_dtype, }, ); } let position = reader.stream_position()?; let alignment = match metadata.get("general.alignment") { Some(Value::U8(v)) => *v as u64, Some(Value::U16(v)) => *v as u64, Some(Value::U32(v)) => *v as u64, Some(Value::I8(v)) if *v >= 0 => *v as u64, Some(Value::I16(v)) if *v >= 0 => *v as u64, Some(Value::I32(v)) if *v >= 0 => *v as u64, _ => DEFAULT_ALIGNMENT, }; let tensor_data_offset = position.div_ceil(alignment) * alignment; Ok(Self { magic, metadata, tensor_infos, tensor_data_offset, }) } pub fn tensor<R: std::io::Seek + std::io::Read>( &self, reader: &mut R, name: &str, device: &Device, ) -> Result<QTensor> { let tensor_info = match self.tensor_infos.get(name) { Some(tensor_info) => tensor_info, None => crate::bail!("cannot find tensor info for {name}"), }; tensor_info.read(reader, self.tensor_data_offset, device) } } fn write_string<W: std::io::Write>(w: &mut W, str: &str) -> Result<()> { let bytes = str.as_bytes(); w.write_u64::<LittleEndian>(bytes.len() as u64)?; w.write_all(bytes)?; Ok(()) } 
pub fn write<W: std::io::Seek + std::io::Write>( w: &mut W, metadata: &[(&str, &Value)], tensors: &[(&str, &QTensor)], ) -> Result<()> { w.write_u32::<LittleEndian>(0x46554747)?; w.write_u32::<LittleEndian>(2)?; // version 2. w.write_u64::<LittleEndian>(tensors.len() as u64)?; w.write_u64::<LittleEndian>(metadata.len() as u64)?; for (name, value) in metadata.iter() { write_string(w, name)?; w.write_u32::<LittleEndian>(value.value_type().to_u32())?; value.write(w)?; } let mut offset = 0usize; let mut offsets = Vec::with_capacity(tensors.len()); for (name, tensor) in tensors.iter() { write_string(w, name)?; let dims = tensor.shape().dims(); w.write_u32::<LittleEndian>(dims.len() as u32)?; for &dim in dims.iter().rev() { w.write_u64::<LittleEndian>(dim as u64)?; } w.write_u32::<LittleEndian>(tensor.dtype().to_u32())?; w.write_u64::<LittleEndian>(offset as u64)?; offsets.push(offset); let size_in_bytes = tensor.storage_size_in_bytes(); let padding = 31 - (31 + size_in_bytes) % 32; offset += size_in_bytes + padding; } let pos = w.stream_position()? as usize; let padding = 31 - (31 + pos) % 32; w.write_all(&vec![0u8; padding])?; let tensor_start_pos = w.stream_position()? as usize; for (offset, (_name, tensor)) in offsets.iter().zip(tensors.iter()) { let pos = w.stream_position()? as usize; if tensor_start_pos + offset != pos { crate::bail!( "internal error, unexpected current position {tensor_start_pos} {offset} {pos}" ) } let data = tensor.data()?; let size_in_bytes = data.len(); w.write_all(&data)?; let padding = 31 - (31 + size_in_bytes) % 32; w.write_all(&vec![0u8; padding])?; } Ok(()) }
candle/candle-core/src/quantized/gguf_file.rs/0
{ "file_path": "candle/candle-core/src/quantized/gguf_file.rs", "repo_id": "candle", "token_count": 9550 }
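A minimal sketch of the reader side of this module: open a GGUF checkpoint, parse the header with `Content::read`, inspect the metadata map, and load one tensor by name. The file path and the tensor picked are placeholders, and the sketch assumes the module is reachable as `candle_core::quantized::gguf_file` from a crate that depends on `candle-core`:

```rust
// Sketch: read a GGUF header and load one tensor. The path and tensor choice
// are placeholders; the module path is assumed to be candle_core::quantized::gguf_file.
use candle_core::quantized::gguf_file;
use candle_core::{Device, Result};

fn main() -> Result<()> {
    let mut file = std::fs::File::open("model.gguf")?; // placeholder path

    // Parses magic/version, the metadata key-value map and the tensor infos.
    let content = gguf_file::Content::read(&mut file)?;
    println!("gguf version: {:?}", content.magic);
    println!(
        "{} metadata entries, {} tensors",
        content.metadata.len(),
        content.tensor_infos.len()
    );

    // Metadata values come back as the `Value` enum defined above.
    if let Some(alignment) = content.metadata.get("general.alignment") {
        println!("general.alignment = {alignment:?}");
    }

    // Load one tensor by name; `tensor` seeks to tensor_data_offset + offset
    // and returns the (still quantized) QTensor on the requested device.
    if let Some(name) = content.tensor_infos.keys().next().cloned() {
        let qtensor = content.tensor(&mut file, &name, &Device::Cpu)?;
        println!("{name}: shape {:?}, dtype {:?}", qtensor.shape(), qtensor.dtype());
    }
    Ok(())
}
```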
use crate::{Result, Tensor}; #[macro_export] macro_rules! test_device { // TODO: Switch to generating the two last arguments automatically once concat_idents is // stable. https://github.com/rust-lang/rust/issues/29599 ($fn_name: ident, $test_cpu: ident, $test_cuda: ident, $test_metal: ident) => { #[test] fn $test_cpu() -> Result<()> { $fn_name(&Device::Cpu) } #[cfg(feature = "cuda")] #[test] fn $test_cuda() -> Result<()> { $fn_name(&Device::new_cuda(0)?) } #[cfg(feature = "metal")] #[test] fn $test_metal() -> Result<()> { $fn_name(&Device::new_metal(0)?) } }; } pub fn to_vec0_round(t: &Tensor, digits: i32) -> Result<f32> { let b = 10f32.powi(digits); let t = t.to_vec0::<f32>()?; Ok(f32::round(t * b) / b) } pub fn to_vec1_round(t: &Tensor, digits: i32) -> Result<Vec<f32>> { let b = 10f32.powi(digits); let t = t.to_vec1::<f32>()?; let t = t.iter().map(|t| f32::round(t * b) / b).collect(); Ok(t) } pub fn to_vec2_round(t: &Tensor, digits: i32) -> Result<Vec<Vec<f32>>> { let b = 10f32.powi(digits); let t = t.to_vec2::<f32>()?; let t = t .iter() .map(|t| t.iter().map(|t| f32::round(t * b) / b).collect()) .collect(); Ok(t) } pub fn to_vec3_round(t: &Tensor, digits: i32) -> Result<Vec<Vec<Vec<f32>>>> { let b = 10f32.powi(digits); let t = t.to_vec3::<f32>()?; let t = t .iter() .map(|t| { t.iter() .map(|t| t.iter().map(|t| f32::round(t * b) / b).collect()) .collect() }) .collect(); Ok(t) }
candle/candle-core/src/test_utils.rs/0
{ "file_path": "candle/candle-core/src/test_utils.rs", "repo_id": "candle", "token_count": 923 }
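A hedged sketch of how the rounding helpers above are typically used: round the tensor contents to a fixed number of digits so an assertion stays stable across small floating-point differences. It assumes the module is publicly reachable as `candle_core::test_utils`:

```rust
// Illustrative test using the rounding helpers, assuming they are reachable
// as candle_core::test_utils from a crate depending on candle-core.
use candle_core::test_utils::to_vec2_round;
use candle_core::{Device, Result, Tensor};

#[test]
fn rounded_comparison() -> Result<()> {
    // 1/3 and 2/3 have no exact f32 representation; rounding to 4 digits
    // keeps the expected values short and the assertion stable.
    let t = Tensor::new(&[[1f32 / 3., 2f32 / 3.], [1.25, 2.5]], &Device::Cpu)?;
    assert_eq!(to_vec2_round(&t, 4)?, [[0.3333, 0.6667], [1.25, 2.5]]);
    Ok(())
}
```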
use candle_core::{DType, Result, Tensor}; struct TmpFile(std::path::PathBuf); impl TmpFile { fn create(base: &str) -> TmpFile { let filename = std::env::temp_dir().join(format!( "candle-{}-{}-{:?}", base, std::process::id(), std::thread::current().id(), )); TmpFile(filename) } } impl std::convert::AsRef<std::path::Path> for TmpFile { fn as_ref(&self) -> &std::path::Path { self.0.as_path() } } impl Drop for TmpFile { fn drop(&mut self) { std::fs::remove_file(&self.0).unwrap() } } #[test] fn npy() -> Result<()> { let npy = Tensor::read_npy("tests/test.npy")?; assert_eq!( npy.to_dtype(DType::U8)?.to_vec1::<u8>()?, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] ); Ok(()) } #[test] fn npz() -> Result<()> { let npz = Tensor::read_npz("tests/test.npz")?; assert_eq!(npz.len(), 2); assert_eq!(npz[0].0, "x"); assert_eq!(npz[1].0, "x_plus_one"); assert_eq!( npz[1].1.to_dtype(DType::U8)?.to_vec1::<u8>()?, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] ); Ok(()) } #[test] fn safetensors() -> Result<()> { use candle_core::safetensors::Load; let tmp_file = TmpFile::create("st"); let t = Tensor::arange(0f32, 24f32, &candle_core::Device::Cpu)?; t.save_safetensors("t", &tmp_file)?; // Load from file. let st = candle_core::safetensors::load(&tmp_file, &candle_core::Device::Cpu)?; let t2 = st.get("t").unwrap(); let diff = (&t - t2)?.abs()?.sum_all()?.to_vec0::<f32>()?; assert_eq!(diff, 0f32); // Load from bytes. let bytes = std::fs::read(tmp_file)?; let st = candle_core::safetensors::SliceSafetensors::new(&bytes)?; let t2 = st.get("t").unwrap().load(&candle_core::Device::Cpu); let diff = (&t - t2)?.abs()?.sum_all()?.to_vec0::<f32>()?; assert_eq!(diff, 0f32); Ok(()) }
candle/candle-core/tests/serialization_tests.rs/0
{ "file_path": "candle/candle-core/tests/serialization_tests.rs", "repo_id": "candle", "token_count": 981 }
[package] name = "candle-examples" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true readme = "README.md" [dependencies] accelerate-src = { workspace = true, optional = true } candle = { workspace = true } candle-datasets = { workspace = true, optional = true } candle-nn = { workspace = true } candle-transformers = { workspace = true } candle-flash-attn = { workspace = true, optional = true } candle-onnx = { workspace = true, optional = true } csv = "1.3.0" cudarc = { workspace = true, optional = true } half = { workspace = true, optional = true } hf-hub = { workspace = true, features = ["tokio"] } image = { workspace = true } intel-mkl-src = { workspace = true, optional = true } num-traits = { workspace = true } palette = { version = "0.7.6", optional = true } enterpolation = { version = "0.2.1", optional = true} pyo3 = { version = "0.22.0", features = ["auto-initialize", "abi3-py311"], optional = true } rayon = { workspace = true } rubato = { version = "0.15.0", optional = true } safetensors = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } symphonia = { version = "0.5.3", features = ["all"], optional = true } tokenizers = { workspace = true, features = ["onig"] } cpal = { version = "0.15.2", optional = true } pdf2image = { version = "0.1.2" , optional = true} [dev-dependencies] anyhow = { workspace = true } byteorder = { workspace = true } clap = { workspace = true } imageproc = { workspace = true } memmap2 = { workspace = true } rand = { workspace = true } ab_glyph = { workspace = true } tracing = { workspace = true } tracing-chrome = { workspace = true } tracing-subscriber = { workspace = true } # Necessary to disambiguate with tokio in wasm examples which are 1.28.1 tokio = "1.43.0" [build-dependencies] anyhow = { workspace = true } bindgen_cuda = { version = "0.1.1", optional = true } [features] default = [] accelerate = ["dep:accelerate-src", "candle/accelerate", "candle-nn/accelerate", "candle-transformers/accelerate"] cuda = ["candle/cuda", "candle-nn/cuda", "candle-transformers/cuda", "dep:bindgen_cuda"] cudnn = ["candle/cudnn"] flash-attn = ["cuda", "candle-transformers/flash-attn", "dep:candle-flash-attn"] mkl = ["dep:intel-mkl-src", "candle/mkl", "candle-nn/mkl", "candle-transformers/mkl"] nccl = ["cuda", "cudarc/nccl", "dep:half"] onnx = ["candle-onnx"] metal = ["candle/metal", "candle-nn/metal"] microphone = ["cpal", "rubato"] encodec = ["cpal", "symphonia", "rubato"] mimi = ["cpal", "symphonia", "rubato"] depth_anything_v2 = ["palette", "enterpolation"] [[example]] name = "llama_multiprocess" required-features = ["cuda", "nccl", "flash-attn"] [[example]] name = "reinforcement-learning" required-features = ["pyo3"] [[example]] name = "onnx" required-features = ["onnx"] [[example]] name = "onnx_basics" required-features = ["onnx"] [[example]] name = "whisper" required-features = ["symphonia"] [[example]] name = "whisper-microphone" required-features = ["microphone"] [[example]] name = "mnist-training" required-features = ["candle-datasets"] [[example]] name = "llama2-c" required-features = ["candle-datasets"] [[example]] name = "mimi" required-features = ["mimi"] [[example]] name = "encodec" required-features = ["encodec"] [[example]] name = "depth_anything_v2" required-features = ["depth_anything_v2"] [[example]] name = "silero-vad" required-features = ["onnx"] [[example]] name = "colpali" 
required-features = ["pdf2image"]
candle/candle-examples/Cargo.toml/0
{ "file_path": "candle/candle-examples/Cargo.toml", "repo_id": "candle", "token_count": 1292 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Error as E; use clap::Parser; use candle::{DType, Device, Tensor}; use candle_nn::{ops::softmax, VarBuilder}; use candle_transformers::models::clip; use tokenizers::Tokenizer; #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] tokenizer: Option<String>, #[arg(long, use_value_delimiter = true)] images: Option<Vec<String>>, #[arg(long)] cpu: bool, #[arg(long, use_value_delimiter = true)] sequences: Option<Vec<String>>, } fn load_image<T: AsRef<std::path::Path>>(path: T, image_size: usize) -> anyhow::Result<Tensor> { let img = image::ImageReader::open(path)?.decode()?; let (height, width) = (image_size, image_size); let img = img.resize_to_fill( width as u32, height as u32, image::imageops::FilterType::Triangle, ); let img = img.to_rgb8(); let img = img.into_raw(); let img = Tensor::from_vec(img, (height, width, 3), &Device::Cpu)? .permute((2, 0, 1))? .to_dtype(DType::F32)? .affine(2. / 255., -1.)?; Ok(img) } fn load_images<T: AsRef<std::path::Path>>( paths: &Vec<T>, image_size: usize, ) -> anyhow::Result<Tensor> { let mut images = vec![]; for path in paths { let tensor = load_image(path, image_size)?; images.push(tensor); } let images = Tensor::stack(&images, 0)?; Ok(images) } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let model_file = match args.model { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.repo(hf_hub::Repo::with_revision( "openai/clip-vit-base-patch32".to_string(), hf_hub::RepoType::Model, "refs/pr/15".to_string(), )); api.get("model.safetensors")? } Some(model) => model.into(), }; let tokenizer = get_tokenizer(args.tokenizer)?; let config = clip::ClipConfig::vit_base_patch32(); let device = candle_examples::device(args.cpu)?; let vec_imgs = match args.images { Some(imgs) => imgs, None => vec![ "candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg".to_string(), "candle-examples/examples/yolo-v8/assets/bike.jpg".to_string(), ], }; let images = load_images(&vec_imgs, config.image_size)?.to_device(&device)?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file.clone()], DType::F32, &device)? }; let model = clip::ClipModel::new(vb, &config)?; let (input_ids, vec_seq) = tokenize_sequences(args.sequences, &tokenizer, &device)?; let (_logits_per_text, logits_per_image) = model.forward(&images, &input_ids)?; let softmax_image = softmax(&logits_per_image, 1)?; let softmax_image_vec = softmax_image.flatten_all()?.to_vec1::<f32>()?; println!("softmax_image_vec: {:?}", softmax_image_vec); let probability_vec = softmax_image_vec .iter() .map(|v| v * 100.0) .collect::<Vec<f32>>(); let probability_per_image = probability_vec.len() / vec_imgs.len(); for (i, img) in vec_imgs.iter().enumerate() { let start = i * probability_per_image; let end = start + probability_per_image; let prob = &probability_vec[start..end]; println!("\n\nResults for image: {}\n", img); for (i, p) in prob.iter().enumerate() { println!("Probability: {:.4}% Text: {} ", p, vec_seq[i]); } } Ok(()) } pub fn get_tokenizer(tokenizer: Option<String>) -> anyhow::Result<Tokenizer> { let tokenizer = match tokenizer { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.repo(hf_hub::Repo::with_revision( "openai/clip-vit-base-patch32".to_string(), hf_hub::RepoType::Model, "refs/pr/15".to_string(), )); api.get("tokenizer.json")? 
} Some(file) => file.into(), }; Tokenizer::from_file(tokenizer).map_err(E::msg) } pub fn tokenize_sequences( sequences: Option<Vec<String>>, tokenizer: &Tokenizer, device: &Device, ) -> anyhow::Result<(Tensor, Vec<String>)> { let pad_id = *tokenizer .get_vocab(true) .get("<|endoftext|>") .ok_or(E::msg("No pad token"))?; let vec_seq = match sequences { Some(seq) => seq, None => vec![ "a cycling race".to_string(), "a photo of two cats".to_string(), "a robot holding a candle".to_string(), ], }; let mut tokens = vec![]; for seq in vec_seq.clone() { let encoding = tokenizer.encode(seq, true).map_err(E::msg)?; tokens.push(encoding.get_ids().to_vec()); } let max_len = tokens.iter().map(|v| v.len()).max().unwrap_or(0); // Pad the sequences to have the same length for token_vec in tokens.iter_mut() { let len_diff = max_len - token_vec.len(); if len_diff > 0 { token_vec.extend(vec![pad_id; len_diff]); } } let input_ids = Tensor::new(tokens, device)?; Ok((input_ids, vec_seq)) }
candle/candle-examples/examples/clip/main.rs/0
{ "file_path": "candle/candle-examples/examples/clip/main.rs", "repo_id": "candle", "token_count": 2483 }
//! Depth Anything V2 //! https://huggingface.co/spaces/depth-anything/Depth-Anything-V2 #[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use clap::Parser; use std::{ffi::OsString, path::PathBuf, sync::Arc}; use candle::DType::{F32, U8}; use candle::{DType, Device, Module, Result, Tensor}; use candle_examples::{load_image, load_image_and_resize, save_image}; use candle_nn::VarBuilder; use candle_transformers::models::depth_anything_v2::{DepthAnythingV2, DepthAnythingV2Config}; use candle_transformers::models::dinov2; use crate::color_map::SpectralRColormap; mod color_map; // taken these from: https://huggingface.co/spaces/depth-anything/Depth-Anything-V2/blob/main/depth_anything_v2/dpt.py#L207 const MAGIC_MEAN: [f32; 3] = [0.485, 0.456, 0.406]; const MAGIC_STD: [f32; 3] = [0.229, 0.224, 0.225]; const DINO_IMG_SIZE: usize = 518; #[derive(Parser)] struct Args { #[arg(long)] dinov2_model: Option<PathBuf>, #[arg(long)] depth_anything_v2_model: Option<PathBuf>, #[arg(long)] image: PathBuf, #[arg(long)] output_dir: Option<PathBuf>, #[arg(long)] cpu: bool, #[arg(long)] color_map: bool, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let dinov2_model_file = match args.dinov2_model { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("lmz/candle-dino-v2".into()); api.get("dinov2_vits14.safetensors")? } Some(dinov2_model) => dinov2_model, }; println!("Using file {:?}", dinov2_model_file); let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[dinov2_model_file], F32, &device)? }; let dinov2 = dinov2::vit_small(vb)?; println!("DinoV2 model built"); let depth_anything_model_file = match args.depth_anything_v2_model { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("jeroenvlek/depth-anything-v2-safetensors".into()); api.get("depth_anything_v2_vits.safetensors")? } Some(depth_anything_model) => depth_anything_model, }; println!("Using file {:?}", depth_anything_model_file); let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[depth_anything_model_file], DType::F32, &device)? }; let config = DepthAnythingV2Config::vit_small(); let depth_anything = DepthAnythingV2::new(Arc::new(dinov2), config, vb)?; let (original_height, original_width, image) = load_and_prep_image(&args.image, &device)?; println!("Loaded image {image:?}"); let depth = depth_anything.forward(&image)?; println!("Got predictions {:?}", depth.shape()); let output_image = post_process_image(&depth, original_height, original_width, args.color_map)?; let output_path = full_output_path(&args.image, &args.output_dir); println!("Saving image to {}", output_path.to_string_lossy()); save_image(&output_image, output_path)?; Ok(()) } fn full_output_path(image_path: &PathBuf, output_dir: &Option<PathBuf>) -> PathBuf { let input_file_name = image_path.file_name().unwrap(); let mut output_file_name = OsString::from("depth_"); output_file_name.push(input_file_name); let mut output_path = match output_dir { None => image_path.parent().unwrap().to_path_buf(), Some(output_path) => output_path.clone(), }; output_path.push(output_file_name); output_path } fn load_and_prep_image( image_path: &PathBuf, device: &Device, ) -> anyhow::Result<(usize, usize, Tensor)> { let (_original_image, original_height, original_width) = load_image(&image_path, None)?; let image = load_image_and_resize(&image_path, DINO_IMG_SIZE, DINO_IMG_SIZE)? .unsqueeze(0)? .to_dtype(F32)? 
.to_device(&device)?; let max_pixel_val = Tensor::try_from(255.0f32)? .to_device(&device)? .broadcast_as(image.shape())?; let image = (image / max_pixel_val)?; let image = normalize_image(&image, &MAGIC_MEAN, &MAGIC_STD)?; Ok((original_height, original_width, image)) } fn normalize_image(image: &Tensor, mean: &[f32; 3], std: &[f32; 3]) -> Result<Tensor> { let mean_tensor = Tensor::from_vec(mean.to_vec(), (3, 1, 1), &image.device())?.broadcast_as(image.shape())?; let std_tensor = Tensor::from_vec(std.to_vec(), (3, 1, 1), &image.device())?.broadcast_as(image.shape())?; image.sub(&mean_tensor)?.div(&std_tensor) } fn post_process_image( image: &Tensor, original_height: usize, original_width: usize, color_map: bool, ) -> Result<Tensor> { let out = image.interpolate2d(original_height, original_width)?; let out = scale_image(&out)?; let out = if color_map { let spectral_r = SpectralRColormap::new(); spectral_r.gray2color(&out)? } else { let rgb_slice = [&out, &out, &out]; Tensor::cat(&rgb_slice, 0)?.squeeze(1)? }; let max_pixel_val = Tensor::try_from(255.0f32)? .to_device(out.device())? .broadcast_as(out.shape())?; let out = (out * max_pixel_val)?; out.to_dtype(U8) } fn scale_image(depth: &Tensor) -> Result<Tensor> { let flat_values: Vec<f32> = depth.flatten_all()?.to_vec1()?; let min_val = flat_values.iter().min_by(|a, b| a.total_cmp(b)).unwrap(); let max_val = flat_values.iter().max_by(|a, b| a.total_cmp(b)).unwrap(); let min_val_tensor = Tensor::try_from(*min_val)? .to_device(depth.device())? .broadcast_as(depth.shape())?; let depth = (depth - min_val_tensor)?; let range = max_val - min_val; let range_tensor = Tensor::try_from(range)? .to_device(depth.device())? .broadcast_as(depth.shape())?; depth / range_tensor }
candle/candle-examples/examples/depth_anything_v2/main.rs/0
{ "file_path": "candle/candle-examples/examples/depth_anything_v2/main.rs", "repo_id": "candle", "token_count": 2544 }
# candle-helium: 2b LLM with CC-BY licensed weights Helium-1 is a lightweight model with around 2B parameters. The preview version currently supports 6 languages and shows strong capabilities in those languages compared to existing open-weights models. - [Blog Post](https://kyutai.org/2025/01/13/helium.html) announcing the model release. - [Model card](https://huggingface.co/kyutai/helium-1-preview-2b) on the HuggingFace Hub. ## Running the example ```bash $ cargo run --example helium --release --features cuda -- --prompt 'Write helloworld code in Rust' --sample-len 150 ```

candle/candle-examples/examples/helium/README.md/0
{ "file_path": "candle/candle-examples/examples/helium/README.md", "repo_id": "candle", "token_count": 174 }
# candle-mamba-minimal: minimal implementation of Mamba This is based on [mamba-minimal](https://github.com/johnma2006/mamba-minimal). Compared to the mamba example, this version can handle training but is much slower. ## Running the example ```bash $ cargo run --example mamba-minimal --release -- --prompt "Mamba is the" Mamba is the most popular and best-selling game in the world. It has been downloaded more than 1,000 times by over 1 million people worldwide since its release on March 18th 2016. The Mamba series of games are a collection that combines elements from all genres including action, adventure, strategy & puzzle games with some unique gameplay features such as stealth and survival. The game is also known for its innovative graphics and the ability to play in a variety of different modes like single player or multiplayer. ```
candle/candle-examples/examples/mamba-minimal/README.md/0
{ "file_path": "candle/candle-examples/examples/mamba-minimal/README.md", "repo_id": "candle", "token_count": 206 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::Parser; use candle_transformers::models::mixtral::{Config, Model}; use candle::{DType, Device, Tensor}; use candle_examples::token_output_stream::TokenOutputStream; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; struct TextGeneration { model: Model, device: Device, tokenizer: TokenOutputStream, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new( model: Model, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, tokenizer: TokenOutputStream::new(tokenizer), logits_processor, repeat_penalty, repeat_last_n, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; self.tokenizer.clear(); let mut tokens = self .tokenizer .tokenizer() .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); for &t in tokens.iter() { if let Some(t) = self.tokenizer.next_token(t)? { print!("{t}") } } std::io::stdout().flush()?; let mut generated_tokens = 0usize; let eos_token = match self.tokenizer.get_token("</s>") { Some(token) => token, None => anyhow::bail!("cannot find the </s> token"), }; let start_gen = std::time::Instant::now(); for index in 0..sample_len { let context_size = if index > 0 { 1 } else { tokens.len() }; let start_pos = tokens.len().saturating_sub(context_size); let ctxt = &tokens[start_pos..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = self.model.forward(&input, start_pos)?; let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == eos_token { break; } if let Some(t) = self.tokenizer.next_token(next_token)? { print!("{t}"); std::io::stdout().flush()?; } } let dt = start_gen.elapsed(); if let Some(rest) = self.tokenizer.decode_rest().map_err(E::msg)? { print!("{rest}"); } std::io::stdout().flush()?; println!( "\n{generated_tokens} tokens generated ({:.2} token/s)", generated_tokens as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long)] use_flash_attn: bool, #[arg(long)] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). 
#[arg(long, short = 'n', default_value_t = 10000)] sample_len: usize, #[arg(long, default_value = "mistralai/Mixtral-8x7B-v0.1")] model_id: String, #[arg(long, default_value = "main")] revision: String, #[arg(long)] tokenizer_file: Option<String>, #[arg(long)] weight_files: Option<String>, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = Api::new()?; let repo = api.repo(Repo::with_revision( args.model_id, RepoType::Model, args.revision, )); let tokenizer_filename = match args.tokenizer_file { Some(file) => std::path::PathBuf::from(file), None => repo.get("tokenizer.json")?, }; let filenames = match args.weight_files { Some(files) => files .split(',') .map(std::path::PathBuf::from) .collect::<Vec<_>>(), None => candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?, }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let config = Config::v0_1_8x7b(args.use_flash_attn); let device = candle_examples::device(args.cpu)?; let dtype = device.bf16_default_to_f32(); let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? }; let model = Model::new(&config, vb)?; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, tokenizer, args.seed, args.temperature, args.top_p, args.repeat_penalty, args.repeat_last_n, &device, ); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
candle/candle-examples/examples/mixtral/main.rs/0
{ "file_path": "candle/candle-examples/examples/mixtral/main.rs", "repo_id": "candle", "token_count": 3396 }
# candle-olmo: Open Language Models designed to enable the science of language models OLMo is a series of Open Language Models designed to enable the science of language models. - **Project Page:** https://allenai.org/olmo - **Paper:** [Link](https://arxiv.org/abs/2402.00838) - **Technical blog post:** https://blog.allenai.org/olmo-open-language-model-87ccfc95f580 - **W&B Logs:** https://wandb.ai/ai2-llm/OLMo-1B/reports/OLMo-1B--Vmlldzo2NzY1Njk1 <!-- - **Press release:** TODO --> ## Running the example ```bash $ cargo run --example olmo --release -- --prompt "It is only with the heart that one can see rightly" avx: true, neon: false, simd128: false, f16c: true temp: 0.20 repeat-penalty: 1.10 repeat-last-n: 64 retrieved the files in 354.977µs loaded the model in 19.87779666s It is only with the heart that one can see rightly; what is essential is invisible to the eye. ``` Various model sizes are available via the `--model` argument. ```bash $ cargo run --example olmo --release -- --model 1.7-7b --prompt 'It is only with the heart that one can see rightly' avx: true, neon: false, simd128: false, f16c: true temp: 0.20 repeat-penalty: 1.10 repeat-last-n: 64 retrieved the files in 1.226087ms loaded the model in 171.274578609s It is only with the heart that one can see rightly; what is essential is invisible to the eye.” ~ Antoine de Saint-Exupery, The Little Prince I am a big fan of this quote. It reminds me that I need to be open and aware of my surroundings in order to truly appreciate them. ```
candle/candle-examples/examples/olmo/README.md/0
{ "file_path": "candle/candle-examples/examples/olmo/README.md", "repo_id": "candle", "token_count": 504 }
use super::gym_env::{GymEnv, Step}; use candle::{DType, Device, Error, Module, Result, Tensor}; use candle_nn::{ linear, ops::log_softmax, ops::softmax, sequential::seq, Activation, AdamW, Optimizer, ParamsAdamW, VarBuilder, VarMap, }; use rand::{distributions::Distribution, rngs::ThreadRng, Rng}; fn new_model( input_shape: &[usize], num_actions: usize, dtype: DType, device: &Device, ) -> Result<(impl Module, VarMap)> { let input_size = input_shape.iter().product(); let varmap = VarMap::new(); let var_builder = VarBuilder::from_varmap(&varmap, dtype, device); let model = seq() .add(linear(input_size, 32, var_builder.pp("lin1"))?) .add(Activation::Relu) .add(linear(32, num_actions, var_builder.pp("lin2"))?); Ok((model, varmap)) } fn accumulate_rewards(steps: &[Step<i64>]) -> Vec<f64> { let mut rewards: Vec<f64> = steps.iter().map(|s| s.reward).collect(); let mut acc_reward = 0f64; for (i, reward) in rewards.iter_mut().enumerate().rev() { if steps[i].terminated { acc_reward = 0.0; } acc_reward += *reward; *reward = acc_reward; } rewards } fn weighted_sample(probs: Vec<f32>, rng: &mut ThreadRng) -> Result<usize> { let distribution = rand::distributions::WeightedIndex::new(probs).map_err(Error::wrap)?; let mut rng = rng; Ok(distribution.sample(&mut rng)) } pub fn run() -> Result<()> { let env = GymEnv::new("CartPole-v1")?; println!("action space: {:?}", env.action_space()); println!("observation space: {:?}", env.observation_space()); let (model, varmap) = new_model( env.observation_space(), env.action_space(), DType::F32, &Device::Cpu, )?; let optimizer_params = ParamsAdamW { lr: 0.01, weight_decay: 0.01, ..Default::default() }; let mut optimizer = AdamW::new(varmap.all_vars(), optimizer_params)?; let mut rng = rand::thread_rng(); for epoch_idx in 0..100 { let mut state = env.reset(rng.gen::<u64>())?; let mut steps: Vec<Step<i64>> = vec![]; loop { let action = { let action_probs: Vec<f32> = softmax(&model.forward(&state.detach().unsqueeze(0)?)?, 1)? .squeeze(0)? .to_vec1()?; weighted_sample(action_probs, &mut rng)? as i64 }; let step = env.step(action)?; steps.push(step.copy_with_obs(&state)); if step.terminated || step.truncated { state = env.reset(rng.gen::<u64>())?; if steps.len() > 5000 { break; } } else { state = step.state; } } let total_reward: f64 = steps.iter().map(|s| s.reward).sum(); let episodes: i64 = steps .iter() .map(|s| (s.terminated || s.truncated) as i64) .sum(); println!( "epoch: {:<3} episodes: {:<5} avg reward per episode: {:.2}", epoch_idx, episodes, total_reward / episodes as f64 ); let batch_size = steps.len(); let rewards = Tensor::from_vec(accumulate_rewards(&steps), batch_size, &Device::Cpu)? .to_dtype(DType::F32)? .detach(); let actions_mask = { let actions: Vec<i64> = steps.iter().map(|s| s.action).collect(); let actions_mask: Vec<Tensor> = actions .iter() .map(|&action| { // One-hot encoding let mut action_mask = vec![0.0; env.action_space()]; action_mask[action as usize] = 1.0; Tensor::from_vec(action_mask, env.action_space(), &Device::Cpu) .unwrap() .to_dtype(DType::F32) .unwrap() }) .collect(); Tensor::stack(&actions_mask, 0)?.detach() }; let states = { let states: Vec<Tensor> = steps.into_iter().map(|s| s.state).collect(); Tensor::stack(&states, 0)?.detach() }; let log_probs = actions_mask .mul(&log_softmax(&model.forward(&states)?, 1)?)? .sum(1)?; let loss = rewards.mul(&log_probs)?.neg()?.mean_all()?; optimizer.backward_step(&loss)?; } Ok(()) }
candle/candle-examples/examples/reinforcement-learning/policy_gradient.rs/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/policy_gradient.rs", "repo_id": "candle", "token_count": 2332 }
# candle-vit

An implementation of the Vision Transformer (ViT) model, following
[vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224).
It uses a classification head trained on the ImageNet dataset and returns the
probabilities for the top-5 classes.

## Running an example

```
$ cargo run --example vit --release -- --image tiger.jpg

loaded image Tensor[dims 3, 224, 224; f32]
model built
tiger, Panthera tigris : 100.00%
tiger cat : 0.00%
jaguar, panther, Panthera onca, Felis onca: 0.00%
leopard, Panthera pardus: 0.00%
lion, king of beasts, Panthera leo: 0.00%
```
candle/candle-examples/examples/vit/README.md/0
{ "file_path": "candle/candle-examples/examples/vit/README.md", "repo_id": "candle", "token_count": 219 }
use candle::{Device, Result, Tensor}; pub const IMAGENET_MEAN: [f32; 3] = [0.485f32, 0.456, 0.406]; pub const IMAGENET_STD: [f32; 3] = [0.229f32, 0.224, 0.225]; /// Loads an image from disk using the image crate at the requested resolution, /// using the given std and mean parameters. /// This returns a tensor with shape (3, res, res). imagenet normalization is applied. pub fn load_image_with_std_mean<P: AsRef<std::path::Path>>( p: P, res: usize, mean: &[f32; 3], std: &[f32; 3], ) -> Result<Tensor> { let img = image::ImageReader::open(p)? .decode() .map_err(candle::Error::wrap)? .resize_to_fill( res as u32, res as u32, image::imageops::FilterType::Triangle, ); let img = img.to_rgb8(); let data = img.into_raw(); let data = Tensor::from_vec(data, (res, res, 3), &Device::Cpu)?.permute((2, 0, 1))?; let mean = Tensor::new(mean, &Device::Cpu)?.reshape((3, 1, 1))?; let std = Tensor::new(std, &Device::Cpu)?.reshape((3, 1, 1))?; (data.to_dtype(candle::DType::F32)? / 255.)? .broadcast_sub(&mean)? .broadcast_div(&std) } /// Loads an image from disk using the image crate at the requested resolution. /// This returns a tensor with shape (3, res, res). imagenet normalization is applied. pub fn load_image<P: AsRef<std::path::Path>>(p: P, res: usize) -> Result<Tensor> { load_image_with_std_mean(p, res, &IMAGENET_MEAN, &IMAGENET_STD) } /// Loads an image from disk using the image crate, this returns a tensor with shape /// (3, 224, 224). imagenet normalization is applied. pub fn load_image224<P: AsRef<std::path::Path>>(p: P) -> Result<Tensor> { load_image(p, 224) } /// Loads an image from disk using the image crate, this returns a tensor with shape /// (3, 518, 518). imagenet normalization is applied. /// The model dinov2 reg4 analyzes images with dimensions 3x518x518 (resulting in 37x37 transformer tokens). 
pub fn load_image518<P: AsRef<std::path::Path>>(p: P) -> Result<Tensor> { load_image(p, 518) } pub const CLASS_COUNT: i64 = 1000; pub const CLASSES: [&str; 1000] = [ "tench, Tinca tinca", "goldfish, Carassius auratus", "great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias", "tiger shark, Galeocerdo cuvieri", "hammerhead, hammerhead shark", "electric ray, crampfish, numbfish, torpedo", "stingray", "cock", "hen", "ostrich, Struthio camelus", "brambling, Fringilla montifringilla", "goldfinch, Carduelis carduelis", "house finch, linnet, Carpodacus mexicanus", "junco, snowbird", "indigo bunting, indigo finch, indigo bird, Passerina cyanea", "robin, American robin, Turdus migratorius", "bulbul", "jay", "magpie", "chickadee", "water ouzel, dipper", "kite", "bald eagle, American eagle, Haliaeetus leucocephalus", "vulture", "great grey owl, great gray owl, Strix nebulosa", "European fire salamander, Salamandra salamandra", "common newt, Triturus vulgaris", "eft", "spotted salamander, Ambystoma maculatum", "axolotl, mud puppy, Ambystoma mexicanum", "bullfrog, Rana catesbeiana", "tree frog, tree-frog", "tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui", "loggerhead, loggerhead turtle, Caretta caretta", "leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea", "mud turtle", "terrapin", "box turtle, box tortoise", "banded gecko", "common iguana, iguana, Iguana iguana", "American chameleon, anole, Anolis carolinensis", "whiptail, whiptail lizard", "agama", "frilled lizard, Chlamydosaurus kingi", "alligator lizard", "Gila monster, Heloderma suspectum", "green lizard, Lacerta viridis", "African chameleon, Chamaeleo chamaeleon", "Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis", "African crocodile, Nile crocodile, Crocodylus niloticus", "American alligator, Alligator mississipiensis", "triceratops", "thunder snake, worm snake, Carphophis amoenus", "ringneck snake, ring-necked snake, ring snake", "hognose snake, puff adder, sand viper", "green snake, grass snake", "king snake, kingsnake", "garter snake, grass snake", "water snake", "vine snake", "night snake, Hypsiglena torquata", "boa constrictor, Constrictor constrictor", "rock python, rock snake, Python sebae", "Indian cobra, Naja naja", "green mamba", "sea snake", "horned viper, cerastes, sand viper, horned asp, Cerastes cornutus", "diamondback, diamondback rattlesnake, Crotalus adamanteus", "sidewinder, horned rattlesnake, Crotalus cerastes", "trilobite", "harvestman, daddy longlegs, Phalangium opilio", "scorpion", "black and gold garden spider, Argiope aurantia", "barn spider, Araneus cavaticus", "garden spider, Aranea diademata", "black widow, Latrodectus mactans", "tarantula", "wolf spider, hunting spider", "tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse, partridge, Bonasa umbellus", "prairie chicken, prairie grouse, prairie fowl", "peacock", "quail", "partridge", "African grey, African gray, Psittacus erithacus", "macaw", "sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita", "lorikeet", "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "drake", "red-breasted merganser, Mergus serrator", "goose", "black swan, Cygnus atratus", "tusker", "echidna, spiny anteater, anteater", "platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus", "wallaby, brush kangaroo", "koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus", "wombat", "jellyfish", "sea anemone, anemone", 
"brain coral", "flatworm, platyhelminth", "nematode, nematode worm, roundworm", "conch", "snail", "slug", "sea slug, nudibranch", "chiton, coat-of-mail shell, sea cradle, polyplacophore", "chambered nautilus, pearly nautilus, nautilus", "Dungeness crab, Cancer magister", "rock crab, Cancer irroratus", "fiddler crab", "king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica", "American lobster, Northern lobster, Maine lobster, Homarus americanus", "spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish", "crayfish, crawfish, crawdad, crawdaddy", "hermit crab", "isopod", "white stork, Ciconia ciconia", "black stork, Ciconia nigra", "spoonbill", "flamingo", "little blue heron, Egretta caerulea", "American egret, great white heron, Egretta albus", "bittern", "crane", "limpkin, Aramus pictus", "European gallinule, Porphyrio porphyrio", "American coot, marsh hen, mud hen, water hen, Fulica americana", "bustard", "ruddy turnstone, Arenaria interpres", "red-backed sandpiper, dunlin, Erolia alpina", "redshank, Tringa totanus", "dowitcher", "oystercatcher, oyster catcher", "pelican", "king penguin, Aptenodytes patagonica", "albatross, mollymawk", "grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus", "killer whale, killer, orca, grampus, sea wolf, Orcinus orca", "dugong, Dugong dugon", "sea lion", "Chihuahua", "Japanese spaniel", "Maltese dog, Maltese terrier, Maltese", "Pekinese, Pekingese, Peke", "Shih-Tzu", "Blenheim spaniel", "papillon", "toy terrier", "Rhodesian ridgeback", "Afghan hound, Afghan", "basset, basset hound", "beagle", "bloodhound, sleuthhound", "bluetick", "black-and-tan coonhound", "Walker hound, Walker foxhound", "English foxhound", "redbone", "borzoi, Russian wolfhound", "Irish wolfhound", "Italian greyhound", "whippet", "Ibizan hound, Ibizan Podenco", "Norwegian elkhound, elkhound", "otterhound, otter hound", "Saluki, gazelle hound", "Scottish deerhound, deerhound", "Weimaraner", "Staffordshire bullterrier, Staffordshire bull terrier", "American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier", "Bedlington terrier", "Border terrier", "Kerry blue terrier", "Irish terrier", "Norfolk terrier", "Norwich terrier", "Yorkshire terrier", "wire-haired fox terrier", "Lakeland terrier", "Sealyham terrier, Sealyham", "Airedale, Airedale terrier", "cairn, cairn terrier", "Australian terrier", "Dandie Dinmont, Dandie Dinmont terrier", "Boston bull, Boston terrier", "miniature schnauzer", "giant schnauzer", "standard schnauzer", "Scotch terrier, Scottish terrier, Scottie", "Tibetan terrier, chrysanthemum dog", "silky terrier, Sydney silky", "soft-coated wheaten terrier", "West Highland white terrier", "Lhasa, Lhasa apso", "flat-coated retriever", "curly-coated retriever", "golden retriever", "Labrador retriever", "Chesapeake Bay retriever", "German short-haired pointer", "vizsla, Hungarian pointer", "English setter", "Irish setter, red setter", "Gordon setter", "Brittany spaniel", "clumber, clumber spaniel", "English springer, English springer spaniel", "Welsh springer spaniel", "cocker spaniel, English cocker spaniel, cocker", "Sussex spaniel", "Irish water spaniel", "kuvasz", "schipperke", "groenendael", "malinois", "briard", "kelpie", "komondor", "Old English sheepdog, bobtail", "Shetland sheepdog, Shetland sheep dog, Shetland", "collie", "Border collie", "Bouvier des Flandres, Bouviers des Flandres", "Rottweiler", "German shepherd, German shepherd dog, German police dog, 
alsatian", "Doberman, Doberman pinscher", "miniature pinscher", "Greater Swiss Mountain dog", "Bernese mountain dog", "Appenzeller", "EntleBucher", "boxer", "bull mastiff", "Tibetan mastiff", "French bulldog", "Great Dane", "Saint Bernard, St Bernard", "Eskimo dog, husky", "malamute, malemute, Alaskan malamute", "Siberian husky", "dalmatian, coach dog, carriage dog", "affenpinscher, monkey pinscher, monkey dog", "basenji", "pug, pug-dog", "Leonberg", "Newfoundland, Newfoundland dog", "Great Pyrenees", "Samoyed, Samoyede", "Pomeranian", "chow, chow chow", "keeshond", "Brabancon griffon", "Pembroke, Pembroke Welsh corgi", "Cardigan, Cardigan Welsh corgi", "toy poodle", "miniature poodle", "standard poodle", "Mexican hairless", "timber wolf, grey wolf, gray wolf, Canis lupus", "white wolf, Arctic wolf, Canis lupus tundrarum", "red wolf, maned wolf, Canis rufus, Canis niger", "coyote, prairie wolf, brush wolf, Canis latrans", "dingo, warrigal, warragal, Canis dingo", "dhole, Cuon alpinus", "African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus", "hyena, hyaena", "red fox, Vulpes vulpes", "kit fox, Vulpes macrotis", "Arctic fox, white fox, Alopex lagopus", "grey fox, gray fox, Urocyon cinereoargenteus", "tabby, tabby cat", "tiger cat", "Persian cat", "Siamese cat, Siamese", "Egyptian cat", "cougar, puma, catamount, mountain lion, painter, panther, Felis concolor", "lynx, catamount", "leopard, Panthera pardus", "snow leopard, ounce, Panthera uncia", "jaguar, panther, Panthera onca, Felis onca", "lion, king of beasts, Panthera leo", "tiger, Panthera tigris", "cheetah, chetah, Acinonyx jubatus", "brown bear, bruin, Ursus arctos", "American black bear, black bear, Ursus americanus, Euarctos americanus", "ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus", "sloth bear, Melursus ursinus, Ursus ursinus", "mongoose", "meerkat, mierkat", "tiger beetle", "ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle", "ground beetle, carabid beetle", "long-horned beetle, longicorn, longicorn beetle", "leaf beetle, chrysomelid", "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant, emmet, pismire", "grasshopper, hopper", "cricket", "walking stick, walkingstick, stick insect", "cockroach, roach", "mantis, mantid", "cicada, cicala", "leafhopper", "lacewing, lacewing fly", "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", "damselfly", "admiral", "ringlet, ringlet butterfly", "monarch, monarch butterfly, milkweed butterfly, Danaus plexippus", "cabbage butterfly", "sulphur butterfly, sulfur butterfly", "lycaenid, lycaenid butterfly", "starfish, sea star", "sea urchin", "sea cucumber, holothurian", "wood rabbit, cottontail, cottontail rabbit", "hare", "Angora, Angora rabbit", "hamster", "porcupine, hedgehog", "fox squirrel, eastern fox squirrel, Sciurus niger", "marmot", "beaver", "guinea pig, Cavia cobaya", "sorrel", "zebra", "hog, pig, grunter, squealer, Sus scrofa", "wild boar, boar, Sus scrofa", "warthog", "hippopotamus, hippo, river horse, Hippopotamus amphibius", "ox", "water buffalo, water ox, Asiatic buffalo, Bubalus bubalis", "bison", "ram, tup", "bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis", "ibex, Capra ibex", "hartebeest", "impala, Aepyceros melampus", "gazelle", "Arabian camel, dromedary, Camelus dromedarius", "llama", "weasel", "mink", "polecat, fitch, foulmart, foumart, Mustela putorius", "black-footed ferret, ferret, Mustela nigripes", 
"otter", "skunk, polecat, wood pussy", "badger", "armadillo", "three-toed sloth, ai, Bradypus tridactylus", "orangutan, orang, orangutang, Pongo pygmaeus", "gorilla, Gorilla gorilla", "chimpanzee, chimp, Pan troglodytes", "gibbon, Hylobates lar", "siamang, Hylobates syndactylus, Symphalangus syndactylus", "guenon, guenon monkey", "patas, hussar monkey, Erythrocebus patas", "baboon", "macaque", "langur", "colobus, colobus monkey", "proboscis monkey, Nasalis larvatus", "marmoset", "capuchin, ringtail, Cebus capucinus", "howler monkey, howler", "titi, titi monkey", "spider monkey, Ateles geoffroyi", "squirrel monkey, Saimiri sciureus", "Madagascar cat, ring-tailed lemur, Lemur catta", "indri, indris, Indri indri, Indri brevicaudatus", "Indian elephant, Elephas maximus", "African elephant, Loxodonta africana", "lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens", "giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca", "barracouta, snoek", "eel", "coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch", "rock beauty, Holocanthus tricolor", "anemone fish", "sturgeon", "gar, garfish, garpike, billfish, Lepisosteus osseus", "lionfish", "puffer, pufferfish, blowfish, globefish", "abacus", "abaya", "academic gown, academic robe, judge's robe", "accordion, piano accordion, squeeze box", "acoustic guitar", "aircraft carrier, carrier, flattop, attack aircraft carrier", "airliner", "airship, dirigible", "altar", "ambulance", "amphibian, amphibious vehicle", "analog clock", "apiary, bee house", "apron", "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin", "assault rifle, assault gun", "backpack, back pack, knapsack, packsack, rucksack, haversack", "bakery, bakeshop, bakehouse", "balance beam, beam", "balloon", "ballpoint, ballpoint pen, ballpen, Biro", "Band Aid", "banjo", "bannister, banister, balustrade, balusters, handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel, cask", "barrow, garden cart, lawn cart, wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "bathing cap, swimming cap", "bath towel", "bathtub, bathing tub, bath, tub", "beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon", "beacon, lighthouse, beacon light, pharos", "beaker", "bearskin, busby, shako", "beer bottle", "beer glass", "bell cote, bell cot", "bib", "bicycle-built-for-two, tandem bicycle, tandem", "bikini, two-piece", "binder, ring-binder", "binoculars, field glasses, opera glasses", "birdhouse", "boathouse", "bobsled, bobsleigh, bob", "bolo tie, bolo, bola tie, bola", "bonnet, poke bonnet", "bookcase", "bookshop, bookstore, bookstall", "bottlecap", "bow", "bow tie, bow-tie, bowtie", "brass, memorial tablet, plaque", "brassiere, bra, bandeau", "breakwater, groin, groyne, mole, bulwark, seawall, jetty", "breastplate, aegis, egis", "broom", "bucket, pail", "buckle", "bulletproof vest", "bullet train, bullet", "butcher shop, meat market", "cab, hack, taxi, taxicab", "caldron, cauldron", "candle, taper, wax light", "cannon", "canoe", "can opener, tin opener", "cardigan", "car mirror", "carousel, carrousel, merry-go-round, roundabout, whirligig", "carpenter's kit, tool kit", "carton", "car wheel", "cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM", "cassette", "cassette player", "castle", "catamaran", "CD player", "cello, violoncello", "cellular telephone, cellular phone, cellphone, cell, 
mobile phone", "chain", "chainlink fence", "chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour", "chain saw, chainsaw", "chest", "chiffonier, commode", "chime, bell, gong", "china cabinet, china closet", "Christmas stocking", "church, church building", "cinema, movie theater, movie theatre, movie house, picture palace", "cleaver, meat cleaver, chopper", "cliff dwelling", "cloak", "clog, geta, patten, sabot", "cocktail shaker", "coffee mug", "coffeepot", "coil, spiral, volute, whorl, helix", "combination lock", "computer keyboard, keypad", "confectionery, confectionary, candy store", "container ship, containership, container vessel", "convertible", "corkscrew, bottle screw", "cornet, horn, trumpet, trump", "cowboy boot", "cowboy hat, ten-gallon hat", "cradle", "crane", "crash helmet", "crate", "crib, cot", "Crock Pot", "croquet ball", "crutch", "cuirass", "dam, dike, dyke", "desk", "desktop computer", "dial telephone, dial phone", "diaper, nappy, napkin", "digital clock", "digital watch", "dining table, board", "dishrag, dishcloth", "dishwasher, dish washer, dishwashing machine", "disk brake, disc brake", "dock, dockage, docking facility", "dogsled, dog sled, dog sleigh", "dome", "doormat, welcome mat", "drilling platform, offshore rig", "drum, membranophone, tympan", "drumstick", "dumbbell", "Dutch oven", "electric fan, blower", "electric guitar", "electric locomotive", "entertainment center", "envelope", "espresso maker", "face powder", "feather boa, boa", "file, file cabinet, filing cabinet", "fireboat", "fire engine, fire truck", "fire screen, fireguard", "flagpole, flagstaff", "flute, transverse flute", "folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster", "freight car", "French horn, horn", "frying pan, frypan, skillet", "fur coat", "garbage truck, dustcart", "gasmask, respirator, gas helmet", "gas pump, gasoline pump, petrol pump, island dispenser", "goblet", "go-kart", "golf ball", "golfcart, golf cart", "gondola", "gong, tam-tam", "gown", "grand piano, grand", "greenhouse, nursery, glasshouse", "grille, radiator grille", "grocery store, grocery, food market, market", "guillotine", "hair slide", "hair spray", "half track", "hammer", "hamper", "hand blower, blow dryer, blow drier, hair dryer, hair drier", "hand-held computer, hand-held microcomputer", "handkerchief, hankie, hanky, hankey", "hard disc, hard disk, fixed disk", "harmonica, mouth organ, harp, mouth harp", "harp", "harvester, reaper", "hatchet", "holster", "home theater, home theatre", "honeycomb", "hook, claw", "hoopskirt, crinoline", "horizontal bar, high bar", "horse cart, horse-cart", "hourglass", "iPod", "iron, smoothing iron", "jack-o'-lantern", "jean, blue jean, denim", "jeep, landrover", "jersey, T-shirt, tee shirt", "jigsaw puzzle", "jinrikisha, ricksha, rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat, laboratory coat", "ladle", "lampshade, lamp shade", "laptop, laptop computer", "lawn mower, mower", "lens cap, lens cover", "letter opener, paper knife, paperknife", "library", "lifeboat", "lighter, light, igniter, ignitor", "limousine, limo", "liner, ocean liner", "lipstick, lip rouge", "Loafer", "lotion", "loudspeaker, speaker, speaker unit, loudspeaker system, speaker system", "loupe, jeweler's loupe", "lumbermill, sawmill", "magnetic compass", "mailbag, postbag", "mailbox, letter box", "maillot", "maillot, tank suit", "manhole cover", "maraca", "marimba, xylophone", "mask", "matchstick", "maypole", "maze, labyrinth", "measuring 
cup", "medicine chest, medicine cabinet", "megalith, megalithic structure", "microphone, mike", "microwave, microwave oven", "military uniform", "milk can", "minibus", "miniskirt, mini", "minivan", "missile", "mitten", "mixing bowl", "mobile home, manufactured home", "Model T", "modem", "monastery", "monitor", "moped", "mortar", "mortarboard", "mosque", "mosquito net", "motor scooter, scooter", "mountain bike, all-terrain bike, off-roader", "mountain tent", "mouse, computer mouse", "mousetrap", "moving van", "muzzle", "nail", "neck brace", "necklace", "nipple", "notebook, notebook computer", "obelisk", "oboe, hautboy, hautbois", "ocarina, sweet potato", "odometer, hodometer, mileometer, milometer", "oil filter", "organ, pipe organ", "oscilloscope, scope, cathode-ray oscilloscope, CRO", "overskirt", "oxcart", "oxygen mask", "packet", "paddle, boat paddle", "paddlewheel, paddle wheel", "padlock", "paintbrush", "pajama, pyjama, pj's, jammies", "palace", "panpipe, pandean pipe, syrinx", "paper towel", "parachute, chute", "parallel bars, bars", "park bench", "parking meter", "passenger car, coach, carriage", "patio, terrace", "pay-phone, pay-station", "pedestal, plinth, footstall", "pencil box, pencil case", "pencil sharpener", "perfume, essence", "Petri dish", "photocopier", "pick, plectrum, plectron", "pickelhaube", "picket fence, paling", "pickup, pickup truck", "pier", "piggy bank, penny bank", "pill bottle", "pillow", "ping-pong ball", "pinwheel", "pirate, pirate ship", "pitcher, ewer", "plane, carpenter's plane, woodworking plane", "planetarium", "plastic bag", "plate rack", "plow, plough", "plunger, plumber's helper", "Polaroid camera, Polaroid Land camera", "pole", "police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria", "poncho", "pool table, billiard table, snooker table", "pop bottle, soda bottle", "pot, flowerpot", "potter's wheel", "power drill", "prayer rug, prayer mat", "printer", "prison, prison house", "projectile, missile", "projector", "puck, hockey puck", "punching bag, punch bag, punching ball, punchball", "purse", "quill, quill pen", "quilt, comforter, comfort, puff", "racer, race car, racing car", "racket, racquet", "radiator", "radio, wireless", "radio telescope, radio reflector", "rain barrel", "recreational vehicle, RV, R.V.", "reel", "reflex camera", "refrigerator, icebox", "remote control, remote", "restaurant, eating house, eating place, eatery", "revolver, six-gun, six-shooter", "rifle", "rocking chair, rocker", "rotisserie", "rubber eraser, rubber, pencil eraser", "rugby ball", "rule, ruler", "running shoe", "safe", "safety pin", "saltshaker, salt shaker", "sandal", "sarong", "sax, saxophone", "scabbard", "scale, weighing machine", "school bus", "schooner", "scoreboard", "screen, CRT screen", "screw", "screwdriver", "seat belt, seatbelt", "sewing machine", "shield, buckler", "shoe shop, shoe-shop, shoe store", "shoji", "shopping basket", "shopping cart", "shovel", "shower cap", "shower curtain", "ski", "ski mask", "sleeping bag", "slide rule, slipstick", "sliding door", "slot, one-armed bandit", "snorkel", "snowmobile", "snowplow, snowplough", "soap dispenser", "soccer ball", "sock", "solar dish, solar collector, solar furnace", "sombrero", "soup bowl", "space bar", "space heater", "space shuttle", "spatula", "speedboat", "spider web, spider's web", "spindle", "sports car, sport car", "spotlight, spot", "stage", "steam locomotive", "steel arch bridge", "steel drum", "stethoscope", "stole", "stone wall", "stopwatch, stop watch", "stove", 
"strainer", "streetcar, tram, tramcar, trolley, trolley car", "stretcher", "studio couch, day bed", "stupa, tope", "submarine, pigboat, sub, U-boat", "suit, suit of clothes", "sundial", "sunglass", "sunglasses, dark glasses, shades", "sunscreen, sunblock, sun blocker", "suspension bridge", "swab, swob, mop", "sweatshirt", "swimming trunks, bathing trunks", "swing", "switch, electric switch, electrical switch", "syringe", "table lamp", "tank, army tank, armored combat vehicle, armoured combat vehicle", "tape player", "teapot", "teddy, teddy bear", "television, television system", "tennis ball", "thatch, thatched roof", "theater curtain, theatre curtain", "thimble", "thresher, thrasher, threshing machine", "throne", "tile roof", "toaster", "tobacco shop, tobacconist shop, tobacconist", "toilet seat", "torch", "totem pole", "tow truck, tow car, wrecker", "toyshop", "tractor", "trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi", "tray", "trench coat", "tricycle, trike, velocipede", "trimaran", "tripod", "triumphal arch", "trolleybus, trolley coach, trackless trolley", "trombone", "tub, vat", "turnstile", "typewriter keyboard", "umbrella", "unicycle, monocycle", "upright, upright piano", "vacuum, vacuum cleaner", "vase", "vault", "velvet", "vending machine", "vestment", "viaduct", "violin, fiddle", "volleyball", "waffle iron", "wall clock", "wallet, billfold, notecase, pocketbook", "wardrobe, closet, press", "warplane, military plane", "washbasin, handbasin, washbowl, lavabo, wash-hand basin", "washer, automatic washer, washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle", "wig", "window screen", "window shade", "Windsor tie", "wine bottle", "wing", "wok", "wooden spoon", "wool, woolen, woollen", "worm fence, snake fence, snake-rail fence, Virginia fence", "wreck", "yawl", "yurt", "web site, website, internet site, site", "comic book", "crossword puzzle, crossword", "street sign", "traffic light, traffic signal, stoplight", "book jacket, dust cover, dust jacket, dust wrapper", "menu", "plate", "guacamole", "consomme", "hot pot, hotpot", "trifle", "ice cream, icecream", "ice lolly, lolly, lollipop, popsicle", "French loaf", "bagel, beigel", "pretzel", "cheeseburger", "hotdog, hot dog, red hot", "mashed potato", "head cabbage", "broccoli", "cauliflower", "zucchini, courgette", "spaghetti squash", "acorn squash", "butternut squash", "cucumber, cuke", "artichoke, globe artichoke", "bell pepper", "cardoon", "mushroom", "Granny Smith", "strawberry", "orange", "lemon", "fig", "pineapple, ananas", "banana", "jackfruit, jak, jack", "custard apple", "pomegranate", "hay", "carbonara", "chocolate sauce, chocolate syrup", "dough", "meat loaf, meatloaf", "pizza, pizza pie", "potpie", "burrito", "red wine", "espresso", "cup", "eggnog", "alp", "bubble", "cliff, drop, drop-off", "coral reef", "geyser", "lakeside, lakeshore", "promontory, headland, head, foreland", "sandbar, sand bar", "seashore, coast, seacoast, sea-coast", "valley, vale", "volcano", "ballplayer, baseball player", "groom, bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum", "corn", "acorn", "hip, rose hip, rosehip", "buckeye, horse chestnut, conker", "coral fungus", "agaric", "gyromitra", "stinkhorn, carrion fungus", "earthstar", "hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa", "bolete", "ear, spike, capitulum", "toilet tissue, toilet paper, bathroom tissue", ];
candle/candle-examples/src/imagenet.rs/0
{ "file_path": "candle/candle-examples/src/imagenet.rs", "repo_id": "candle", "token_count": 13048 }
// This header is not specific to our application and you'll probably want // something like this for any extension you're building. This includes the // infrastructure needed to serialize descriptors that are used with the // "opaque" parameter of the GPU custom call. In our example we'll use this // parameter to pass the size of our problem. #ifndef _GPU_OPS_KERNEL_HELPERS_H_ #define _GPU_OPS_KERNEL_HELPERS_H_ #include <cstdint> #include <stdexcept> #include <string> #include <type_traits> #define JAX_APEX_WARP_SIZE 32 namespace gpu_ops { // https://en.cppreference.com/w/cpp/numeric/bit_cast template <class To, class From> typename std::enable_if<sizeof(To) == sizeof(From) && std::is_trivially_copyable<From>::value && std::is_trivially_copyable<To>::value, To>::type bit_cast(const From &src) noexcept { static_assert(std::is_trivially_constructible<To>::value, "This implementation additionally requires destination type to " "be trivially constructible"); To dst; memcpy(&dst, &src, sizeof(To)); return dst; } template <typename T> std::string PackDescriptorAsString(const T &descriptor) { return std::string(bit_cast<const char *>(&descriptor), sizeof(T)); } template <typename T> const T *UnpackDescriptor(const char *opaque, std::size_t opaque_len) { if (opaque_len != sizeof(T)) { throw std::runtime_error("Invalid opaque object size"); } return bit_cast<const T *>(opaque); } } // namespace gpu_ops #endif
candle/candle-flash-attn/kernels/kernel_helpers.h/0
{ "file_path": "candle/candle-flash-attn/kernels/kernel_helpers.h", "repo_id": "candle", "token_count": 600 }
#include "cuda_utils.cuh" #include<stdint.h> #define AFFINE_OP(TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const size_t numel, \ const size_t num_dims, \ const size_t *info, \ const TYPENAME *inp, \ TYPENAME *out, \ const TYPENAME mul, \ const TYPENAME add \ ) { \ const size_t *dims = info; \ const size_t *strides = info + num_dims; \ if (info == nullptr || is_contiguous(num_dims, dims, strides)) { \ for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \ TYPENAME x = inp ? inp[i] : out[i]; \ out[i] = x * mul + add; \ } \ } \ else { \ for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \ unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \ TYPENAME x = inp ? inp[strided_i] : out[i]; \ out[i] = x * mul + add; \ } \ } \ } \ #if __CUDA_ARCH__ >= 800 AFFINE_OP(__nv_bfloat16, affine_bf16) #endif #if __CUDA_ARCH__ >= 530 AFFINE_OP(__half, affine_f16) #endif AFFINE_OP(float, affine_f32) AFFINE_OP(double, affine_f64) AFFINE_OP(uint8_t, affine_u8) AFFINE_OP(uint32_t, affine_u32) AFFINE_OP(int64_t, affine_i64)
candle/candle-kernels/src/affine.cu/0
{ "file_path": "candle/candle-kernels/src/affine.cu", "repo_id": "candle", "token_count": 659 }
// Imported from https://github.com/ggerganov/llama.cpp/blob/master/ggml-metal.metal #include <metal_stdlib> using namespace metal; #define SWAP(x, y) { auto tmp = (x); (x) = (y); (y) = tmp; } #define SORT_ASC 1 #define SORT_DESC 0 template<int order, typename T> METAL_FUNC void argsort( device const T * x, device uint32_t * dst, constant int64_t & ncols, constant int64_t & ncols_pad, threadgroup uint32_t * shared_values [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]]) { int col = tpitg[0]; int row = tgpig[1]; if (col >= ncols_pad) return; device const T * x_row = x + row * ncols; threadgroup uint32_t * dst_row = shared_values; // initialize indices dst_row[col] = col; threadgroup_barrier(mem_flags::mem_threadgroup); for (int k = 2; k <= ncols_pad; k *= 2) { for (int j = k / 2; j > 0; j /= 2) { int ixj = col ^ j; if (ixj > col) { if ((col & k) == 0) { if (dst_row[col] >= ncols || (dst_row[ixj] < ncols && (order == SORT_ASC ? x_row[dst_row[col]] > x_row[dst_row[ixj]] : x_row[dst_row[col]] < x_row[dst_row[ixj]])) ) { SWAP(dst_row[col], dst_row[ixj]); } } else { if (dst_row[ixj] >= ncols || (dst_row[col] < ncols && (order == SORT_ASC ? x_row[dst_row[col]] < x_row[dst_row[ixj]] : x_row[dst_row[col]] > x_row[dst_row[ixj]])) ) { SWAP(dst_row[col], dst_row[ixj]); } } } threadgroup_barrier(mem_flags::mem_threadgroup); } } // copy the result to dst without the padding if (col < ncols) { dst[row * ncols + col] = dst_row[col]; } } #define ARGSORT(T, RUST_T) \ kernel void asort_asc_##RUST_T( \ device const T * x, \ device uint32_t * dst, \ constant int64_t & ncols, \ constant int64_t & ncols_pad, \ threadgroup uint32_t * shared_values [[threadgroup(0)]], \ uint3 tgpig[[threadgroup_position_in_grid]], \ uint3 tpitg[[thread_position_in_threadgroup]] \ ) { \ argsort<SORT_ASC, T>(x, dst, ncols, ncols_pad, shared_values, tgpig, tpitg); \ } \ kernel void asort_desc_##RUST_T( \ device const T * x, \ device uint32_t * dst, \ constant int64_t & ncols, \ constant int64_t & ncols_pad, \ threadgroup uint32_t * shared_values [[threadgroup(0)]], \ uint3 tgpig[[threadgroup_position_in_grid]], \ uint3 tpitg[[thread_position_in_threadgroup]] \ ) { \ argsort<SORT_DESC, T>(x, dst, ncols, ncols_pad, shared_values, tgpig, tpitg); \ } \ ARGSORT(float, f32) ARGSORT(half, f16) ARGSORT(uint8_t, u8) ARGSORT(uint32_t, u32) #if __METAL_VERSION__ >= 220 ARGSORT(int64_t, i64) #endif #if defined(__HAVE_BFLOAT__) ARGSORT(bfloat, bf16) #endif
candle/candle-metal-kernels/src/sort.metal/0
{ "file_path": "candle/candle-metal-kernels/src/sort.metal", "repo_id": "candle", "token_count": 1748 }
//! Loss Calculations
//!
use candle::{Result, Tensor};

/// The negative log likelihood loss.
///
/// Arguments
///
/// * [inp]: The input tensor of dimensions `N, C` where `N` is the batch size and `C` the number
/// of categories. This is expected to contain log probabilities.
/// * [target]: The ground truth labels as a tensor of u32 of dimension `N`.
///
/// The resulting tensor is a scalar containing the average value over the batch.
pub fn nll(inp: &Tensor, target: &Tensor) -> Result<Tensor> {
    let b_sz = match target.dims() {
        &[b_sz] => b_sz,
        dims => candle::bail!("the target tensor should have a single dimension ({dims:?})"),
    };
    match inp.dims() {
        &[inp_b_sz, _] => {
            if inp_b_sz != b_sz {
                candle::bail!("batch size mismatch between inp ({inp_b_sz}) and target ({b_sz})")
            }
        }
        dims => candle::bail!("the input tensor should have two dimensions ({dims:?})"),
    }
    inp.gather(&target.unsqueeze(1)?, 1)?
        .sum_all()?
        .affine(-1f64 / b_sz as f64, 0.)
}

/// The cross-entropy loss.
///
/// Arguments
///
/// * [inp]: The input tensor of dimensions `N, C` where `N` is the batch size and `C` the number
/// of categories. This is expected to contain raw logits.
/// * [target]: The ground truth labels as a tensor of u32 of dimension `N`.
///
/// The resulting tensor is a scalar containing the average value over the batch.
pub fn cross_entropy(inp: &Tensor, target: &Tensor) -> Result<Tensor> {
    if inp.rank() != 2 {
        candle::bail!("cross_entropy expects an input tensor of rank 2")
    }
    let inp = crate::ops::log_softmax(inp, 1)?;
    nll(&inp, target)
}

/// The mean squared error loss.
pub fn mse(inp: &Tensor, target: &Tensor) -> Result<Tensor> {
    (inp - target)?.sqr()?.mean_all()
}

/// The binary cross-entropy with logit loss.
///
/// Arguments
///
/// * [inp]: The input tensor of dimensions `N, C` where `N` is the batch size and `C` the number
/// of categories. This is expected to contain raw logits.
/// * [target]: The ground truth labels as a float tensor of 0s and 1s, of dimension `N, C` where `N` is the batch size and `C` the number
/// of categories. It should have the same dtype as the input.
///
/// The resulting tensor is a scalar containing the average value over the batch.
pub fn binary_cross_entropy_with_logit(inp: &Tensor, target: &Tensor) -> Result<Tensor> {
    let inp = crate::ops::sigmoid(inp)?;

    let left_side = target * inp.log()?;
    let right_side = (target.affine(-1., 1.))? * inp.affine(-1., 1.)?.log()?;

    let loss = left_side? + right_side?;
    let loss = loss?.neg()?.mean_all()?;

    Ok(loss)
}
candle/candle-nn/src/loss.rs/0
{ "file_path": "candle/candle-nn/src/loss.rs", "repo_id": "candle", "token_count": 1049 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::{test_utils::to_vec2_round, DType, Device, Result, Tensor}; use candle_nn::RNN; /* The following test can be verified against PyTorch using the following snippet. import torch from torch import nn lstm = nn.LSTM(2, 3, 1) lstm.weight_ih_l0 = torch.nn.Parameter(torch.arange(0., 24.).reshape(12, 2).cos()) lstm.weight_hh_l0 = torch.nn.Parameter(torch.arange(0., 36.).reshape(12, 3).sin()) lstm.bias_ih_l0 = torch.nn.Parameter(torch.tensor([-1., 1., -0.5, 2, -1, 1, -0.5, 2, -1, 1, -0.5, 2])) lstm.bias_hh_l0 = torch.nn.Parameter(torch.tensor([-1., 1., -0.5, 2, -1, 1, -0.5, 2, -1, 1, -0.5, 2]).cos()) state = torch.zeros((1, 3)), torch.zeros((1, 3)) for inp in [3., 1., 4., 1., 5., 9., 2.]: inp = torch.tensor([[inp, inp * 0.5]]) _out, state = lstm(inp, state) print(state) # (tensor([[ 0.9919, 0.1738, -0.1451]], grad_fn=...), tensor([[ 5.7250, 0.4458, -0.2908]], grad_fn=...)) */ #[test] fn lstm() -> Result<()> { let cpu = &Device::Cpu; let w_ih = Tensor::arange(0f32, 24f32, cpu)?.reshape((12, 2))?; let w_ih = w_ih.cos()?; let w_hh = Tensor::arange(0f32, 36f32, cpu)?.reshape((12, 3))?; let w_hh = w_hh.sin()?; let b_ih = Tensor::new( &[-1f32, 1., -0.5, 2., -1., 1., -0.5, 2., -1., 1., -0.5, 2.], cpu, )?; let b_hh = b_ih.cos()?; let tensors: std::collections::HashMap<_, _> = [ ("weight_ih_l0".to_string(), w_ih), ("weight_hh_l0".to_string(), w_hh), ("bias_ih_l0".to_string(), b_ih), ("bias_hh_l0".to_string(), b_hh), ] .into_iter() .collect(); let vb = candle_nn::VarBuilder::from_tensors(tensors, DType::F32, cpu); let lstm = candle_nn::lstm(2, 3, Default::default(), vb)?; let mut state = lstm.zero_state(1)?; for inp in [3f32, 1., 4., 1., 5., 9., 2.] { let inp = Tensor::new(&[[inp, inp * 0.5]], cpu)?; state = lstm.step(&inp, &state)? } let h = state.h(); let c = state.c(); assert_eq!(to_vec2_round(h, 4)?, &[[0.9919, 0.1738, -0.1451]]); assert_eq!(to_vec2_round(c, 4)?, &[[5.725, 0.4458, -0.2908]]); Ok(()) } /* The following test can be verified against PyTorch using the following snippet. import torch from torch import nn gru = nn.GRU(2, 3, 1) gru.weight_ih_l0 = torch.nn.Parameter(torch.arange(0., 18.).reshape(9, 2).cos()) gru.weight_hh_l0 = torch.nn.Parameter(torch.arange(0., 27.).reshape(9, 3).sin()) gru.bias_ih_l0 = torch.nn.Parameter(torch.tensor([-1., 1., -0.5, 2, -1, 1, -0.5, 2, -1])) gru.bias_hh_l0 = torch.nn.Parameter(torch.tensor([-1., 1., -0.5, 2, -1, 1, -0.5, 2, -1]).cos()) state = torch.zeros((1, 3)) for inp in [3., 1., 4., 1., 5., 9., 2.]: inp = torch.tensor([[inp, inp * 0.5]]) _out, state = gru(inp, state) print(state) # tensor([[ 0.0579, 0.8836, -0.9991]], grad_fn=<SqueezeBackward1>) */ #[test] fn gru() -> Result<()> { let cpu = &Device::Cpu; let w_ih = Tensor::arange(0f32, 18f32, cpu)?.reshape((9, 2))?; let w_ih = w_ih.cos()?; let w_hh = Tensor::arange(0f32, 27f32, cpu)?.reshape((9, 3))?; let w_hh = w_hh.sin()?; let b_ih = Tensor::new(&[-1f32, 1., -0.5, 2., -1., 1., -0.5, 2., -1.], cpu)?; let b_hh = b_ih.cos()?; let tensors: std::collections::HashMap<_, _> = [ ("weight_ih_l0".to_string(), w_ih), ("weight_hh_l0".to_string(), w_hh), ("bias_ih_l0".to_string(), b_ih), ("bias_hh_l0".to_string(), b_hh), ] .into_iter() .collect(); let vb = candle_nn::VarBuilder::from_tensors(tensors, DType::F32, cpu); let gru = candle_nn::gru(2, 3, Default::default(), vb)?; let mut state = gru.zero_state(1)?; for inp in [3f32, 1., 4., 1., 5., 9., 2.] 
{ let inp = Tensor::new(&[[inp, inp * 0.5]], cpu)?; state = gru.step(&inp, &state)? } let h = state.h(); assert_eq!(to_vec2_round(h, 4)?, &[[0.0579, 0.8836, -0.9991]]); Ok(()) }
candle/candle-nn/tests/rnn.rs/0
{ "file_path": "candle/candle-nn/tests/rnn.rs", "repo_id": "candle", "token_count": 2010 }
import logging

try:
    from .candle import *
except ImportError as e:
    # If we are in development mode, or we did not bundle the DLLs, we try to locate them here.
    # PyO3 won't give us any information about what DLLs are missing, so we can only try to load
    # the DLLs and re-import the module.
    logging.warning("DLLs were not bundled with this package. Trying to locate them...")
    import os
    import platform

    def locate_cuda_dlls():
        logging.warning("Locating CUDA DLLs...")
        # Try to locate the CUDA_PATH environment variable.
        cuda_path = os.environ.get("CUDA_PATH", None)
        if cuda_path:
            logging.warning(f"Found CUDA_PATH environment variable: {cuda_path}")
            if platform.system() == "Windows":
                cuda_path = os.path.join(cuda_path, "bin")
            else:
                cuda_path = os.path.join(cuda_path, "lib64")

            logging.warning(f"Adding {cuda_path} to DLL search path...")
            os.add_dll_directory(cuda_path)
        else:
            logging.warning("CUDA_PATH environment variable not found!")

    def locate_mkl_dlls():
        # Try to locate the ONEAPI_ROOT environment variable.
        oneapi_root = os.environ.get("ONEAPI_ROOT", None)
        if oneapi_root:
            if platform.system() == "Windows":
                mkl_path = os.path.join(
                    oneapi_root, "compiler", "latest", "windows", "redist", "intel64_win", "compiler"
                )
            else:
                mkl_path = os.path.join(oneapi_root, "mkl", "latest", "lib", "intel64")

            logging.warning(f"Adding {mkl_path} to DLL search path...")
            os.add_dll_directory(mkl_path)
        else:
            logging.warning("ONEAPI_ROOT environment variable not found!")

    locate_cuda_dlls()
    locate_mkl_dlls()

    try:
        from .candle import *
    except ImportError as inner_e:
        raise ImportError(
            "Could not locate DLLs. Please check the documentation for more information."
        ) from inner_e

__doc__ = candle.__doc__
if hasattr(candle, "__all__"):
    __all__ = candle.__all__
candle/candle-pyo3/py_src/candle/__init__.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/__init__.py", "repo_id": "candle", "token_count": 919 }
from typing import TypeVar, Union, Sequence _T = TypeVar("_T") _ArrayLike = Union[ _T, Sequence[_T], Sequence[Sequence[_T]], Sequence[Sequence[Sequence[_T]]], Sequence[Sequence[Sequence[Sequence[_T]]]], ] CPU: str = "cpu" CUDA: str = "cuda" Device = TypeVar("Device", CPU, CUDA) Scalar = Union[int, float] Index = Union[int, slice, None, "Ellipsis"] Shape = Union[int, Sequence[int]]
candle/candle-pyo3/py_src/candle/typing/__init__.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/typing/__init__.py", "repo_id": "candle", "token_count": 166 }
from candle import Tensor from candle import rand import pytest def test_absolute_shapes_are_valid(): a = rand((10, 20)) assert a.shape == (10, 20) b = rand(10, 20) assert b.shape == (10, 20) pytest.raises(OverflowError, lambda: rand((10, 20, -1))) pytest.raises(OverflowError, lambda: rand(-1, 20)) pytest.raises(TypeError, lambda: rand("foo", True)) def test_relative_shapes_are_valid(): a = rand(10, 20) a = a.reshape((1, -1)) assert a.shape == (1, 200) b = rand(10, 20) b = b.reshape(-1, 1) assert b.shape == (200, 1) c = rand(10, 20) pytest.raises(TypeError, lambda: c.reshape(1, "foo")) pytest.raises(ValueError, lambda: c.reshape(1, -2)) pytest.raises(ValueError, lambda: c.reshape((-2, 1))) pytest.raises(ValueError, lambda: c.reshape((0, 1))) pytest.raises(ValueError, lambda: c.reshape((1, -1, -1)))
candle/candle-pyo3/tests/native/test_shape.py/0
{ "file_path": "candle/candle-pyo3/tests/native/test_shape.py", "repo_id": "candle", "token_count": 385 }
//! Chinese contrastive Language-Image Pre-Training //! //! Chinese contrastive Language-Image Pre-Training (CLIP) is an architecture trained on //! pairs of images with related texts. //! //! - 💻 [Chinese-CLIP](https://github.com/OFA-Sys/Chinese-CLIP) //! - 💻 [GH](https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/chinese_clip/modeling_chinese_clip.py_ use candle::{Context, DType, IndexOp, Module, Result, Shape, Tensor, D}; use candle_nn as nn; use super::{Activation, EncoderConfig}; #[derive(Clone, Debug)] pub struct ChineseClipVisionConfig { pub hidden_size: usize, pub intermediate_size: usize, pub projection_dim: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub num_channels: usize, pub image_size: usize, pub patch_size: usize, pub hidden_act: Activation, pub layer_norm_eps: f64, pub attention_dropout: f32, pub initializer_range: f32, pub initializer_factor: f32, } impl Default for ChineseClipVisionConfig { fn default() -> Self { ChineseClipVisionConfig { hidden_size: 768, intermediate_size: 3072, projection_dim: 512, num_hidden_layers: 12, num_attention_heads: 12, num_channels: 3, image_size: 224, patch_size: 32, hidden_act: Activation::QuickGelu, layer_norm_eps: 1e-5, attention_dropout: 0.0, initializer_range: 0.02, initializer_factor: 1.0, } } } impl ChineseClipVisionConfig { /// [referer](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16/blob/main/config.json) pub fn clip_vit_base_patch16() -> Self { Self { hidden_size: 768, intermediate_size: 3072, projection_dim: 512, num_hidden_layers: 12, num_attention_heads: 12, num_channels: 3, image_size: 224, patch_size: 16, hidden_act: Activation::QuickGelu, layer_norm_eps: 1e-5, attention_dropout: 0.0, initializer_range: 0.02, initializer_factor: 1.0, } } } #[derive(Clone, Debug)] pub struct ChineseClipVisionEmbeddings { patch_embedding: nn::Conv2d, position_ids: Tensor, class_embedding: Tensor, position_embedding: nn::Embedding, } impl ChineseClipVisionEmbeddings { pub fn new(var: nn::VarBuilder, config: &ChineseClipVisionConfig) -> Result<Self> { let embed_dim = config.hidden_size; // originally nn.Parameter let class_embedding = if var.contains_tensor("class_embedding") { var.get(embed_dim, "class_embedding")? } else { Tensor::randn(0f32, 1f32, embed_dim, var.device())? }; let num_patches = (config.image_size / config.patch_size).pow(2); let num_positions = num_patches + 1; let position_ids = Tensor::arange(0, num_positions as i64, var.device())?; let conv2dconfig = nn::Conv2dConfig { stride: config.patch_size, ..Default::default() }; let position_embedding = nn::embedding(num_positions, embed_dim, var.pp("position_embedding"))?; let patch_embedding = nn::conv2d_no_bias( config.num_channels, embed_dim, config.patch_size, conv2dconfig, var.pp("patch_embedding"), )?; Ok(Self { patch_embedding, position_ids, class_embedding, position_embedding, }) } } impl Module for ChineseClipVisionEmbeddings { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let batch_size = xs.shape().dims(); let patch_embeds = self .patch_embedding .forward(xs)? .flatten_from(2)? 
.transpose(1, 2)?; let shape = Shape::from((batch_size[0], 1, self.class_embedding.dim(D::Minus1)?)); let class_embeds = self.class_embedding.expand(shape)?; let embeddings = Tensor::cat(&[class_embeds, patch_embeds], 1)?; let position_embedding = self.position_embedding.forward(&self.position_ids)?; embeddings.broadcast_add(&position_embedding) } } #[derive(Clone, Debug)] struct ChineseClipVisionAttention { k_proj: nn::Linear, v_proj: nn::Linear, q_proj: nn::Linear, out_proj: nn::Linear, head_dim: usize, scale: f64, num_attention_heads: usize, } impl ChineseClipVisionAttention { fn new(var: nn::VarBuilder, config: &EncoderConfig) -> Result<Self> { let embed_dim = config.embed_dim(); let num_attention_heads = config.num_attention_heads(); let k_proj = nn::linear(embed_dim, embed_dim, var.pp("k_proj"))?; let v_proj = nn::linear(embed_dim, embed_dim, var.pp("v_proj"))?; let q_proj = nn::linear(embed_dim, embed_dim, var.pp("q_proj"))?; let out_proj = nn::linear(embed_dim, embed_dim, var.pp("out_proj"))?; let head_dim = embed_dim / num_attention_heads; let scale = (head_dim as f64).powf(-0.5); Ok(ChineseClipVisionAttention { k_proj, v_proj, q_proj, out_proj, head_dim, scale, num_attention_heads, }) } fn shape(&self, xs: &Tensor, seq_len: usize, bsz: usize) -> Result<Tensor> { xs.reshape((bsz, seq_len, self.num_attention_heads, self.head_dim))? .transpose(1, 2)? .contiguous() } fn forward(&self, xs: &Tensor, causal_attention_mask: Option<&Tensor>) -> Result<Tensor> { let in_dtype = xs.dtype(); let (bsz, seq_len, embed_dim) = xs.dims3()?; let proj_shape = (bsz * self.num_attention_heads, seq_len, self.head_dim); let query_states = self .shape(&(self.q_proj.forward(xs)? * self.scale)?, seq_len, bsz)? .reshape(proj_shape)? .to_dtype(DType::F32)?; let key_states = self .shape(&self.k_proj.forward(xs)?, seq_len, bsz)? .reshape(proj_shape)? .to_dtype(DType::F32)?; let value_states = self .shape(&self.v_proj.forward(xs)?, seq_len, bsz)? .reshape(proj_shape)? .to_dtype(DType::F32)?; let attn_weights = query_states.matmul(&key_states.transpose(1, 2)?)?; let src_len = key_states.dim(1)?; let attn_weights = if let Some(causal_attention_mask) = causal_attention_mask { attn_weights .reshape((bsz, self.num_attention_heads, seq_len, src_len))? .broadcast_add(causal_attention_mask)? .reshape((bsz * self.num_attention_heads, seq_len, src_len))? } else { attn_weights }; let attn_weights = nn::ops::softmax(&attn_weights, D::Minus1)?; let attn_output = attn_weights.matmul(&value_states)?.to_dtype(in_dtype)?; let attn_output = attn_output .reshape((bsz, self.num_attention_heads, seq_len, self.head_dim))? .transpose(1, 2)? .reshape((bsz, seq_len, embed_dim))?; self.out_proj.forward(&attn_output) } } #[derive(Clone, Debug)] struct ChineseClipVisionMlp { fc1: nn::Linear, fc2: nn::Linear, activation: Activation, } impl ChineseClipVisionMlp { fn new(var: nn::VarBuilder, config: &EncoderConfig) -> Result<Self> { let fc1 = nn::linear( config.embed_dim(), config.intermediate_size(), var.pp("fc1"), )?; let fc2 = nn::linear( config.intermediate_size(), config.embed_dim(), var.pp("fc2"), )?; Ok(ChineseClipVisionMlp { fc1, fc2, activation: config.activation(), }) } } impl ChineseClipVisionMlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.fc1.forward(xs)?; self.fc2.forward(&self.activation.forward(&xs)?) 
} } #[derive(Clone, Debug)] struct ChineseClipVisionEncoderLayer { self_attn: ChineseClipVisionAttention, layer_norm1: nn::LayerNorm, mlp: ChineseClipVisionMlp, layer_norm2: nn::LayerNorm, } impl ChineseClipVisionEncoderLayer { fn new(var: nn::VarBuilder, config: &EncoderConfig) -> Result<Self> { let self_attn = ChineseClipVisionAttention::new(var.pp("self_attn"), config)?; let layer_norm1 = nn::layer_norm( config.embed_dim(), config.layer_norm_eps(), var.pp("layer_norm1"), )?; let mlp = ChineseClipVisionMlp::new(var.pp("mlp"), config)?; let layer_norm2 = nn::layer_norm( config.embed_dim(), config.layer_norm_eps(), var.pp("layer_norm2"), )?; Ok(ChineseClipVisionEncoderLayer { self_attn, layer_norm1, mlp, layer_norm2, }) } fn forward(&self, xs: &Tensor, causal_attention_mask: Option<&Tensor>) -> Result<Tensor> { let residual = xs; let xs = self.layer_norm1.forward(xs)?; let xs = self.self_attn.forward(&xs, causal_attention_mask)?; let xs = (xs + residual)?; let residual = &xs; let xs = self.layer_norm2.forward(&xs)?; let xs = self.mlp.forward(&xs)?; xs + residual } } #[derive(Clone, Debug)] pub struct ChineseClipVisionEncoder { layers: Vec<ChineseClipVisionEncoderLayer>, } impl ChineseClipVisionEncoder { pub fn new(var: nn::VarBuilder, config: &EncoderConfig) -> Result<Self> { let vs = var.pp("layers"); let mut layers: Vec<ChineseClipVisionEncoderLayer> = Vec::new(); for index in 0..config.num_hidden_layers() { let layer = ChineseClipVisionEncoderLayer::new(vs.pp(index.to_string()), config)?; layers.push(layer) } Ok(ChineseClipVisionEncoder { layers }) } pub fn forward(&self, xs: &Tensor, causal_attention_mask: Option<&Tensor>) -> Result<Tensor> { let mut xs = xs.clone(); for layer in self.layers.iter() { xs = layer.forward(&xs, causal_attention_mask)?; } Ok(xs) } // required by LLaVA pub fn output_hidden_states( &self, xs: &Tensor, causal_attention_mask: Option<&Tensor>, ) -> Result<Vec<Tensor>> { let mut xs = xs.clone(); let mut hidden_states = Vec::new(); for layer in self.layers.iter() { xs = layer.forward(&xs, causal_attention_mask)?; hidden_states.push(xs.clone()); } Ok(hidden_states) } } #[derive(Clone, Debug)] pub struct ChineseClipVisionTransformer { embeddings: ChineseClipVisionEmbeddings, encoder: ChineseClipVisionEncoder, pre_layer_norm: nn::LayerNorm, final_layer_norm: nn::LayerNorm, } impl ChineseClipVisionTransformer { pub fn new(var: nn::VarBuilder, config: &ChineseClipVisionConfig) -> Result<Self> { let embed_dim = config.hidden_size; let embeddings = ChineseClipVisionEmbeddings::new(var.pp("embeddings"), config)?; let pre_layer_norm = nn::layer_norm(embed_dim, config.layer_norm_eps, var.pp("pre_layrnorm"))?; let encoder = ChineseClipVisionEncoder::new( var.pp("encoder"), &EncoderConfig::Vision(config.clone()), )?; let final_layer_norm = nn::layer_norm(embed_dim, config.layer_norm_eps, var.pp("post_layernorm"))?; Ok(Self { embeddings, encoder, final_layer_norm, pre_layer_norm, }) } // required by LLaVA pub fn output_hidden_states(&self, pixel_values: &Tensor) -> Result<Vec<Tensor>> { let hidden_states = pixel_values .apply(&self.embeddings)? 
.apply(&self.pre_layer_norm)?; let mut result = self.encoder.output_hidden_states(&hidden_states, None)?; let encoder_outputs = result.last().context("no last")?; let pooled_output = encoder_outputs.i((.., 0, ..))?; result.push(self.final_layer_norm.forward(&pooled_output)?.clone()); Ok(result) } } impl Module for ChineseClipVisionTransformer { fn forward(&self, pixel_values: &Tensor) -> Result<Tensor> { let hidden_states = pixel_values .apply(&self.embeddings)? .apply(&self.pre_layer_norm)?; let encoder_outputs = self.encoder.forward(&hidden_states, None)?; // referer: https://github.com/huggingface/transformers/blob/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip/modeling_clip.py#L787 let pooled_output = encoder_outputs.i((.., 0, ..))?; self.final_layer_norm.forward(&pooled_output) } }
candle/candle-transformers/src/models/chinese_clip/vision_model.rs/0
{ "file_path": "candle/candle-transformers/src/models/chinese_clip/vision_model.rs", "repo_id": "candle", "token_count": 6262 }
//! EnCodec neural audio codec based on the Encodec implementation. //! //! See ["High Fidelity Neural Audio Compression"](https://arxiv.org/abs/2210.13438) //! //! Based on implementation from [huggingface/transformers](https://github.com/huggingface/transformers/blob/main/src/transformers/models/encodec/modeling_encodec.py) use candle::{DType, IndexOp, Layout, Module, Result, Shape, Tensor, D}; use candle_nn::{conv1d, Conv1d, ConvTranspose1d, VarBuilder}; // Encodec Model // https://github.com/huggingface/transformers/blob/main/src/transformers/models/encodec/modeling_encodec.py #[derive(Debug, Copy, Clone, PartialEq, Eq, serde::Deserialize)] pub enum NormType { WeightNorm, TimeGroupNorm, None, } #[derive(Debug, Copy, Clone, PartialEq, Eq, serde::Deserialize)] pub enum PadMode { Constant, Reflect, Replicate, } #[derive(Debug, Clone, PartialEq, serde::Deserialize)] pub struct Config { pub target_bandwidths: Vec<f64>, pub sampling_rate: usize, pub audio_channels: usize, pub normalize: bool, pub chunk_length_s: Option<usize>, pub overlap: Option<usize>, pub hidden_size: usize, pub num_filters: usize, pub num_residual_layers: usize, pub upsampling_ratios: Vec<usize>, pub norm_type: NormType, pub kernel_size: usize, pub last_kernel_size: usize, pub residual_kernel_size: usize, pub dilation_growth_rate: usize, pub use_causal_conv: bool, pub pad_mode: PadMode, pub compress: usize, pub num_lstm_layers: usize, pub trim_right_ratio: f64, pub codebook_size: usize, pub codebook_dim: Option<usize>, pub use_conv_shortcut: bool, } impl Default for Config { fn default() -> Self { Self { target_bandwidths: vec![1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate: 24_000, audio_channels: 1, normalize: false, chunk_length_s: None, overlap: None, hidden_size: 128, num_filters: 32, num_residual_layers: 1, upsampling_ratios: vec![8, 5, 4, 2], norm_type: NormType::WeightNorm, kernel_size: 7, last_kernel_size: 7, residual_kernel_size: 3, dilation_growth_rate: 2, use_causal_conv: true, // This should be PadMode::Reflect which is currently unsupported in candle. pad_mode: PadMode::Replicate, compress: 2, num_lstm_layers: 2, trim_right_ratio: 1.0, codebook_size: 1024, codebook_dim: None, use_conv_shortcut: true, } } } impl Config { fn codebook_dim(&self) -> usize { self.codebook_dim.unwrap_or(self.hidden_size) } fn frame_rate(&self) -> usize { let hop_length: usize = self.upsampling_ratios.iter().product(); self.sampling_rate.div_ceil(hop_length) } fn num_quantizers(&self) -> usize { let num = 1000f64 * self .target_bandwidths .last() .expect("empty target_bandwidths"); (num as usize) / (self.frame_rate() * 10) } } fn get_extra_padding_for_conv1d( xs: &Tensor, k_size: usize, stride: usize, padding_total: usize, ) -> Result<usize> { let len = xs.dim(D::Minus1)?; let n_frames = (len + padding_total).saturating_sub(k_size) as f64 / stride as f64 + 1.0; let ideal_len = ((n_frames.ceil() as usize - 1) * stride + k_size).saturating_sub(padding_total); Ok(ideal_len.saturating_sub(len)) } fn pad1d(xs: &Tensor, pad_l: usize, pad_r: usize, mode: PadMode) -> Result<Tensor> { match mode { PadMode::Constant => xs.pad_with_zeros(D::Minus1, pad_l, pad_r), PadMode::Reflect => candle::bail!("pad-mode 'reflect' is not supported"), PadMode::Replicate => xs.pad_with_same(D::Minus1, pad_l, pad_r), } } // Applies weight norm for inference by recomputing the weight tensor. This // does not apply to training. 
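// Concretely, the stored parameters are a per-channel gain `weight_g` and a direction tensor
// `weight_v`; the effective weight is weight_g * weight_v / ||weight_v||, where the norm is
// taken over dims (1, 2) of `weight_v`, matching the computation below.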
// https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html pub fn conv1d_weight_norm( in_c: usize, out_c: usize, kernel_size: usize, config: candle_nn::Conv1dConfig, vb: VarBuilder, ) -> Result<Conv1d> { let weight_g = vb.get((out_c, 1, 1), "weight_g")?; let weight_v = vb.get((out_c, in_c, kernel_size), "weight_v")?; let norm_v = weight_v.sqr()?.sum_keepdim((1, 2))?.sqrt()?; let weight = weight_v.broadcast_mul(&weight_g)?.broadcast_div(&norm_v)?; let bias = vb.get(out_c, "bias")?; Ok(Conv1d::new(weight, Some(bias), config)) } pub fn conv_transpose1d_weight_norm( in_c: usize, out_c: usize, kernel_size: usize, bias: bool, config: candle_nn::ConvTranspose1dConfig, vb: VarBuilder, ) -> Result<ConvTranspose1d> { let weight_g = vb.get((in_c, 1, 1), "weight_g")?; let weight_v = vb.get((in_c, out_c, kernel_size), "weight_v")?; let norm_v = weight_v.sqr()?.sum_keepdim((1, 2))?.sqrt()?; let weight = weight_v.broadcast_mul(&weight_g)?.broadcast_div(&norm_v)?; let bias = if bias { Some(vb.get(out_c, "bias")?) } else { None }; Ok(ConvTranspose1d::new(weight, bias, config)) } struct CodebookEncode; impl candle::CustomOp2 for CodebookEncode { fn name(&self) -> &'static str { "cb" } fn cpu_fwd( &self, lhs_storage: &candle::CpuStorage, lhs_layout: &Layout, rhs_storage: &candle::CpuStorage, rhs_layout: &Layout, ) -> Result<(candle::CpuStorage, Shape)> { use rayon::prelude::*; let (lhs_dim1, lhs_dim2) = lhs_layout.shape().dims2()?; let (rhs_dim1, rhs_dim2) = rhs_layout.shape().dims2()?; if lhs_dim2 != rhs_dim2 { candle::bail!("CodebookEncode, mismatch on last dim, {lhs_layout:?} {rhs_layout:?}"); } if lhs_dim2 == 0 { candle::bail!("CodebookEncode, empty last dim {lhs_layout:?}") } let lhs = match lhs_layout.contiguous_offsets() { None => candle::bail!("CodebookEncode, lhs has to be contiguous, got {lhs_layout:?}"), Some((o1, o2)) => { let slice = lhs_storage.as_slice::<f32>()?; &slice[o1..o2] } }; let rhs = match rhs_layout.contiguous_offsets() { None => candle::bail!("CodebookEncode, rhs has to be contiguous, got {rhs_layout:?}"), Some((o1, o2)) => { let slice = rhs_storage.as_slice::<f32>()?; &slice[o1..o2] } }; let dst = (0..lhs_dim1) .into_par_iter() .map(|idx1| { let mut where_min = 0; let mut min_dist = f32::INFINITY; let lhs = &lhs[idx1 * lhs_dim2..(idx1 + 1) * lhs_dim2]; for idx2 in 0..rhs_dim1 { let rhs = &rhs[idx2 * rhs_dim2..(idx2 + 1) * rhs_dim2]; let mut dist = 0f32; for (a, b) in lhs.iter().zip(rhs.iter()) { dist += (a - b) * (a - b) } if dist < min_dist { min_dist = dist; where_min = idx2; } } where_min as u32 }) .collect(); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, (lhs_dim1,).into())) } } // https://github.com/huggingface/transformers/blob/abaca9f9432a84cfaa95531de4c72334f38a42f2/src/transformers/models/encodec/modeling_encodec.py#L340 #[allow(unused)] #[derive(Clone, Debug)] pub struct EuclideanCodebook { inited: Tensor, cluster_size: Tensor, embed: candle_nn::Embedding, embed_avg: Tensor, c2: Tensor, } impl EuclideanCodebook { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let inited = vb.get(1, "inited")?; let cluster_size = vb.get(cfg.codebook_size, "cluster_size")?; let e_shape = (cfg.codebook_size, cfg.codebook_dim()); let embed = vb.get(e_shape, "embed")?; let c2 = ((&embed * &embed)?.sum(D::Minus1)? 
/ 2.0)?; let embed_avg = vb.get(e_shape, "embed_avg")?; Ok(Self { inited, cluster_size, embed: candle_nn::Embedding::new(embed, cfg.codebook_dim()), embed_avg, c2, }) } pub fn encode_slow(&self, xs: &Tensor) -> Result<Tensor> { let mut target_shape = xs.dims().to_vec(); target_shape.pop(); let xs = xs.flatten_to(D::Minus2)?; let _ = xs.dims2()?; let dot_prod = xs.matmul(&self.embed.embeddings().t()?)?; let codes = self.c2.broadcast_sub(&dot_prod)?.argmin(D::Minus1)?; codes.reshape(target_shape) } pub fn encode(&self, xs: &Tensor) -> Result<Tensor> { let mut target_shape = xs.dims().to_vec(); target_shape.pop(); let xs = xs.flatten_to(D::Minus2)?; let _ = xs.dims2()?; let codes = Tensor::apply_op2(&xs, self.embed.embeddings(), CodebookEncode)?; codes.reshape(target_shape) } pub fn decode(&self, embed_ind: &Tensor) -> Result<Tensor> { let quantize = self.embed.forward(embed_ind)?; Ok(quantize) } } #[derive(Clone, Debug)] pub struct VectorQuantization { codebook: EuclideanCodebook, } impl VectorQuantization { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let codebook = EuclideanCodebook::new(cfg, vb.pp("codebook"))?; Ok(Self { codebook }) } pub fn encode(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.transpose(1, 2)?; self.codebook.encode_slow(&xs) } pub fn decode(&self, embed_ind: &Tensor) -> Result<Tensor> { let quantize = self.codebook.decode(embed_ind)?; let quantize = quantize.transpose(1, 2)?; Ok(quantize) } } #[derive(Clone, Debug)] pub struct ResidualVectorQuantizer { layers: Vec<VectorQuantization>, dtype: DType, } impl ResidualVectorQuantizer { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb = &vb.pp("layers"); let layers = (0..cfg.num_quantizers()) .map(|i| VectorQuantization::new(cfg, vb.pp(i))) .collect::<Result<Vec<_>>>()?; Ok(Self { layers, dtype: vb.dtype(), }) } pub fn encode(&self, xs: &Tensor) -> Result<Tensor> { let mut codes = Vec::with_capacity(self.layers.len()); let mut residual = xs.clone(); for layer in self.layers.iter() { let indices = layer.encode(&residual)?; let quantized = layer.decode(&indices)?; residual = (residual - quantized)?; codes.push(indices) } Tensor::stack(&codes, 0) } pub fn decode(&self, codes: &Tensor) -> Result<Tensor> { let mut quantized_out = Tensor::zeros((), self.dtype, codes.device())?; let ncodes = codes.dim(0)?; if ncodes > self.layers.len() { candle::bail!( "codes shape {:?} does not match the number of quantization layers {}", codes.shape(), self.layers.len() ) } for (i, layer) in self.layers.iter().take(ncodes).enumerate() { let quantized = layer.decode(&codes.i(i)?)?; quantized_out = quantized.broadcast_add(&quantized_out)?; } Ok(quantized_out) } } // https://github.com/huggingface/transformers/blob/abaca9f9432a84cfaa95531de4c72334f38a42f2/src/transformers/models/encodec/modeling_encodec.py#L226 #[derive(Clone, Debug)] pub struct EncodecLSTM { layers: Vec<candle_nn::LSTM>, } impl EncodecLSTM { pub fn new(dim: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb = &vb.pp("lstm"); let mut layers = vec![]; for layer_idx in 0..cfg.num_lstm_layers { let config = candle_nn::LSTMConfig { layer_idx, ..Default::default() }; let lstm = candle_nn::lstm(dim, dim, config, vb.clone())?; layers.push(lstm) } Ok(Self { layers }) } } impl Module for EncodecLSTM { fn forward(&self, xs: &Tensor) -> Result<Tensor> { use candle_nn::RNN; // This is different from the Python transformers version as candle LSTM is batch first. 
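        // Swap the last two axes so time becomes the sequence dimension
        // expected by the batch-first LSTM; the residual connection is added
        // in that layout and the result is transposed back below.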
let xs = xs.t()?; let residual = &xs; let mut xs = xs.clone(); for layer in self.layers.iter() { let states = layer.seq(&xs)?; xs = layer.states_to_tensor(&states)?; } let xs = (xs + residual)?.t()?; Ok(xs) } } #[derive(Clone, Debug)] pub struct EncodecConvTranspose1d { conv: ConvTranspose1d, } impl EncodecConvTranspose1d { fn new( in_c: usize, out_c: usize, k: usize, stride: usize, _cfg: &Config, vb: VarBuilder, ) -> Result<Self> { let cfg = candle_nn::ConvTranspose1dConfig { stride, ..Default::default() }; let conv = conv_transpose1d_weight_norm(in_c, out_c, k, true, cfg, vb.pp("conv"))?; Ok(Self { conv }) } } impl Module for EncodecConvTranspose1d { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.conv) } } #[derive(Clone, Debug)] pub struct EncodecConv1d { causal: bool, conv: Conv1d, norm: Option<candle_nn::GroupNorm>, pad_mode: PadMode, } impl EncodecConv1d { pub fn new( in_c: usize, out_c: usize, kernel_size: usize, stride: usize, dilation: usize, cfg: &Config, vb: VarBuilder, ) -> Result<Self> { let conv = match cfg.norm_type { NormType::WeightNorm => conv1d_weight_norm( in_c, out_c, kernel_size, candle_nn::Conv1dConfig { stride, dilation, ..Default::default() }, vb.pp("conv"), )?, NormType::None | NormType::TimeGroupNorm => conv1d( in_c, out_c, kernel_size, candle_nn::Conv1dConfig { padding: 0, stride, groups: 1, dilation: 1, }, vb.pp("conv"), )?, }; let norm = match cfg.norm_type { NormType::None | NormType::WeightNorm => None, NormType::TimeGroupNorm => { let gn = candle_nn::group_norm(1, out_c, 1e-5, vb.pp("norm"))?; Some(gn) } }; Ok(Self { causal: cfg.use_causal_conv, conv, norm, pad_mode: cfg.pad_mode, }) } } impl Module for EncodecConv1d { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (_b, _t, _c) = xs.dims3()?; let k_size = self.conv.weight().dim(D::Minus1)?; let conv_cfg = self.conv.config(); // Effective kernel size with dilations. let k_size = (k_size - 1) * conv_cfg.dilation + 1; let padding_total = k_size - conv_cfg.stride; let extra_padding = get_extra_padding_for_conv1d(xs, k_size, conv_cfg.stride, padding_total)?; let xs = if self.causal { pad1d(xs, padding_total, extra_padding, self.pad_mode)? } else { let padding_right = padding_total / 2; let padding_left = padding_total - padding_right; pad1d( xs, padding_left, padding_right + extra_padding, self.pad_mode, )? }; let xs = self.conv.forward(&xs)?; match &self.norm { None => Ok(xs), Some(norm) => xs.apply(norm), } } } #[derive(Clone, Debug)] pub struct EncodecResnetBlock { block_conv1: EncodecConv1d, block_conv2: EncodecConv1d, shortcut: Option<EncodecConv1d>, } impl EncodecResnetBlock { pub fn new( dim: usize, (dilation1, dilation2): (usize, usize), cfg: &Config, vb: VarBuilder, ) -> Result<Self> { let h = dim / cfg.compress; let mut layer = Layer::new(vb.pp("block")); // TODO: Apply dilations! 
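        // The counter mirrors the upstream sequential block layout
        // [activation, conv1, activation, conv2]; `inc` skips the
        // parameter-free activation slots so the two convolutions load from
        // sub-paths `1` and `3`.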
layer.inc(); let block_conv1 = EncodecConv1d::new( dim, h, cfg.residual_kernel_size, 1, dilation1, cfg, layer.next(), )?; layer.inc(); let block_conv2 = EncodecConv1d::new(h, dim, 1, 1, dilation2, cfg, layer.next())?; let shortcut = if cfg.use_conv_shortcut { let conv = EncodecConv1d::new(dim, dim, 1, 1, 1, cfg, vb.pp("shortcut"))?; Some(conv) } else { None }; Ok(Self { block_conv1, block_conv2, shortcut, }) } } impl Module for EncodecResnetBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let residual = xs.clone(); let xs = xs.elu(1.)?; let xs = self.block_conv1.forward(&xs)?; let xs = xs.elu(1.)?; let xs = self.block_conv2.forward(&xs)?; let xs = match &self.shortcut { None => (xs + residual)?, Some(shortcut) => xs.add(&shortcut.forward(&residual)?)?, }; Ok(xs) } } struct Layer<'a> { vb: VarBuilder<'a>, cnt: usize, } impl<'a> Layer<'a> { fn new(vb: VarBuilder<'a>) -> Self { Self { vb, cnt: 0 } } fn inc(&mut self) { self.cnt += 1; } fn next(&mut self) -> VarBuilder { let vb = self.vb.pp(self.cnt.to_string()); self.cnt += 1; vb } } #[derive(Clone, Debug)] pub struct Encoder { init_conv: EncodecConv1d, sampling_layers: Vec<(Vec<EncodecResnetBlock>, EncodecConv1d)>, final_lstm: EncodecLSTM, final_conv: EncodecConv1d, } impl Encoder { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let mut layer = Layer::new(vb.pp("layers")); let init_conv = EncodecConv1d::new( cfg.audio_channels, cfg.num_filters, cfg.kernel_size, 1, 1, cfg, layer.next(), )?; let mut sampling_layers = vec![]; let mut scaling = 1; for &ratio in cfg.upsampling_ratios.iter().rev() { let current_scale = scaling * cfg.num_filters; let mut resnets = vec![]; for j in 0..(cfg.num_residual_layers as u32) { let resnet = EncodecResnetBlock::new( current_scale, (cfg.dilation_growth_rate.pow(j), 1), cfg, layer.next(), )?; resnets.push(resnet) } layer.inc(); // ELU let conv1d = EncodecConv1d::new( current_scale, current_scale * 2, ratio * 2, ratio, 1, cfg, layer.next(), )?; sampling_layers.push((resnets, conv1d)); scaling *= 2; } let final_lstm = EncodecLSTM::new(cfg.num_filters * scaling, cfg, layer.next())?; layer.inc(); // ELU let final_conv = EncodecConv1d::new( cfg.num_filters * scaling, cfg.hidden_size, cfg.last_kernel_size, 1, 1, cfg, layer.next(), )?; Ok(Self { init_conv, sampling_layers, final_conv, final_lstm, }) } } impl Module for Encoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = xs.apply(&self.init_conv)?; for (resnets, conv) in self.sampling_layers.iter() { for resnet in resnets.iter() { xs = xs.apply(resnet)?; } xs = xs.elu(1.0)?.apply(conv)?; } xs.apply(&self.final_lstm)? .elu(1.0)? 
.apply(&self.final_conv) } } #[derive(Clone, Debug)] pub struct Decoder { init_conv: EncodecConv1d, init_lstm: EncodecLSTM, sampling_layers: Vec<(EncodecConvTranspose1d, Vec<EncodecResnetBlock>)>, final_conv: EncodecConv1d, } impl Decoder { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let mut layer = Layer::new(vb.pp("layers")); let mut scaling = usize::pow(2, cfg.upsampling_ratios.len() as u32); let init_conv = EncodecConv1d::new( cfg.hidden_size, cfg.num_filters * scaling, cfg.last_kernel_size, 1, 1, cfg, layer.next(), )?; let init_lstm = EncodecLSTM::new(cfg.num_filters * scaling, cfg, layer.next())?; let mut sampling_layers = vec![]; for &ratio in cfg.upsampling_ratios.iter() { let current_scale = scaling * cfg.num_filters; layer.inc(); // ELU let conv1d = EncodecConvTranspose1d::new( current_scale, current_scale / 2, ratio * 2, ratio, cfg, layer.next(), )?; let mut resnets = vec![]; for j in 0..(cfg.num_residual_layers as u32) { let resnet = EncodecResnetBlock::new( current_scale / 2, (cfg.dilation_growth_rate.pow(j), 1), cfg, layer.next(), )?; resnets.push(resnet) } sampling_layers.push((conv1d, resnets)); scaling /= 2; } layer.inc(); // ELU let final_conv = EncodecConv1d::new( cfg.num_filters, cfg.audio_channels, cfg.last_kernel_size, 1, 1, cfg, layer.next(), )?; Ok(Self { init_conv, init_lstm, sampling_layers, final_conv, }) } } impl Module for Decoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = xs.apply(&self.init_conv)?.apply(&self.init_lstm)?; for (conv, resnets) in self.sampling_layers.iter() { xs = xs.elu(1.)?.apply(conv)?; for resnet in resnets.iter() { xs = xs.apply(resnet)? } } xs.elu(1.)?.apply(&self.final_conv) } } #[derive(Debug)] pub struct Model { encoder: Encoder, decoder: Decoder, quantizer: ResidualVectorQuantizer, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let encoder = Encoder::new(cfg, vb.pp("encoder"))?; let decoder = Decoder::new(cfg, vb.pp("decoder"))?; let quantizer = ResidualVectorQuantizer::new(cfg, vb.pp("quantizer"))?; Ok(Self { encoder, decoder, quantizer, }) } pub fn encode(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.encoder.forward(xs)?; let codes = self.quantizer.encode(&xs)?; codes.transpose(0, 1) } pub fn decode(&self, codes: &Tensor) -> Result<Tensor> { let (_b_sz, _codebooks, _seqlen) = codes.dims3()?; let codes = codes.transpose(0, 1)?; let embeddings = self.quantizer.decode(&codes)?; let outputs = self.decoder.forward(&embeddings)?; Ok(outputs) } }
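// A minimal usage sketch, not part of the upstream file: round-trip a waveform
// through the codec. It assumes `vb` already points at EnCodec weights and
// `pcm` is shaped (batch, audio_channels, time); the function name is made up
// for illustration.
#[allow(dead_code)]
pub fn encodec_round_trip(vb: VarBuilder, pcm: &Tensor) -> Result<Tensor> {
    let cfg = Config::default();
    let model = Model::new(&cfg, vb)?;
    // Discrete codes with shape (batch, num_quantizers, frames).
    let codes = model.encode(pcm)?;
    // Reconstructed waveform at the original sampling rate.
    model.decode(&codes)
}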
candle/candle-transformers/src/models/encodec.rs/0
{ "file_path": "candle/candle-transformers/src/models/encodec.rs", "repo_id": "candle", "token_count": 12744 }
//! MixFormer (Microsoft's Phi Architecture) //! //! See "Textbooks Are All You Need II: phi-1.5 technical report", Lin et al. 2023 //! - [Arxiv](https://arxiv.org/abs/2309.05463) //! - [Github](https://huggingface.co/microsoft/phi-1_5) //! use crate::models::with_tracing::{linear, Embedding as E, Linear}; /// MixFormer model. /// https://huggingface.co/microsoft/phi-1_5 /// https://arxiv.org/abs/2309.05463 use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::{Activation, VarBuilder}; use serde::Deserialize; const MAX_SEQ_LEN: usize = 4096; // https://huggingface.co/microsoft/phi-1_5/blob/d38e6f954ec29b96fe2cf033937dad64e279b5d9/configuration_mixformer_sequential.py #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { pub(crate) vocab_size: usize, pub(crate) n_positions: usize, pub(crate) n_embd: usize, pub(crate) n_layer: usize, pub(crate) n_inner: Option<usize>, pub(crate) n_head: usize, pub(crate) rotary_dim: usize, pub(crate) activation_function: Activation, pub(crate) layer_norm_epsilon: f64, pub(crate) tie_word_embeddings: bool, pub(crate) pad_vocab_size_multiple: usize, } impl Config { pub fn v1() -> Self { Self { vocab_size: 50304, n_positions: 2048, n_embd: 1024, n_layer: 20, n_inner: None, n_head: 16, rotary_dim: usize::min(32, 1024 / 16), activation_function: Activation::Gelu, layer_norm_epsilon: 1e-5, tie_word_embeddings: false, pad_vocab_size_multiple: 64, } } pub fn v1_5() -> Self { Self { vocab_size: 51200, n_positions: 2048, n_embd: 2048, n_layer: 24, n_inner: None, n_head: 32, rotary_dim: usize::min(32, 2048 / 32), activation_function: Activation::Gelu, layer_norm_epsilon: 1e-5, tie_word_embeddings: false, pad_vocab_size_multiple: 64, } } pub fn v2() -> Self { Self { vocab_size: 51200, n_positions: 2048, n_embd: 2560, n_layer: 32, n_inner: None, n_head: 32, rotary_dim: usize::min(32, 2560 / 32), activation_function: Activation::Gelu, layer_norm_epsilon: 1e-5, tie_word_embeddings: false, pad_vocab_size_multiple: 64, } } // https://huggingface.co/teknium/Puffin-Phi-v2/blob/main/config.json pub fn puffin_phi_v2() -> Self { Self { vocab_size: 50304, n_positions: 2048, n_embd: 2048, n_layer: 24, n_inner: None, n_head: 32, rotary_dim: usize::min(32, 2048 / 32), activation_function: Activation::Gelu, layer_norm_epsilon: 1e-5, tie_word_embeddings: false, pad_vocab_size_multiple: 64, } } // https://huggingface.co/teknium/Phi-Hermes-1.3B/blob/main/config.json pub fn phi_hermes_1_3b() -> Self { Self { vocab_size: 50304, n_positions: 2048, n_embd: 2048, n_layer: 24, n_inner: None, n_head: 32, rotary_dim: usize::min(32, 2048 / 32), activation_function: Activation::NewGelu, layer_norm_epsilon: 1e-5, tie_word_embeddings: false, pad_vocab_size_multiple: 64, } } } #[derive(Debug, Clone)] struct Embedding { wte: E, } impl Embedding { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let wte = E::new(cfg.vocab_size, cfg.n_embd, vb.pp("wte"))?; Ok(Self { wte }) } } impl Module for Embedding { fn forward(&self, xs: &Tensor) -> Result<Tensor> { self.wte.forward(xs) } } fn get_mask(size: usize, dtype: DType, device: &Device) -> Result<Tensor> { let mask: Vec<_> = (0..size) .flat_map(|i| (0..size).map(move |j| if j > i { f32::NEG_INFINITY } else { 0. 
})) .collect(); Tensor::from_slice(&mask, (size, size), device)?.to_dtype(dtype) } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(dim: usize, max_seq_len: usize, dtype: DType, dev: &Device) -> Result<Self> { let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32)) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(DType::F32)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?.to_dtype(dtype)?, cos: freqs.cos()?.to_dtype(dtype)?, }) } fn apply_rotary_emb_qkv( &self, qkv: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor, Tensor)> { let (_b_size, seqlen, three, _, _headdim) = qkv.dims5()?; if three != 3 { candle::bail!("unexpected shape for qkv {:?}", qkv.shape()) } let (_rotary_seqlen, rotary_dim) = self.cos.dims2()?; let rotary_dim = rotary_dim * 2; let q_rot = qkv.i((.., .., 0, .., ..rotary_dim))?.contiguous()?; let q_pass = qkv.i((.., .., 0, .., rotary_dim..))?; let k_rot = qkv.i((.., .., 1, .., ..rotary_dim))?.contiguous()?; let k_pass = qkv.i((.., .., 1, .., rotary_dim..))?; let c = self.cos.narrow(0, seqlen_offset, seqlen)?; let s = self.sin.narrow(0, seqlen_offset, seqlen)?; let q_rot = candle_nn::rotary_emb::rope_thd(&q_rot, &c, &s)?; let k_rot = candle_nn::rotary_emb::rope_thd(&k_rot, &c, &s)?; let q = Tensor::cat(&[&q_rot, &q_pass], D::Minus1)?; let k = Tensor::cat(&[&k_rot, &k_pass], D::Minus1)?; let v = qkv.i((.., .., 2))?; Ok((q, k, v)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { fc1: Linear, fc2: Linear, act: Activation, span: tracing::Span, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let n_inner = cfg.n_inner.unwrap_or(4 * cfg.n_embd); let fc1 = linear(cfg.n_embd, n_inner, vb.pp("fc1"))?; let fc2 = linear(n_inner, cfg.n_embd, vb.pp("fc2"))?; Ok(Self { fc1, fc2, act: cfg.activation_function, span: tracing::span!(tracing::Level::TRACE, "mlp"), }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); xs.apply(&self.fc1)?.apply(&self.act)?.apply(&self.fc2) } } #[derive(Debug, Clone)] struct CausalLMHead { ln: candle_nn::LayerNorm, linear: Linear, } impl CausalLMHead { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let ln = candle_nn::layer_norm(cfg.n_embd, cfg.layer_norm_epsilon, vb.pp("ln"))?; let linear = linear(cfg.n_embd, cfg.vocab_size, vb.pp("linear"))?; Ok(Self { ln, linear }) } } impl Module for CausalLMHead { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.ln)? .apply(&self.linear)? 
.to_dtype(DType::F32) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MHA { wqkv: Linear, out_proj: Linear, rotary_emb: RotaryEmbedding, kv_cache: Option<(Tensor, Tensor)>, head_dim: usize, softmax_scale: f64, span: tracing::Span, span_rope: tracing::Span, span_mask: tracing::Span, span_softmax: tracing::Span, } impl MHA { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let head_dim = cfg.n_embd / cfg.n_head; let op_size = cfg.n_embd; let wqkv = linear(cfg.n_embd, 3 * op_size, vb.pp("Wqkv"))?; let out_proj = linear(op_size, cfg.n_embd, vb.pp("out_proj"))?; let rotary_emb = RotaryEmbedding::new(cfg.rotary_dim, MAX_SEQ_LEN, vb.dtype(), vb.device())?; let softmax_scale = 1f64 / (head_dim as f64).sqrt(); Ok(Self { wqkv, out_proj, head_dim, kv_cache: None, rotary_emb, softmax_scale, span: tracing::span!(tracing::Level::TRACE, "mha"), span_rope: tracing::span!(tracing::Level::TRACE, "rope"), span_mask: tracing::span!(tracing::Level::TRACE, "mask"), span_softmax: tracing::span!(tracing::Level::TRACE, "softmax"), }) } fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let (b_size, seq_len, _n_embd) = xs.dims3()?; let qkv = self .wqkv .forward(xs)? .reshape((b_size, seq_len, 3, (), self.head_dim))?; let seqlen_offset = match &self.kv_cache { None => 0, Some((prev_k, _)) => prev_k.dim(1)?, }; // In the python implementation, a single tensor is returned with the third axis of size 3. let (q, k, v) = { let _enter = self.span_rope.enter(); self.rotary_emb.apply_rotary_emb_qkv(&qkv, seqlen_offset)? }; let (k, v) = match &self.kv_cache { None => (k, v), Some((prev_k, prev_v)) => { let k = Tensor::cat(&[prev_k, &k], 1)?; let v = Tensor::cat(&[prev_v, &v], 1)?; (k, v) } }; self.kv_cache = Some((k.clone(), v.clone())); // scores = torch.einsum('bthd,bshd->bhts', q, k * softmax_scale) let q = q.transpose(1, 2)?.flatten_to(1)?; // b*h, t, d let k = k.transpose(1, 2)?.flatten_to(1)?; // b*h, s, d let v = v.transpose(1, 2)?.flatten_to(1)?; // b*h, s, d let attn_weights = (q.matmul(&k.t()?)? * self.softmax_scale)?; // b*h, t, s // causal_mask = torch.triu(torch.full((seqlen_q, seqlen_k), -10000.0, device=scores.device), 1) // scores = scores + causal_mask.to(dtype=scores.dtype) let attn_weights = match mask { None => attn_weights, Some(mask) => { let _enter = self.span_mask.enter(); attn_weights.broadcast_add(mask)? } }; let attn_weights = { let _enter = self.span_softmax.enter(); candle_nn::ops::softmax_last_dim(&attn_weights)? }; // output = torch.einsum('bhts,bshd->bthd', attention_drop, v) // attn_weights: b*h,t,s, v: b*h,s,d let attn_output = attn_weights.matmul(&v)?; // b*h,t,d let attn_output = attn_output .reshape((b_size, (), seq_len, self.head_dim))? .transpose(1, 2)? 
.flatten_from(D::Minus2)?; attn_output.apply(&self.out_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct ParallelBlock { ln: candle_nn::LayerNorm, mixer: MHA, mlp: MLP, span: tracing::Span, } impl ParallelBlock { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let ln = candle_nn::layer_norm(cfg.n_embd, cfg.layer_norm_epsilon, vb.pp("ln"))?; let mixer = MHA::new(cfg, vb.pp("mixer"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; Ok(Self { ln, mixer, mlp, span: tracing::span!(tracing::Level::TRACE, "block"), }) } fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let residual = xs; let xs = xs.apply(&self.ln)?; let attn_outputs = self.mixer.forward(&xs, mask)?; let feed_forward_hidden_states = self.mlp.forward(&xs)?; attn_outputs + feed_forward_hidden_states + residual } fn clear_kv_cache(&mut self) { self.mixer.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct MixFormerSequentialForCausalLM { embedding: Embedding, blocks: Vec<ParallelBlock>, head: CausalLMHead, span: tracing::Span, } impl MixFormerSequentialForCausalLM { pub fn new_v2(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_head = vb.pp("lm_head"); let vb = vb.pp("transformer"); let embedding = Embedding::new(cfg, vb.pp("embd"))?; let mut blocks = Vec::new(); for i in 0..cfg.n_layer { let block = ParallelBlock::new(cfg, vb.pp("h").pp(i))?; blocks.push(block) } let head = CausalLMHead::new(cfg, vb_head)?; Ok(Self { embedding, blocks, head, span: tracing::span!(tracing::Level::TRACE, "mixformer"), }) } pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb = vb.pp("layers"); let embedding = Embedding::new(cfg, vb.pp(0))?; let mut blocks = Vec::new(); for i in 0..cfg.n_layer { let block = ParallelBlock::new(cfg, vb.pp(i + 1))?; blocks.push(block) } let head = CausalLMHead::new(cfg, vb.pp(cfg.n_layer + 1))?; Ok(Self { embedding, blocks, head, span: tracing::span!(tracing::Level::TRACE, "mixformer"), }) } pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (_b_size, seq_len) = xs.dims2()?; let mut xs = xs.apply(&self.embedding)?; let mask = if seq_len <= 1 { None } else { Some(get_mask(seq_len, xs.dtype(), xs.device())?) }; for block in self.blocks.iter_mut() { xs = block.forward(&xs, mask.as_ref())? } xs.narrow(1, seq_len - 1, 1)?.apply(&self.head)?.squeeze(1) } pub fn forward_with_img( &mut self, bos_token: &Tensor, xs: &Tensor, img_embeds: &Tensor, ) -> Result<Tensor> { let _enter = self.span.enter(); let xs = xs.apply(&self.embedding)?; let bos_token = bos_token.apply(&self.embedding)?; // Python implementation sequence order is <bos token embedding><img embedding><rest of text embedding> // https://github.com/vikhyat/moondream/blob/a9d788a20d1543fb1479edc54106e88cff7759d3/moondream/moondream.py#L43-L56 let mut xs = Tensor::cat(&[bos_token, img_embeds.clone(), xs], 1)?; let (_b_size, seq_len, _embds) = xs.dims3()?; let mask = Some(get_mask(seq_len, xs.dtype(), xs.device())?); for block in self.blocks.iter_mut() { xs = block.forward(&xs, mask.as_ref())? } let xs = xs .narrow(1, seq_len - 1, 1)? .apply(&self.head)? .squeeze(1)?; Ok(xs) } pub fn clear_kv_cache(&mut self) { self.blocks.iter_mut().for_each(|b| b.clear_kv_cache()) } }
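// A minimal usage sketch, not part of the upstream file: one forward pass with
// the phi-1.5 configuration. The token ids below are placeholders; real ids
// come from the tokenizer, and the checkpoint layout must match `new_v2`.
#[allow(dead_code)]
pub fn mixformer_logits_example(vb: VarBuilder, device: &Device) -> Result<Tensor> {
    let cfg = Config::v1_5();
    let mut model = MixFormerSequentialForCausalLM::new_v2(&cfg, vb)?;
    // Dummy prompt of three token ids, shape (batch = 1, seq_len = 3).
    let input = Tensor::new(&[[1u32, 2u32, 3u32]], device)?;
    // Logits for the last position only, shape (batch, vocab_size).
    model.forward(&input)
}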
candle/candle-transformers/src/models/mixformer.rs/0
{ "file_path": "candle/candle-transformers/src/models/mixformer.rs", "repo_id": "candle", "token_count": 8060 }
use super::embedding::Model as EmbeddingModel; use crate::models::{ mistral::Config, with_tracing::{layer_norm, linear, linear_no_bias, LayerNorm, Linear}, }; use candle::{DType, Device, Result, Tensor, D}; use candle_nn::{ops::softmax_last_dim, LayerNormConfig, Module, VarBuilder}; // Geglu and feedforward from candle-transformers/src/models/stable_diffusion/attention.rs #[derive(Debug)] struct GeGlu { proj: Linear, span: tracing::Span, } impl GeGlu { fn new(vs: VarBuilder, dim_in: usize, dim_out: usize) -> Result<Self> { let proj = linear(dim_in, dim_out * 2, vs)?; let span = tracing::span!(tracing::Level::TRACE, "geglu"); Ok(Self { proj, span }) } } impl Module for GeGlu { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hidden_states_and_gate = self.proj.forward(xs)?.chunk(2, D::Minus1)?; &hidden_states_and_gate[0] * hidden_states_and_gate[1].gelu()? } } #[derive(Debug)] struct FeedForward { project_in: GeGlu, linear: Linear, span: tracing::Span, } impl FeedForward { fn new(vs: VarBuilder, dim: usize, dim_out: Option<usize>, mult: usize) -> Result<Self> { let inner_dim = dim * mult; let dim_out = dim_out.unwrap_or(dim); let vs = vs.pp("net"); let project_in = GeGlu::new(vs.pp("0"), dim, inner_dim)?; let linear = linear(inner_dim, dim_out, vs.pp("2"))?; let span = tracing::span!(tracing::Level::TRACE, "ff"); Ok(Self { project_in, linear, span, }) } } impl Module for FeedForward { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = self.project_in.forward(xs)?; self.linear.forward(&xs) } } // CrossAttention from candle-transformers/src/models/stable_diffusion/attention.rs #[derive(Debug)] struct CrossAttention { to_q: Linear, to_kv: Linear, to_out: Linear, heads: usize, scale: f64, span: tracing::Span, span_attn: tracing::Span, span_softmax: tracing::Span, } impl CrossAttention { fn new( vs: VarBuilder, query_dim: usize, context_dim: Option<usize>, heads: usize, dim_head: usize, ) -> Result<Self> { let inner_dim = dim_head * heads; let context_dim = context_dim.unwrap_or(query_dim); let scale = 1.0 / f64::sqrt(dim_head as f64); let to_q = linear_no_bias(query_dim, inner_dim, vs.pp("to_q"))?; let to_kv = linear_no_bias(context_dim, inner_dim * 2, vs.pp("to_kv"))?; let to_out = linear_no_bias(inner_dim, query_dim, vs.pp("to_out"))?; let span = tracing::span!(tracing::Level::TRACE, "xa"); let span_attn = tracing::span!(tracing::Level::TRACE, "xa-attn"); let span_softmax = tracing::span!(tracing::Level::TRACE, "xa-softmax"); Ok(Self { to_q, to_kv, to_out, heads, scale, span, span_attn, span_softmax, }) } fn reshape_heads_to_batch_dim(&self, xs: &Tensor) -> Result<Tensor> { let (batch_size, seq_len, dim) = xs.dims3()?; xs.reshape((batch_size, seq_len, self.heads, dim / self.heads))? .transpose(1, 2)? .reshape((batch_size * self.heads, seq_len, dim / self.heads)) } fn reshape_batch_dim_to_heads(&self, xs: &Tensor) -> Result<Tensor> { let (batch_size, seq_len, dim) = xs.dims3()?; xs.reshape((batch_size / self.heads, self.heads, seq_len, dim))? .transpose(1, 2)? .reshape((batch_size / self.heads, seq_len, dim * self.heads)) } fn attention(&self, query: &Tensor, key: &Tensor, value: &Tensor) -> Result<Tensor> { let _enter = self.span_attn.enter(); let in_dtype = query.dtype(); let query = query.to_dtype(DType::F32)?; let key = key.to_dtype(DType::F32)?; let value = value.to_dtype(DType::F32)?; let xs = query.matmul(&(key.t()? * self.scale)?)?; let xs = { let _enter = self.span_softmax.enter(); softmax_last_dim(&xs)? 
}; let xs = xs.matmul(&value)?.to_dtype(in_dtype)?; self.reshape_batch_dim_to_heads(&xs) } fn forward(&self, xs: &Tensor, context: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let query = self.to_q.forward(xs)?; let context = context.unwrap_or(xs).contiguous()?; let kv_chunks = self .to_kv .forward(&context)? .chunk(2, context.shape().dims().len() - 1)?; let (key, value) = (kv_chunks[0].clone(), kv_chunks[1].clone()); let query = self.reshape_heads_to_batch_dim(&query)?; let key = self.reshape_heads_to_batch_dim(&key)?; let value = self.reshape_heads_to_batch_dim(&value)?; let xs = self.attention(&query, &key, &value)?; self.to_out.forward(&xs) } } #[derive(Debug)] pub struct Model { embedding_model: EmbeddingModel, cross_attn: CrossAttention, cross_attn_norm: LayerNorm, cross_attn_context_norm: LayerNorm, ff: FeedForward, ff_norm: LayerNorm, latents: Tensor, pub device: Device, pub dtype: DType, } impl Model { pub fn new(vb: VarBuilder) -> Result<Self> { // Embedding model let cfg = Config::config_7b_v0_1(false); let embedding_model = EmbeddingModel::new(&cfg, vb.pp("embedding_model"))?; // Latent attention let dim = 4096; let vb = vb.pp("latent_attention_model"); let latents = vb.get((512, dim), "latents")?; // Cross attend blocks let vb = vb.pp("cross_attend_blocks"); let cross_attn_norm = layer_norm(dim, LayerNormConfig::default(), vb.pp("0.norm"))?; let cross_attn_context_norm = layer_norm( dim, candle_nn::LayerNormConfig::default(), vb.pp("0.norm_context"), )?; let cross_attn = CrossAttention::new(vb.pp("0.fn"), dim, None, 8, 4096)?; let ff_norm = layer_norm(dim, LayerNormConfig::default(), vb.pp("1.norm"))?; let ff = FeedForward::new(vb.pp("1.fn"), dim, None, 4)?; Ok(Self { embedding_model, cross_attn, cross_attn_norm, cross_attn_context_norm, ff, ff_norm, latents, device: vb.device().clone(), dtype: vb.dtype(), }) } pub fn forward( &mut self, input_ids: &Tensor, attn_mask: &Tensor, pool_mask: &Tensor, ) -> Result<Tensor> { // Embedding model let hiddens = self .embedding_model .forward(attn_mask, input_ids, self.dtype)?; // Latent attention let b = hiddens.dims()[0]; let x = self.latents.unsqueeze(0)?.repeat((b, 1, 1))?; let original_hiddens = &hiddens; let hiddens = self.cross_attn_norm.forward(original_hiddens)?; let x = self.cross_attn_context_norm.forward(&x)?; let cross_hiddens = (self.cross_attn.forward(&hiddens, Some(&x))? + original_hiddens)?; let hiddens = self.ff_norm.forward(&cross_hiddens)?; let hiddens = (self.ff.forward(&hiddens)? + cross_hiddens)?; // Mean pooling let hiddens_masked = hiddens.broadcast_mul(&pool_mask.unsqueeze(D::Minus1)?)?; let s = hiddens_masked.sum(1)?; let d = pool_mask.sum_keepdim(1)?; s.broadcast_div(&d) } }
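// A minimal usage sketch, not part of the upstream file: pool embeddings for a
// batch of already-tokenized inputs. The all-ones masks are a placeholder for
// the no-padding case, and casting them to the model dtype is an assumption;
// with padding, both masks come from the tokenizer.
#[allow(dead_code)]
pub fn embed_example(vb: VarBuilder, input_ids: &Tensor) -> Result<Tensor> {
    let mut model = Model::new(vb)?;
    let (b, t) = input_ids.dims2()?;
    // Attention mask and mean-pooling mask over the full sequence.
    let attn_mask = Tensor::ones((b, t), model.dtype, input_ids.device())?;
    let pool_mask = attn_mask.clone();
    model.forward(input_ids, &attn_mask, &pool_mask)
}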
candle/candle-transformers/src/models/nvembed_v2/model.rs/0
{ "file_path": "candle/candle-transformers/src/models/nvembed_v2/model.rs", "repo_id": "candle", "token_count": 3730 }
//! Quantized MetaVoice model implementation. //! //! MetaVoice is a conditional text-to-speech model based on a transformer architecture. //! This implementation provides quantization for reduced memory and compute. //! //! Key characteristics: //! - Transformer-based autoregressive decoder //! - Speaker conditioning //! - Support for 8-bit quantization //! - Key-value caching for efficient inference //! - RMS normalization layers //! //! References: //! - [MetaVoice Code](https://github.com/metavoiceio/metavoice) //! use crate::quantized_nn::{linear_b, Embedding, Linear, RmsNorm}; pub use crate::quantized_var_builder::VarBuilder; use crate::models::metavoice::repeat_interleave; use candle::{Module, Result, Tensor, D}; pub mod transformer { use super::*; type Config = crate::models::metavoice::transformer::Config; #[derive(Debug, Clone)] struct FeedForward { w1: Linear, w2: Linear, w3: Linear, span: tracing::Span, } impl FeedForward { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let i_size = cfg.intermediate_size(); let w1 = linear_b(cfg.dim, i_size, false, vb.pp("swiglu.w1"))?; let w2 = linear_b(i_size, cfg.dim, false, vb.pp("w2"))?; let w3 = linear_b(cfg.dim, i_size, false, vb.pp("swiglu.w3"))?; Ok(Self { w1, w2, w3, span: tracing::span!(tracing::Level::TRACE, "feed-forward"), }) } } impl Module for FeedForward { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let swiglu = (candle_nn::ops::silu(&xs.apply(&self.w1)?)? * xs.apply(&self.w3))?; swiglu.apply(&self.w2) } } #[derive(Debug, Clone)] struct Attention { wqkv: Linear, wo: Linear, dim: usize, kv_size: usize, n_local_heads: usize, head_dim: usize, n_head: usize, kv_cache: Option<(Tensor, Tensor)>, span: tracing::Span, } impl Attention { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let n_local_heads = cfg.n_local_heads(); let head_dim = cfg.head_dim(); let total_head_dim = (cfg.n_head + 2 * n_local_heads) * head_dim; let wqkv = linear_b(cfg.dim, total_head_dim, false, vb.pp("wqkv"))?; let wo = linear_b(cfg.dim, cfg.dim, false, vb.pp("wo"))?; Ok(Self { wqkv, wo, dim: cfg.dim, kv_size: n_local_heads * head_dim, n_local_heads, head_dim, n_head: cfg.n_head, kv_cache: None, span: tracing::span!(tracing::Level::TRACE, "attention"), }) } fn forward(&mut self, xs: &Tensor, _pos: usize, mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (b_sz, seqlen, _) = xs.dims3()?; let qkv = xs.apply(&self.wqkv)?; let q = qkv.narrow(D::Minus1, 0, self.dim)?; let k = qkv.narrow(D::Minus1, self.dim, self.kv_size)?; let v = qkv.narrow(D::Minus1, self.dim + self.kv_size, self.kv_size)?; let q = q .reshape((b_sz, seqlen, self.n_head, self.head_dim))? .transpose(1, 2)? .contiguous()?; let k = k .reshape((b_sz, seqlen, self.n_local_heads, self.head_dim))? .transpose(1, 2)?; let v = v .reshape((b_sz, seqlen, self.n_local_heads, self.head_dim))? .transpose(1, 2)?; let (k, v) = match &self.kv_cache { None => (k, v), Some((prev_k, prev_v)) => { let k = Tensor::cat(&[prev_k, &k], 2)?; let v = Tensor::cat(&[prev_v, &v], 2)?; (k, v) } }; self.kv_cache = Some((k.clone(), v.clone())); let k = repeat_interleave(&k, self.n_head / self.n_local_heads, 1)?; let v = repeat_interleave(&v, self.n_head / self.n_local_heads, 1)?; let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (q.matmul(&k.transpose(2, 3)?)? 
* scale)?; let attn_weights = attn_weights.broadcast_add(mask)?; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; let attn_output = attn_weights.matmul(&v)?; attn_output .transpose(1, 2)? .reshape((b_sz, seqlen, self.dim))? .apply(&self.wo) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct Block { attention: Attention, feed_forward: FeedForward, ffn_norm: RmsNorm, attention_norm: RmsNorm, span: tracing::Span, } impl Block { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let attention = Attention::new(cfg, vb.pp("attention"))?; let feed_forward = FeedForward::new(cfg, vb.pp("feed_forward"))?; let ffn_norm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("ffn_norm"))?; let attention_norm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("attention_norm"))?; Ok(Self { attention, feed_forward, ffn_norm, attention_norm, span: tracing::span!(tracing::Level::TRACE, "block"), }) } fn forward(&mut self, xs: &Tensor, pos: usize, mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hs = xs.apply(&self.attention_norm)?; let hs = (xs + self.attention.forward(&hs, pos, mask))?; &hs + hs.apply(&self.ffn_norm)?.apply(&self.feed_forward) } fn clear_kv_cache(&mut self) { self.attention.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { tok_embeddings: Embedding, pos_embeddings: Embedding, speaker_cond_pos: Linear, layers: Vec<Block>, norm: RmsNorm, output: Linear, spk_cond_mask: Tensor, span: tracing::Span, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let tok_embeddings = Embedding::new(cfg.vocab_size, cfg.dim, vb.pp("tok_embeddings"))?; let pos_embeddings = Embedding::new(cfg.block_size, cfg.dim, vb.pp("pos_embeddings"))?; let speaker_cond_pos = linear_b( cfg.speaker_emb_dim, cfg.dim, false, vb.pp("speaker_cond_pos"), )?; let mut layers = Vec::with_capacity(cfg.n_layer); let vb_l = vb.pp("layers"); for layer_idx in 0..cfg.n_layer { let layer = Block::new(cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("norm"))?; let output = linear_b(cfg.dim, cfg.vocab_size, false, vb.pp("output"))?; let spk_cond_mask = Tensor::cat( &[ Tensor::ones((1, 1, cfg.dim), candle::DType::F32, vb.device())?, Tensor::zeros((1, 1, cfg.dim), candle::DType::F32, vb.device())?, ], 0, )?; Ok(Self { tok_embeddings, pos_embeddings, speaker_cond_pos, layers, norm, output, spk_cond_mask, span: tracing::span!(tracing::Level::TRACE, "qtransformer"), }) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } pub fn forward(&mut self, xs: &Tensor, spk_emb: &Tensor, pos: usize) -> Result<Tensor> { let _enter = self.span.enter(); let (_b_sz, seqlen) = xs.dims2()?; let mask: Vec<_> = (0..seqlen) .flat_map(|i| (0..seqlen).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. })) .collect(); let mask = Tensor::from_slice(&mask, (1, 1, seqlen, seqlen), xs.device())?; let input_pos = Tensor::arange(pos as u32, (pos + seqlen) as u32, xs.device())?; let tok_embeddings = xs.apply(&self.tok_embeddings)?; let pos_embeddings = input_pos.apply(&self.pos_embeddings)?; let mut xs = tok_embeddings .broadcast_add(&pos_embeddings)? .broadcast_add( &spk_emb .apply(&self.speaker_cond_pos)? .broadcast_mul(&self.spk_cond_mask)?, )?; let mask = mask.to_dtype(xs.dtype())?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, pos, &mask)? } xs.narrow(1, seqlen - 1, 1)? .contiguous()? .apply(&self.norm)? .apply(&self.output) } } }
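// A minimal usage sketch, not part of the upstream file: one decoding step of
// the quantized transformer. The config type comes from the non-quantized
// metavoice module, and `tokens`, `spk_emb` and `pos` are assumed to be
// prepared by the caller exactly as in that module.
#[allow(dead_code)]
pub fn quantized_metavoice_step(
    cfg: &crate::models::metavoice::transformer::Config,
    vb: VarBuilder,
    tokens: &Tensor,
    spk_emb: &Tensor,
    pos: usize,
) -> Result<Tensor> {
    let mut model = transformer::Model::new(cfg, vb)?;
    // Logits for the last position, shape (batch, 1, vocab_size).
    model.forward(tokens, spk_emb, pos)
}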
candle/candle-transformers/src/models/quantized_metavoice.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_metavoice.rs", "repo_id": "candle", "token_count": 5192 }
//! RepVGG inference implementation //! //! Key characteristics: //! - Efficient inference architecture through structural reparameterization //! - Single 3x3 conv layer after fusing 3x3 branch, 1x1 branch and identity branch //! - Different configurations including a0-a2, b0-b3 and variants with group convolutions //! - High accuracy with VGG-like plain architecture and training //! //! References: //! - [RepVGG Paper](https://arxiv.org/abs/2101.03697). RepVGG: Making VGG-style ConvNets Great Again //! - [Official Implementation](https://github.com/DingXiaoH/RepVGG) //! use candle::{Result, Tensor, D}; use candle_nn::{ batch_norm, conv2d_no_bias, linear, BatchNorm, Conv2d, Conv2dConfig, Func, VarBuilder, }; const CHANNELS_PER_STAGE: [usize; 5] = [64, 64, 128, 256, 512]; #[derive(Clone)] pub struct Config { a: f32, b: f32, groups: usize, stages: [usize; 4], } impl Config { pub fn a0() -> Self { Self { a: 0.75, b: 2.5, groups: 1, stages: [2, 4, 14, 1], } } pub fn a1() -> Self { Self { a: 1.0, b: 2.5, groups: 1, stages: [2, 4, 14, 1], } } pub fn a2() -> Self { Self { a: 1.5, b: 2.75, groups: 1, stages: [2, 4, 14, 1], } } pub fn b0() -> Self { Self { a: 1.0, b: 2.5, groups: 1, stages: [4, 6, 16, 1], } } pub fn b1() -> Self { Self { a: 2.0, b: 4.0, groups: 1, stages: [4, 6, 16, 1], } } pub fn b2() -> Self { Self { a: 2.5, b: 5.0, groups: 1, stages: [4, 6, 16, 1], } } pub fn b3() -> Self { Self { a: 3.0, b: 5.0, groups: 1, stages: [4, 6, 16, 1], } } pub fn b1g4() -> Self { Self { a: 2.0, b: 4.0, groups: 4, stages: [4, 6, 16, 1], } } pub fn b2g4() -> Self { Self { a: 2.5, b: 5.0, groups: 4, stages: [4, 6, 16, 1], } } pub fn b3g4() -> Self { Self { a: 3.0, b: 5.0, groups: 4, stages: [4, 6, 16, 1], } } } // fuses a convolutional kernel and a batchnorm layer into a convolutional layer // based on the _fuse_bn_tensor method in timm // see https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L602 fn fuse_conv_bn(weights: &Tensor, bn: BatchNorm) -> Result<(Tensor, Tensor)> { let (gamma, beta) = bn.weight_and_bias().unwrap(); let mu = bn.running_mean(); let sigma = (bn.running_var() + bn.eps())?.sqrt(); let gps = (gamma / sigma)?; let bias = (beta - mu * &gps)?; let weights = weights.broadcast_mul(&gps.reshape(((), 1, 1, 1))?)?; Ok((weights, bias)) } // A RepVGG layer has a different training time and inference time architecture. // The latter is a simple and efficient equivalent transformation of the former // realized by a structural reparameterization technique, where 3x3 and 1x1 convolutions // along with identity branches and batchnorm layers are fused into a single 3x3 convolution. 
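// Concretely, the fused parameters computed below are
//   W = fuse(W_3x3, bn_3x3) + pad_to_3x3(fuse(W_1x1, bn_1x1)) + fuse(I, bn_id)
//   b = b_3x3 + b_1x1 + b_id
// where fuse() folds a batchnorm into the preceding convolution and the
// identity branch is expressed as a 3x3 kernel with a single centered 1.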
fn repvgg_layer( has_identity: bool, dim: usize, stride: usize, in_channels: usize, out_channels: usize, groups: usize, vb: VarBuilder, ) -> Result<Func<'static>> { let conv2d_cfg = Conv2dConfig { stride, groups, padding: 1, ..Default::default() }; // read and reparameterize the 1x1 conv and bn into w1 and b1 // based on https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L543 let conv1x1_bn = batch_norm(dim, 1e-5, vb.pp("conv_1x1.bn"))?; let conv1x1 = conv2d_no_bias( in_channels, out_channels, 1, conv2d_cfg, vb.pp("conv_1x1.conv"), )?; let (mut w1, b1) = fuse_conv_bn(conv1x1.weight(), conv1x1_bn)?; // resize to 3x3 w1 = w1.pad_with_zeros(D::Minus1, 1, 1)?; w1 = w1.pad_with_zeros(D::Minus2, 1, 1)?; // read and reparameterize the 3x3 conv and bn into w3 and b3 let convkxk_bn = batch_norm(dim, 1e-5, vb.pp("conv_kxk.bn"))?; let conv3x3 = conv2d_no_bias( in_channels, out_channels, 3, conv2d_cfg, vb.pp("conv_kxk.conv"), )?; let (w3, b3) = fuse_conv_bn(conv3x3.weight(), convkxk_bn)?; let mut w = (w1 + w3)?; let mut b = (b1 + b3)?; // read and reparameterize the identity bn into wi and bi if has_identity { let identity_bn = batch_norm(dim, 1e-5, vb.pp("identity"))?; // create a 3x3 convolution equivalent to the identity branch let mut weights: Vec<f32> = vec![0.0; conv3x3.weight().elem_count()]; // https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/byobnet.py#L620 let in_dim = in_channels / groups; for i in 0..in_channels { weights[i * in_dim * 3 * 3 + (i % in_dim) * 3 * 3 + 4] = 1.0; } let weights = &Tensor::from_vec(weights, w.shape(), w.device())?; let (wi, bi) = fuse_conv_bn(weights, identity_bn)?; w = (w + wi)?; b = (b + bi)?; } // create the 3x3 conv equivalent to the sum of 3x3, 1x1 and identity branches let reparam_conv = Conv2d::new(w, Some(b), conv2d_cfg); Ok(Func::new(move |xs| { let xs = xs.apply(&reparam_conv)?.relu()?; Ok(xs) })) } // Get the number of output channels per stage taking into account the multipliers fn output_channels_per_stage(a: f32, b: f32, stage: usize) -> usize { let channels = CHANNELS_PER_STAGE[stage] as f32; match stage { 0 => std::cmp::min(64, (channels * a) as usize), 4 => (channels * b) as usize, _ => (channels * a) as usize, } } // Each stage is made of layers. The first layer always downsamples with stride 2. // All but the first layer have a residual connection. // The G4 variants have a groupwise convolution instead of a dense one on odd layers // counted across stage boundaries, so we keep track of which layer we are in the // full model. fn repvgg_stage(cfg: &Config, idx: usize, vb: VarBuilder) -> Result<Func<'static>> { let nlayers = cfg.stages[idx - 1]; let mut layers = Vec::with_capacity(nlayers); let prev_layers: usize = cfg.stages[..idx - 1].iter().sum(); let out_channels_prev = output_channels_per_stage(cfg.a, cfg.b, idx - 1); let out_channels = output_channels_per_stage(cfg.a, cfg.b, idx); for layer_idx in 0..nlayers { let (has_identity, stride, in_channels) = if layer_idx == 0 { (false, 2, out_channels_prev) } else { (true, 1, out_channels) }; let groups = if (prev_layers + layer_idx) % 2 == 1 { cfg.groups } else { 1 }; layers.push(repvgg_layer( has_identity, out_channels, stride, in_channels, out_channels, groups, vb.pp(layer_idx), )?) } Ok(Func::new(move |xs| { let mut xs = xs.clone(); for layer in layers.iter() { xs = xs.apply(layer)? } Ok(xs) })) } // Build a RepVGG model for a given configuration. 
fn repvgg_model(config: &Config, nclasses: Option<usize>, vb: VarBuilder) -> Result<Func<'static>> { let cls = match nclasses { None => None, Some(nclasses) => { let outputs = output_channels_per_stage(config.a, config.b, 4); let linear = linear(outputs, nclasses, vb.pp("head.fc"))?; Some(linear) } }; let stem_dim = output_channels_per_stage(config.a, config.b, 0); let stem = repvgg_layer(false, stem_dim, 2, 3, stem_dim, 1, vb.pp("stem"))?; let vb = vb.pp("stages"); let stage1 = repvgg_stage(config, 1, vb.pp(0))?; let stage2 = repvgg_stage(config, 2, vb.pp(1))?; let stage3 = repvgg_stage(config, 3, vb.pp(2))?; let stage4 = repvgg_stage(config, 4, vb.pp(3))?; Ok(Func::new(move |xs| { let xs = xs .apply(&stem)? .apply(&stage1)? .apply(&stage2)? .apply(&stage3)? .apply(&stage4)? .mean(D::Minus1)? .mean(D::Minus1)?; match &cls { None => Ok(xs), Some(cls) => xs.apply(cls), } })) } pub fn repvgg(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> { repvgg_model(cfg, Some(nclasses), vb) } pub fn repvgg_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> { repvgg_model(cfg, None, vb) }
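// A minimal usage sketch, not part of the upstream file: build a RepVGG-A0
// classifier with 1000 classes and run it on images the caller has already
// resized and normalized to (batch, 3, height, width).
#[allow(dead_code)]
pub fn repvgg_a0_logits(vb: VarBuilder, images: &Tensor) -> Result<Tensor> {
    let model = repvgg(&Config::a0(), 1000, vb)?;
    // `Func` implements `Module`, so the whole network can be applied directly.
    images.apply(&model)
}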
candle/candle-transformers/src/models/repvgg.rs/0
{ "file_path": "candle/candle-transformers/src/models/repvgg.rs", "repo_id": "candle", "token_count": 4487 }
use super::schedulers::{betas_for_alpha_bar, BetaSchedule, PredictionType}; use candle::{Result, Tensor}; #[derive(Debug, Clone, PartialEq, Eq)] pub enum DDPMVarianceType { FixedSmall, FixedSmallLog, FixedLarge, FixedLargeLog, Learned, } impl Default for DDPMVarianceType { fn default() -> Self { Self::FixedSmall } } #[derive(Debug, Clone)] pub struct DDPMSchedulerConfig { /// The value of beta at the beginning of training. pub beta_start: f64, /// The value of beta at the end of training. pub beta_end: f64, /// How beta evolved during training. pub beta_schedule: BetaSchedule, /// Option to predicted sample between -1 and 1 for numerical stability. pub clip_sample: bool, /// Option to clip the variance used when adding noise to the denoised sample. pub variance_type: DDPMVarianceType, /// prediction type of the scheduler function pub prediction_type: PredictionType, /// number of diffusion steps used to train the model. pub train_timesteps: usize, } impl Default for DDPMSchedulerConfig { fn default() -> Self { Self { beta_start: 0.00085, beta_end: 0.012, beta_schedule: BetaSchedule::ScaledLinear, clip_sample: false, variance_type: DDPMVarianceType::FixedSmall, prediction_type: PredictionType::Epsilon, train_timesteps: 1000, } } } pub struct DDPMScheduler { alphas_cumprod: Vec<f64>, init_noise_sigma: f64, timesteps: Vec<usize>, step_ratio: usize, pub config: DDPMSchedulerConfig, } impl DDPMScheduler { pub fn new(inference_steps: usize, config: DDPMSchedulerConfig) -> Result<Self> { let betas = match config.beta_schedule { BetaSchedule::ScaledLinear => super::utils::linspace( config.beta_start.sqrt(), config.beta_end.sqrt(), config.train_timesteps, )? .sqr()?, BetaSchedule::Linear => { super::utils::linspace(config.beta_start, config.beta_end, config.train_timesteps)? } BetaSchedule::SquaredcosCapV2 => betas_for_alpha_bar(config.train_timesteps, 0.999)?, }; let betas = betas.to_vec1::<f64>()?; let mut alphas_cumprod = Vec::with_capacity(betas.len()); for &beta in betas.iter() { let alpha = 1.0 - beta; alphas_cumprod.push(alpha * *alphas_cumprod.last().unwrap_or(&1f64)) } // min(train_timesteps, inference_steps) // https://github.com/huggingface/diffusers/blob/8331da46837be40f96fbd24de6a6fb2da28acd11/src/diffusers/schedulers/scheduling_ddpm.py#L187 let inference_steps = inference_steps.min(config.train_timesteps); // arange the number of the scheduler's timesteps let step_ratio = config.train_timesteps / inference_steps; let timesteps: Vec<usize> = (0..inference_steps).map(|s| s * step_ratio).rev().collect(); Ok(Self { alphas_cumprod, init_noise_sigma: 1.0, timesteps, step_ratio, config, }) } fn get_variance(&self, timestep: usize) -> f64 { let prev_t = timestep as isize - self.step_ratio as isize; let alpha_prod_t = self.alphas_cumprod[timestep]; let alpha_prod_t_prev = if prev_t >= 0 { self.alphas_cumprod[prev_t as usize] } else { 1.0 }; let current_beta_t = 1. - alpha_prod_t / alpha_prod_t_prev; // For t > 0, compute predicted variance βt (see formula (6) and (7) from [the pdf](https://arxiv.org/pdf/2006.11239.pdf)) // and sample from it to get previous sample // x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample let variance = (1. - alpha_prod_t_prev) / (1. 
- alpha_prod_t) * current_beta_t; // retrieve variance match self.config.variance_type { DDPMVarianceType::FixedSmall => variance.max(1e-20), // for rl-diffuser https://arxiv.org/abs/2205.09991 DDPMVarianceType::FixedSmallLog => { let variance = variance.max(1e-20).ln(); (variance * 0.5).exp() } DDPMVarianceType::FixedLarge => current_beta_t, DDPMVarianceType::FixedLargeLog => current_beta_t.ln(), DDPMVarianceType::Learned => variance, } } pub fn timesteps(&self) -> &[usize] { self.timesteps.as_slice() } /// Ensures interchangeability with schedulers that need to scale the denoising model input /// depending on the current timestep. pub fn scale_model_input(&self, sample: Tensor, _timestep: usize) -> Tensor { sample } pub fn step(&self, model_output: &Tensor, timestep: usize, sample: &Tensor) -> Result<Tensor> { let prev_t = timestep as isize - self.step_ratio as isize; // https://github.com/huggingface/diffusers/blob/df2b548e893ccb8a888467c2508756680df22821/src/diffusers/schedulers/scheduling_ddpm.py#L272 // 1. compute alphas, betas let alpha_prod_t = self.alphas_cumprod[timestep]; let alpha_prod_t_prev = if prev_t >= 0 { self.alphas_cumprod[prev_t as usize] } else { 1.0 }; let beta_prod_t = 1. - alpha_prod_t; let beta_prod_t_prev = 1. - alpha_prod_t_prev; let current_alpha_t = alpha_prod_t / alpha_prod_t_prev; let current_beta_t = 1. - current_alpha_t; // 2. compute predicted original sample from predicted noise also called "predicted x_0" of formula (15) let mut pred_original_sample = match self.config.prediction_type { PredictionType::Epsilon => { ((sample - model_output * beta_prod_t.sqrt())? / alpha_prod_t.sqrt())? } PredictionType::Sample => model_output.clone(), PredictionType::VPrediction => { ((sample * alpha_prod_t.sqrt())? - model_output * beta_prod_t.sqrt())? } }; // 3. clip predicted x_0 if self.config.clip_sample { pred_original_sample = pred_original_sample.clamp(-1f32, 1f32)?; } // 4. Compute coefficients for pred_original_sample x_0 and current sample x_t // See formula (7) from https://arxiv.org/pdf/2006.11239.pdf let pred_original_sample_coeff = (alpha_prod_t_prev.sqrt() * current_beta_t) / beta_prod_t; let current_sample_coeff = current_alpha_t.sqrt() * beta_prod_t_prev / beta_prod_t; // 5. Compute predicted previous sample µ_t // See formula (7) from https://arxiv.org/pdf/2006.11239.pdf let pred_prev_sample = ((&pred_original_sample * pred_original_sample_coeff)? + sample * current_sample_coeff)?; // https://github.com/huggingface/diffusers/blob/df2b548e893ccb8a888467c2508756680df22821/src/diffusers/schedulers/scheduling_ddpm.py#L305 // 6. Add noise let mut variance = model_output.zeros_like()?; if timestep > 0 { let variance_noise = model_output.randn_like(0., 1.)?; if self.config.variance_type == DDPMVarianceType::FixedSmallLog { variance = (variance_noise * self.get_variance(timestep))?; } else { variance = (variance_noise * self.get_variance(timestep).sqrt())?; } } &pred_prev_sample + variance } pub fn add_noise( &self, original_samples: &Tensor, noise: Tensor, timestep: usize, ) -> Result<Tensor> { (original_samples * self.alphas_cumprod[timestep].sqrt())? + noise * (1. - self.alphas_cumprod[timestep]).sqrt() } pub fn init_noise_sigma(&self) -> f64 { self.init_noise_sigma } }
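// A minimal usage sketch, not part of the upstream file: a bare-bones reverse
// diffusion loop over 50 inference steps. `predict_noise` stands in for the
// denoising network (e.g. a UNet) and is purely illustrative.
#[allow(dead_code)]
pub fn ddpm_sample_loop<F>(mut latents: Tensor, predict_noise: F) -> Result<Tensor>
where
    F: Fn(&Tensor, usize) -> Result<Tensor>,
{
    let scheduler = DDPMScheduler::new(50, DDPMSchedulerConfig::default())?;
    for &t in scheduler.timesteps() {
        // The model predicts the noise added at timestep t ...
        let noise_pred = predict_noise(&latents, t)?;
        // ... and the scheduler removes one step's worth of it, adding fresh
        // noise for every step except the final one.
        latents = scheduler.step(&noise_pred, t, &latents)?;
    }
    Ok(latents)
}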
candle/candle-transformers/src/models/stable_diffusion/ddpm.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/ddpm.rs", "repo_id": "candle", "token_count": 3666 }
//! VGG model implementation (VGG-13, VGG-16 and VGG-19).
//!
//! VGG-16 is a convolutional neural network architecture. It consists of 13
//! convolutional layers followed by 3 fully connected layers.
//!
//! Key characteristics:
//! - Conv layers with 3x3 filters
//! - Max pooling after every 2-3 conv layers
//! - Three fully connected layers of 4096, 4096, 1000 units
//! - ReLU activation and dropout
//!
//! References:
//! - [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
//!
use candle::{ModuleT, Result, Tensor};
use candle_nn::{FuncT, VarBuilder};

// Enum representing the different VGG models
pub enum Models {
    Vgg13,
    Vgg16,
    Vgg19,
}

// Struct representing a VGG model
#[derive(Debug)]
pub struct Vgg<'a> {
    blocks: Vec<FuncT<'a>>,
}

// Struct representing the configuration for the pre-logit layer
struct PreLogitConfig {
    in_dim: (usize, usize, usize, usize),
    target_in: usize,
    target_out: usize,
}

// Implementation of the VGG model
impl<'a> Vgg<'a> {
    // Function to create a new VGG model
    pub fn new(vb: VarBuilder<'a>, model: Models) -> Result<Self> {
        let blocks = match model {
            Models::Vgg13 => vgg13_blocks(vb)?,
            Models::Vgg16 => vgg16_blocks(vb)?,
            Models::Vgg19 => vgg19_blocks(vb)?,
        };
        Ok(Self { blocks })
    }
}

// Implementation of the forward pass for the VGG model
impl ModuleT for Vgg<'_> {
    fn forward_t(&self, xs: &Tensor, train: bool) -> Result<Tensor> {
        let mut xs = xs.unsqueeze(0)?;
        for block in self.blocks.iter() {
            xs = xs.apply_t(block, train)?;
        }
        Ok(xs)
    }
}

// Function to create a conv2d block
// The block is composed of a series of conv2d + ReLU layers (two or more, depending on the
// VGG variant) followed by a max pool layer
fn conv2d_block(convs: &[(usize, usize, &str)], vb: &VarBuilder) -> Result<FuncT<'static>> {
    let layers = convs
        .iter()
        .map(|&(in_c, out_c, name)| {
            candle_nn::conv2d(
                in_c,
                out_c,
                3,
                candle_nn::Conv2dConfig {
                    stride: 1,
                    padding: 1,
                    ..Default::default()
                },
                vb.pp(name),
            )
        })
        .collect::<Result<Vec<_>>>()?;
    Ok(FuncT::new(move |xs, _train| {
        let mut xs = xs.clone();
        for layer in layers.iter() {
            xs = xs.apply(layer)?.relu()?
        }
        xs = xs.max_pool2d_with_stride(2, 2)?;
        Ok(xs)
    }))
}

// Function to create the fully connected layers
// The block is composed of three linear layers, each preceded by dropout and followed by ReLU
fn fully_connected(
    num_classes: usize,
    pre_logit_1: PreLogitConfig,
    pre_logit_2: PreLogitConfig,
    vb: VarBuilder,
) -> Result<FuncT> {
    let lin = get_weights_and_biases(
        &vb.pp("pre_logits.fc1"),
        pre_logit_1.in_dim,
        pre_logit_1.target_in,
        pre_logit_1.target_out,
    )?;
    let lin2 = get_weights_and_biases(
        &vb.pp("pre_logits.fc2"),
        pre_logit_2.in_dim,
        pre_logit_2.target_in,
        pre_logit_2.target_out,
    )?;
    let dropout1 = candle_nn::Dropout::new(0.5);
    let dropout2 = candle_nn::Dropout::new(0.5);
    let dropout3 = candle_nn::Dropout::new(0.5);
    Ok(FuncT::new(move |xs, train| {
        let xs = xs.reshape((1, pre_logit_1.target_out))?;
        let xs = xs.apply_t(&dropout1, train)?.apply(&lin)?.relu()?;
        let xs = xs.apply_t(&dropout2, train)?.apply(&lin2)?.relu()?;
        let lin3 = candle_nn::linear(4096, num_classes, vb.pp("head.fc"))?;
        let xs = xs.apply_t(&dropout3, train)?.apply(&lin3)?.relu()?;
        Ok(xs)
    }))
}

// Function to get the weights and biases for a layer
// This is required because the weights and biases are stored in a different format than our
// linear layer expects
fn get_weights_and_biases(
    vs: &VarBuilder,
    in_dim: (usize, usize, usize, usize),
    target_in: usize,
    target_out: usize,
) -> Result<candle_nn::Linear> {
    let init_ws = candle_nn::init::DEFAULT_KAIMING_NORMAL;
    let ws = vs.get_with_hints(in_dim, "weight", init_ws)?;
    let ws = ws.reshape((target_in, target_out))?;
    let bound = 1. / (target_out as f64).sqrt();
    let init_bs = candle_nn::Init::Uniform {
        lo: -bound,
        up: bound,
    };
    let bs = vs.get_with_hints(target_in, "bias", init_bs)?;
    Ok(candle_nn::Linear::new(ws, Some(bs)))
}

fn vgg13_blocks(vb: VarBuilder) -> Result<Vec<FuncT>> {
    let num_classes = 1000;
    let blocks = vec![
        conv2d_block(&[(3, 64, "features.0"), (64, 64, "features.2")], &vb)?,
        conv2d_block(&[(64, 128, "features.5"), (128, 128, "features.7")], &vb)?,
        conv2d_block(&[(128, 256, "features.10"), (256, 256, "features.12")], &vb)?,
        conv2d_block(&[(256, 512, "features.15"), (512, 512, "features.17")], &vb)?,
        conv2d_block(&[(512, 512, "features.20"), (512, 512, "features.22")], &vb)?,
        fully_connected(
            num_classes,
            PreLogitConfig {
                in_dim: (4096, 512, 7, 7),
                target_in: 4096,
                target_out: 512 * 7 * 7,
            },
            PreLogitConfig {
                in_dim: (4096, 4096, 1, 1),
                target_in: 4096,
                target_out: 4096,
            },
            vb.clone(),
        )?,
    ];
    Ok(blocks)
}

fn vgg16_blocks(vb: VarBuilder) -> Result<Vec<FuncT>> {
    let num_classes = 1000;
    let blocks = vec![
        conv2d_block(&[(3, 64, "features.0"), (64, 64, "features.2")], &vb)?,
        conv2d_block(&[(64, 128, "features.5"), (128, 128, "features.7")], &vb)?,
        conv2d_block(
            &[
                (128, 256, "features.10"),
                (256, 256, "features.12"),
                (256, 256, "features.14"),
            ],
            &vb,
        )?,
        conv2d_block(
            &[
                (256, 512, "features.17"),
                (512, 512, "features.19"),
                (512, 512, "features.21"),
            ],
            &vb,
        )?,
        conv2d_block(
            &[
                (512, 512, "features.24"),
                (512, 512, "features.26"),
                (512, 512, "features.28"),
            ],
            &vb,
        )?,
        fully_connected(
            num_classes,
            PreLogitConfig {
                in_dim: (4096, 512, 7, 7),
                target_in: 4096,
                target_out: 512 * 7 * 7,
            },
            PreLogitConfig {
                in_dim: (4096, 4096, 1, 1),
                target_in: 4096,
                target_out: 4096,
            },
            vb.clone(),
        )?,
    ];
    Ok(blocks)
}

fn vgg19_blocks(vb: VarBuilder) -> Result<Vec<FuncT>> {
    let num_classes = 1000;
    let blocks = vec![
        conv2d_block(&[(3, 64, "features.0"), (64, 64, "features.2")], &vb)?,
        conv2d_block(&[(64, 128, "features.5"), (128, 128, "features.7")], &vb)?,
        conv2d_block(
            &[
                (128, 256, "features.10"),
                (256, 256, "features.12"),
                (256, 256, "features.14"),
                (256, 256, "features.16"),
            ],
            &vb,
        )?,
        conv2d_block(
            &[
                (256, 512, "features.19"),
                (512, 512, "features.21"),
                (512, 512, "features.23"),
                (512, 512, "features.25"),
            ],
            &vb,
        )?,
        conv2d_block(
            &[
                (512, 512, "features.28"),
                (512, 512, "features.30"),
                (512, 512, "features.32"),
                (512, 512, "features.34"),
            ],
            &vb,
        )?,
        fully_connected(
            num_classes,
            PreLogitConfig {
                in_dim: (4096, 512, 7, 7),
                target_in: 4096,
                target_out: 512 * 7 * 7,
            },
            PreLogitConfig {
                in_dim: (4096, 4096, 1, 1),
                target_in: 4096,
                target_out: 4096,
            },
            vb.clone(),
        )?,
    ];
    Ok(blocks)
}
candle/candle-transformers/src/models/vgg.rs/0
{ "file_path": "candle/candle-transformers/src/models/vgg.rs", "repo_id": "candle", "token_count": 4390 }
//! Bounding Boxes and Intersection //! //! This module provides functionality for handling bounding boxes and their manipulation, //! particularly in the context of object detection. It includes tools for calculating //! intersection over union (IoU) and non-maximum suppression (NMS). /// A bounding box around an object. #[derive(Debug, Clone)] pub struct Bbox<D> { pub xmin: f32, pub ymin: f32, pub xmax: f32, pub ymax: f32, pub confidence: f32, pub data: D, } #[derive(Debug, Clone, Copy, PartialEq)] pub struct KeyPoint { pub x: f32, pub y: f32, pub mask: f32, } /// Intersection over union of two bounding boxes. pub fn iou<D>(b1: &Bbox<D>, b2: &Bbox<D>) -> f32 { let b1_area = (b1.xmax - b1.xmin + 1.) * (b1.ymax - b1.ymin + 1.); let b2_area = (b2.xmax - b2.xmin + 1.) * (b2.ymax - b2.ymin + 1.); let i_xmin = b1.xmin.max(b2.xmin); let i_xmax = b1.xmax.min(b2.xmax); let i_ymin = b1.ymin.max(b2.ymin); let i_ymax = b1.ymax.min(b2.ymax); let i_area = (i_xmax - i_xmin + 1.).max(0.) * (i_ymax - i_ymin + 1.).max(0.); i_area / (b1_area + b2_area - i_area) } pub fn non_maximum_suppression<D>(bboxes: &mut [Vec<Bbox<D>>], threshold: f32) { // Perform non-maximum suppression. for bboxes_for_class in bboxes.iter_mut() { bboxes_for_class.sort_by(|b1, b2| b2.confidence.partial_cmp(&b1.confidence).unwrap()); let mut current_index = 0; for index in 0..bboxes_for_class.len() { let mut drop = false; for prev_index in 0..current_index { let iou = iou(&bboxes_for_class[prev_index], &bboxes_for_class[index]); if iou > threshold { drop = true; break; } } if !drop { bboxes_for_class.swap(current_index, index); current_index += 1; } } bboxes_for_class.truncate(current_index); } } // Updates confidences starting at highest and comparing subsequent boxes. fn update_confidences<D>( bboxes_for_class: &[Bbox<D>], updated_confidences: &mut [f32], iou_threshold: f32, sigma: f32, ) { let len = bboxes_for_class.len(); for current_index in 0..len { let current_bbox = &bboxes_for_class[current_index]; for index in (current_index + 1)..len { let iou_val = iou(current_bbox, &bboxes_for_class[index]); if iou_val > iou_threshold { // Decay calculation from page 4 of: https://arxiv.org/pdf/1704.04503 let decay = (-iou_val * iou_val / sigma).exp(); let updated_confidence = bboxes_for_class[index].confidence * decay; updated_confidences[index] = updated_confidence; } } } } // Sorts the bounding boxes by confidence and applies soft non-maximum suppression. // This function is based on the algorithm described in https://arxiv.org/pdf/1704.04503 pub fn soft_non_maximum_suppression<D>( bboxes: &mut [Vec<Bbox<D>>], iou_threshold: Option<f32>, confidence_threshold: Option<f32>, sigma: Option<f32>, ) { let iou_threshold = iou_threshold.unwrap_or(0.5); let confidence_threshold = confidence_threshold.unwrap_or(0.1); let sigma = sigma.unwrap_or(0.5); for bboxes_for_class in bboxes.iter_mut() { // Sort boxes by confidence in descending order bboxes_for_class.sort_by(|b1, b2| b2.confidence.partial_cmp(&b1.confidence).unwrap()); let mut updated_confidences = bboxes_for_class .iter() .map(|bbox| bbox.confidence) .collect::<Vec<_>>(); update_confidences( bboxes_for_class, &mut updated_confidences, iou_threshold, sigma, ); // Update confidences, set to 0.0 if below threshold for (i, &confidence) in updated_confidences.iter().enumerate() { bboxes_for_class[i].confidence = if confidence < confidence_threshold { 0.0 } else { confidence }; } } }
candle/candle-transformers/src/object_detection.rs/0
{ "file_path": "candle/candle-transformers/src/object_detection.rs", "repo_id": "candle", "token_count": 1950 }
use candle::{Device, Tensor}; use candle_transformers::generation::LogitsProcessor; use candle_wasm_example_llama2::worker::{Model as M, ModelData}; use wasm_bindgen::prelude::*; #[wasm_bindgen] pub struct Model { inner: M, logits_processor: LogitsProcessor, tokens: Vec<u32>, repeat_penalty: f32, } impl Model { fn process(&mut self, tokens: &[u32]) -> candle::Result<String> { const REPEAT_LAST_N: usize = 64; let dev = Device::Cpu; let input = Tensor::new(tokens, &dev)?.unsqueeze(0)?; let logits = self.inner.llama.forward(&input, tokens.len())?; let logits = logits.squeeze(0)?; let logits = if self.repeat_penalty == 1. || tokens.is_empty() { logits } else { let start_at = self.tokens.len().saturating_sub(REPEAT_LAST_N); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &self.tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; self.tokens.push(next_token); let text = match self.inner.tokenizer.id_to_token(next_token) { Some(text) => text.replace('▁', " ").replace("<0x0A>", "\n"), None => "".to_string(), }; Ok(text) } } #[wasm_bindgen] impl Model { #[wasm_bindgen(constructor)] pub fn new(weights: Vec<u8>, tokenizer: Vec<u8>) -> Result<Model, JsError> { let model = M::load(ModelData { tokenizer, model: weights, }); let logits_processor = LogitsProcessor::new(299792458, None, None); match model { Ok(inner) => Ok(Self { inner, logits_processor, tokens: vec![], repeat_penalty: 1., }), Err(e) => Err(JsError::new(&e.to_string())), } } #[wasm_bindgen] pub fn get_seq_len(&mut self) -> usize { self.inner.config.seq_len } #[wasm_bindgen] pub fn init_with_prompt( &mut self, prompt: String, temp: f64, top_p: f64, repeat_penalty: f32, seed: u64, ) -> Result<String, JsError> { // First reset the cache. { let mut cache = self.inner.cache.kvs.lock().unwrap(); for elem in cache.iter_mut() { *elem = None } } let temp = if temp <= 0. { None } else { Some(temp) }; let top_p = if top_p <= 0. || top_p >= 1. { None } else { Some(top_p) }; self.logits_processor = LogitsProcessor::new(seed, temp, top_p); self.repeat_penalty = repeat_penalty; self.tokens.clear(); let tokens = self .inner .tokenizer .encode(prompt, true) .map_err(|m| JsError::new(&m.to_string()))? .get_ids() .to_vec(); let text = self .process(&tokens) .map_err(|m| JsError::new(&m.to_string()))?; Ok(text) } #[wasm_bindgen] pub fn next_token(&mut self) -> Result<String, JsError> { let last_token = *self.tokens.last().unwrap(); let text = self .process(&[last_token]) .map_err(|m| JsError::new(&m.to_string()))?; Ok(text) } } fn main() {}
candle/candle-wasm-examples/llama2-c/src/bin/m.rs/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/src/bin/m.rs", "repo_id": "candle", "token_count": 1807 }
<html> <head> <meta content="text/html;charset=utf-8" http-equiv="Content-Type" /> <title>Candle Phi 1.5 / Phi 2.0 Rust/WASM</title> </head> <body></body> </html> <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/highlightjs/[email protected]/build/styles/default.min.css" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } code, output, select, pre { font-family: "Source Code Pro", monospace; } </style> <style type="text/tailwindcss"> .link { @apply underline hover:text-blue-500 hover:no-underline; } </style> <script src="https://cdn.tailwindcss.com"></script> <script type="module"> import snarkdown from "https://cdn.skypack.dev/snarkdown"; import hljs from "https://cdn.skypack.dev/highlight.js"; // models base url const MODELS = { phi_1_5_q4k: { base_url: "https://huggingface.co/lmz/candle-quantized-phi/resolve/main/", model: "model-q4k.gguf", tokenizer: "tokenizer.json", config: "phi-1_5.json", quantized: true, seq_len: 2048, size: "800 MB", }, phi_1_5_q80: { base_url: "https://huggingface.co/lmz/candle-quantized-phi/resolve/main/", model: "model-q80.gguf", tokenizer: "tokenizer.json", config: "phi-1_5.json", quantized: true, seq_len: 2048, size: "1.51 GB", }, phi_2_0_q4k: { base_url: "https://huggingface.co/radames/phi-2-quantized/resolve/main/", model: [ "model-v2-q4k.gguf_aa.part", "model-v2-q4k.gguf_ab.part", "model-v2-q4k.gguf_ac.part", ], tokenizer: "tokenizer.json", config: "config.json", quantized: true, seq_len: 2048, size: "1.57GB", }, puffin_phi_v2_q4k: { base_url: "https://huggingface.co/lmz/candle-quantized-phi/resolve/main/", model: "model-puffin-phi-v2-q4k.gguf", tokenizer: "tokenizer-puffin-phi-v2.json", config: "puffin-phi-v2.json", quantized: true, seq_len: 2048, size: "798 MB", }, puffin_phi_v2_q80: { base_url: "https://huggingface.co/lmz/candle-quantized-phi/resolve/main/", model: "model-puffin-phi-v2-q80.gguf", tokenizer: "tokenizer-puffin-phi-v2.json", config: "puffin-phi-v2.json", quantized: true, seq_len: 2048, size: "1.50 GB", }, }; const TEMPLATES = [ { title: "Simple prompt", prompt: `Sebastien is in London today, it’s the middle of July yet it’s raining, so Sebastien is feeling gloomy. He`, }, { title: "Think step by step", prompt: `Suppose Alice originally had 3 apples, then Bob gave Alice 7 apples, then Alice gave Cook 5 apples, and then Tim gave Alice 3x the amount of apples Alice had. How many apples does Alice have now? Let’s think step by step.`, }, { title: "Explaing a code snippet", prompt: `What does this script do? \`\`\`python s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(('', 0)) s.listen(1) conn, addr = s.accept() print('Connected by', addr) return conn.getsockname()[1] \`\`\` Let’s think step by step.`, }, { title: "Question answering", prompt: `Instruct: What is the capital of France? Output:`, }, { title: "Chat mode", prompt: `Alice: Can you tell me how to create a python application to go through all the files in one directory where the file’s name DOES NOT end with '.json'? 
Bob:`, }, { title: "Python code completion", prompt: `"""write a python function called batch(function, list) which call function(x) for x in list in parallel""" Solution:`, }, { title: "Python Sample", prompt: `"""Can you make sure those histograms appear side by side on the same plot: \`\`\`python plt.hist(intreps_retrained[0][1].view(64,-1).norm(dim=1).detach().cpu().numpy(), bins = 20) plt.hist(intreps_pretrained[0][1].view(64,-1).norm(dim=1).detach().cpu().numpy(), bins = 20) \`\`\` """`, }, { title: "Write a Twitter post", prompt: `Write a twitter post for the discovery of gravitational wave. Twitter Post:`, }, { title: "Write a review", prompt: `Write a polite review complaining that the video game 'Random Game' was too badly optimized and it burned my laptop. Very polite review:`, }, ]; const phiWorker = new Worker("./phiWorker.js", { type: "module", }); async function generateSequence(controller) { const getValue = (id) => document.querySelector(`#${id}`).value; const modelID = getValue("model"); const model = MODELS[modelID]; const weightsURL = model.model instanceof Array ? model.model.map((m) => model.base_url + m) : model.base_url + model.model; const tokenizerURL = model.base_url + model.tokenizer; const configURL = model.base_url + model.config; const prompt = getValue("prompt").trim(); const temperature = getValue("temperature"); const topP = getValue("top-p"); const repeatPenalty = getValue("repeat_penalty"); const seed = getValue("seed"); const maxSeqLen = getValue("max-seq"); function updateStatus(data) { const outStatus = document.querySelector("#output-status"); const outGen = document.querySelector("#output-generation"); const outCounter = document.querySelector("#output-counter"); switch (data.status) { case "loading": outStatus.hidden = false; outStatus.textContent = data.message; outGen.hidden = true; outCounter.hidden = true; break; case "generating": const { message, prompt, sentence, tokensSec, totalTime } = data; outStatus.hidden = true; outCounter.hidden = false; outGen.hidden = false; outGen.innerHTML = snarkdown(prompt + sentence); outCounter.innerHTML = `${(totalTime / 1000).toFixed( 2 )}s (${tokensSec.toFixed(2)} tok/s)`; hljs.highlightAll(); break; case "complete": outStatus.hidden = true; outGen.hidden = false; break; } } return new Promise((resolve, reject) => { phiWorker.postMessage({ weightsURL, modelID, tokenizerURL, configURL, quantized: model.quantized, prompt, temp: temperature, top_p: topP, repeatPenalty, seed: seed, maxSeqLen, command: "start", }); const handleAbort = () => { phiWorker.postMessage({ command: "abort" }); }; const handleMessage = (event) => { const { status, error, message, prompt, sentence } = event.data; if (status) updateStatus(event.data); if (error) { phiWorker.removeEventListener("message", handleMessage); reject(new Error(error)); } if (status === "aborted") { phiWorker.removeEventListener("message", handleMessage); resolve(event.data); } if (status === "complete") { phiWorker.removeEventListener("message", handleMessage); resolve(event.data); } }; controller.signal.addEventListener("abort", handleAbort); phiWorker.addEventListener("message", handleMessage); }); } const form = document.querySelector("#form"); const prompt = document.querySelector("#prompt"); const clearBtn = document.querySelector("#clear-btn"); const runBtn = document.querySelector("#run"); const modelSelect = document.querySelector("#model"); const promptTemplates = document.querySelector("#prompt-templates"); let runController = new AbortController(); 
let isRunning = false; document.addEventListener("DOMContentLoaded", () => { for (const [id, model] of Object.entries(MODELS)) { const option = document.createElement("option"); option.value = id; option.innerText = `${id} (${model.size})`; modelSelect.appendChild(option); } const query = new URLSearchParams(window.location.search); const modelID = query.get("model"); if (modelID) { modelSelect.value = modelID; } else { modelSelect.value = "phi_1_5_q4k"; } for (const [i, { title, prompt }] of TEMPLATES.entries()) { const div = document.createElement("div"); const input = document.createElement("input"); input.type = "radio"; input.name = "task"; input.id = `templates-${i}`; input.classList.add("font-light", "cursor-pointer"); input.value = prompt; const label = document.createElement("label"); label.htmlFor = `templates-${i}`; label.classList.add("cursor-pointer"); label.innerText = title; div.appendChild(input); div.appendChild(label); promptTemplates.appendChild(div); } }); promptTemplates.addEventListener("change", (e) => { const template = e.target.value; prompt.value = template; prompt.style.height = "auto"; prompt.style.height = prompt.scrollHeight + "px"; runBtn.disabled = false; clearBtn.classList.remove("invisible"); }); modelSelect.addEventListener("change", (e) => { const query = new URLSearchParams(window.location.search); query.set("model", e.target.value); window.history.replaceState( {}, "", `${window.location.pathname}?${query}` ); window.parent.postMessage({ queryString: "?" + query }, "*"); const model = MODELS[e.target.value]; document.querySelector("#max-seq").max = model.seq_len; document.querySelector("#max-seq").nextElementSibling.value = 200; }); form.addEventListener("submit", async (e) => { e.preventDefault(); if (isRunning) { stopRunning(); } else { startRunning(); await generateSequence(runController); stopRunning(); } }); function startRunning() { isRunning = true; runBtn.textContent = "Stop"; } function stopRunning() { runController.abort(); runController = new AbortController(); runBtn.textContent = "Run"; isRunning = false; } clearBtn.addEventListener("click", (e) => { e.preventDefault(); prompt.value = ""; clearBtn.classList.add("invisible"); runBtn.disabled = true; stopRunning(); }); prompt.addEventListener("input", (e) => { runBtn.disabled = false; if (e.target.value.length > 0) { clearBtn.classList.remove("invisible"); } else { clearBtn.classList.add("invisible"); } }); </script> </head> <body class="container max-w-4xl mx-auto p-4 text-gray-800"> <main class="grid grid-cols-1 gap-8 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle Phi 1.5 / Phi 2.0</h1> <h2 class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> The <a href="https://huggingface.co/microsoft/phi-1_5" class="link" target="_blank" >Phi-1.5</a > and <a href="https://huggingface.co/microsoft/phi-2" class="link" target="_blank" >Phi-2</a > models achieve state-of-the-art performance with only 1.3 billion and 2.7 billion parameters, compared to larger models with up to 13 billion parameters. Here you can try the quantized versions. Additional prompt examples are available in the <a href="https://arxiv.org/pdf/2309.05463.pdf#page=8" class="link" target="_blank" > technical report </a >. 
</p> <p class="max-w-lg"> You can also try <a href="https://huggingface.co/teknium/Puffin-Phi-v2" class="link" target="_blank" >Puffin-Phi V2 </a> quantized version, a fine-tuned version of Phi-1.5 on the <a href="https://huggingface.co/datasets/LDJnr/Puffin" class="link" target="_blank" >Puffin dataset </a> </p> </div> <div> <p class="text-xs italic max-w-lg"> <b>Note:</b> When first run, the app will download and cache the model, which could take a few minutes. The models are <b>~800MB</b> or <b>~1.57GB</b> in size. </p> </div> <div> <label for="model" class="font-medium">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light" ></select> </div> <div> <details> <summary class="font-medium cursor-pointer">Prompt Templates</summary> <form id="prompt-templates" class="grid grid-cols-1 sm:grid-cols-2 gap-1 my-2" ></form> </details> </div> <form id="form" class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center" > <input type="submit" hidden /> <textarea type="text" id="prompt" class="font-light text-lg w-full px-3 py-2 mx-1 resize-none outline-none" oninput="this.style.height = 0;this.style.height = this.scrollHeight + 'px'" placeholder="Add your prompt here..." > Instruct: Write a detailed analogy between mathematics and a lighthouse. Output:</textarea > <button id="clear-btn"> <svg fill="none" xmlns="http://www.w3.org/2000/svg" width="40" viewBox="0 0 70 40" > <path opacity=".5" d="M39 .2v40.2" stroke="#1F2937" /> <path d="M1.5 11.5 19 29.1m0-17.6L1.5 29.1" opacity=".5" stroke="#1F2937" stroke-width="2" /> </svg> </button> <button id="run" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 w-16 rounded disabled:bg-gray-300 disabled:cursor-not-allowed" > Run </button> </form> <details> <summary class="font-medium cursor-pointer">Advanced Options</summary> <div class="grid grid-cols-3 max-w-md items-center gap-3 py-3"> <label class="text-sm font-medium" for="max-seq" >Maximum length </label> <input type="range" id="max-seq" name="max-seq" min="1" max="2048" step="1" value="200" oninput="this.nextElementSibling.value = Number(this.value)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" > 200</output > <label class="text-sm font-medium" for="temperature" >Temperature</label > <input type="range" id="temperature" name="temperature" min="0" max="2" step="0.01" value="0.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" > 0.00</output > <label class="text-sm font-medium" for="top-p">Top-p</label> <input type="range" id="top-p" name="top-p" min="0" max="1" step="0.01" value="1.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" > 1.00</output > <label class="text-sm font-medium" for="repeat_penalty" >Repeat Penalty</label > <input type="range" id="repeat_penalty" name="repeat_penalty" min="1" max="2" step="0.01" value="1.10" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" >1.10</output > <label class="text-sm font-medium" for="seed">Seed</label> <input type="number" id="seed" name="seed" value="299792458" class="font-light border border-gray-700 text-right rounded-md 
p-2" /> <button id="run" onclick="document.querySelector('#seed').value = Math.floor(Math.random() * Number.MAX_SAFE_INTEGER)" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-1 w-[50px] rounded disabled:bg-gray-300 disabled:cursor-not-allowed text-sm" > Rand </button> </div> </details> <div> <h3 class="font-medium">Generation:</h3> <div class="min-h-[250px] bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2" > <div id="output-counter" hidden class="ml-auto font-semibold grid-rows-1" ></div> <p hidden id="output-generation" class="grid-rows-2 text-lg"></p> <span id="output-status" class="m-auto font-light" >No output yet</span > </div> </div> </main> </body> </html>
candle/candle-wasm-examples/phi/index.html/0
{ "file_path": "candle/candle-wasm-examples/phi/index.html", "repo_id": "candle", "token_count": 9818 }
<html> <head> <meta content="text/html;charset=utf-8" http-equiv="Content-Type" /> <title>Candle T5</title> </head> <body></body> </html> <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } </style> <style type="text/tailwindcss"> .link { @apply underline hover:text-blue-500 hover:no-underline; } </style> <script src="https://cdn.tailwindcss.com"></script> <script type="module"> import { getModelInfo, MODELS, extractEmbeddings, generateText, } from "./utils.js"; const t5ModelEncoderWorker = new Worker("./T5ModelEncoderWorker.js", { type: "module", }); const t5ModelConditionalGeneration = new Worker( "./T5ModelConditionalGeneration.js", { type: "module" } ); const formEl = document.querySelector("#form"); const modelEl = document.querySelector("#model"); const promptEl = document.querySelector("#prompt"); const temperatureEl = document.querySelector("#temperature"); const toppEL = document.querySelector("#top-p"); const repeatPenaltyEl = document.querySelector("#repeat_penalty"); const seedEl = document.querySelector("#seed"); const outputEl = document.querySelector("#output-generation"); const tasksEl = document.querySelector("#tasks"); let selectedTaskID = ""; document.addEventListener("DOMContentLoaded", () => { for (const [id, model] of Object.entries(MODELS)) { const option = document.createElement("option"); option.value = id; option.innerText = `${id} (${model.size})`; modelEl.appendChild(option); } populateTasks(modelEl.value); modelEl.addEventListener("change", (e) => { populateTasks(e.target.value); }); tasksEl.addEventListener("change", (e) => { const task = e.target.value; const modelID = modelEl.value; promptEl.value = MODELS[modelID].tasks[task].prefix; selectedTaskID = task; }); }); function populateTasks(modelID) { const tasks = MODELS[modelID].tasks; tasksEl.innerHTML = ""; for (const [task, params] of Object.entries(tasks)) { const div = document.createElement("div"); div.innerHTML = ` <input type="radio" name="task" id="${task}" class="font-light cursor-pointer" value="${task}" /> <label for="${task}" class="cursor-pointer"> ${params.prefix} </label> `; tasksEl.appendChild(div); } selectedTaskID = Object.keys(tasks)[0]; tasksEl.querySelector(`#${selectedTaskID}`).checked = true; } form.addEventListener("submit", (e) => { e.preventDefault(); const promptText = promptEl.value; const modelID = modelEl.value; const { modelURL, configURL, tokenizerURL, maxLength } = getModelInfo( modelID, selectedTaskID ); const params = { temperature: Number(temperatureEl.value), top_p: Number(toppEL.value), repetition_penalty: Number(repeatPenaltyEl.value), seed: BigInt(seedEl.value), max_length: maxLength, }; generateText( t5ModelConditionalGeneration, modelURL, tokenizerURL, configURL, modelID, promptText, params, (status) => { if (status.status === "loading") { outputEl.innerText = "Loading model..."; } if (status.status === "decoding") { outputEl.innerText = "Generating..."; } } ).then(({ output }) => { outputEl.innerText = output.generation; }); }); </script> </head> <body class="container max-w-4xl mx-auto p-4"> <main class="grid grid-cols-1 gap-8 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle T5 Transformer</h1> <h2 
class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> This demo showcase Text-To-Text Transfer Transformer (<a href="https://blog.research.google/2020/02/exploring-transfer-learning-with-t5.html" target="_blank" class="link" >T5</a >) models right in your browser, thanks to <a href="https://github.com/huggingface/candle/" target="_blank" class="link"> Candle </a> ML framework and rust/wasm. You can choose from a range of available models, including <a href="https://huggingface.co/t5-small" target="_blank" class="link"> t5-small</a >, <a href="https://huggingface.co/t5-base" target="_blank" class="link" >t5-base</a >, <a href="https://huggingface.co/google/flan-t5-small" target="_blank" class="link" >flan-t5-small</a >, several <a href="https://huggingface.co/lmz/candle-quantized-t5/tree/main" target="_blank" class="link"> t5 quantized gguf models</a >, and also a quantized <a href="https://huggingface.co/jbochi/candle-coedit-quantized/tree/main" target="_blank" class="link"> CoEdIT model for text rewrite</a >. </p> </div> <div> <label for="model" class="font-medium">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light"></select> </div> <div> <h3 class="font-medium">Task Prefix:</h3> <form id="tasks" class="flex flex-col gap-1 my-2"></form> </div> <form id="form" class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center"> <input type="submit" hidden /> <input type="text" id="prompt" class="font-light w-full px-3 py-2 mx-1 resize-none outline-none" placeholder="Add prompt here, e.g. 'translate English to German: Today I'm going to eat Ice Cream'" value="translate English to German: Today I'm going to eat Ice Cream" /> <button class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 w-16 rounded disabled:bg-gray-300 disabled:cursor-not-allowed"> Run </button> </form> <div class="grid grid-cols-3 max-w-md items-center gap-3"> <label class="text-sm font-medium" for="temperature">Temperature</label> <input type="range" id="temperature" name="temperature" min="0" max="2" step="0.01" value="0.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 0.00</output > <label class="text-sm font-medium" for="top-p">Top-p</label> <input type="range" id="top-p" name="top-p" min="0" max="1" step="0.01" value="1.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 1.00</output > <label class="text-sm font-medium" for="repeat_penalty" >Repeat Penalty</label > <input type="range" id="repeat_penalty" name="repeat_penalty" min="1" max="2" step="0.01" value="1.10" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" >1.10</output > <label class="text-sm font-medium" for="seed">Seed</label> <input type="number" id="seed" name="seed" value="299792458" class="font-light border border-gray-700 text-right rounded-md p-2" /> <button id="run" onclick="document.querySelector('#seed').value = BigInt(Math.floor(Math.random() * 2**64-1))" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-1 w-[50px] rounded disabled:bg-gray-300 disabled:cursor-not-allowed text-sm"> Rand </button> </div> <div> <h3 
class="font-medium">Generation:</h3> <div class="min-h-[250px] bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2 text-lg"> <p id="output-generation" class="grid-rows-2">No output yet</p> </div> </div> </main> </body> </html>
candle/candle-wasm-examples/t5/index.html/0
{ "file_path": "candle/candle-wasm-examples/t5/index.html", "repo_id": "candle", "token_count": 4724 }
pub const LANGUAGES: [(&str, &str); 99] = [ ("en", "english"), ("zh", "chinese"), ("de", "german"), ("es", "spanish"), ("ru", "russian"), ("ko", "korean"), ("fr", "french"), ("ja", "japanese"), ("pt", "portuguese"), ("tr", "turkish"), ("pl", "polish"), ("ca", "catalan"), ("nl", "dutch"), ("ar", "arabic"), ("sv", "swedish"), ("it", "italian"), ("id", "indonesian"), ("hi", "hindi"), ("fi", "finnish"), ("vi", "vietnamese"), ("he", "hebrew"), ("uk", "ukrainian"), ("el", "greek"), ("ms", "malay"), ("cs", "czech"), ("ro", "romanian"), ("da", "danish"), ("hu", "hungarian"), ("ta", "tamil"), ("no", "norwegian"), ("th", "thai"), ("ur", "urdu"), ("hr", "croatian"), ("bg", "bulgarian"), ("lt", "lithuanian"), ("la", "latin"), ("mi", "maori"), ("ml", "malayalam"), ("cy", "welsh"), ("sk", "slovak"), ("te", "telugu"), ("fa", "persian"), ("lv", "latvian"), ("bn", "bengali"), ("sr", "serbian"), ("az", "azerbaijani"), ("sl", "slovenian"), ("kn", "kannada"), ("et", "estonian"), ("mk", "macedonian"), ("br", "breton"), ("eu", "basque"), ("is", "icelandic"), ("hy", "armenian"), ("ne", "nepali"), ("mn", "mongolian"), ("bs", "bosnian"), ("kk", "kazakh"), ("sq", "albanian"), ("sw", "swahili"), ("gl", "galician"), ("mr", "marathi"), ("pa", "punjabi"), ("si", "sinhala"), ("km", "khmer"), ("sn", "shona"), ("yo", "yoruba"), ("so", "somali"), ("af", "afrikaans"), ("oc", "occitan"), ("ka", "georgian"), ("be", "belarusian"), ("tg", "tajik"), ("sd", "sindhi"), ("gu", "gujarati"), ("am", "amharic"), ("yi", "yiddish"), ("lo", "lao"), ("uz", "uzbek"), ("fo", "faroese"), ("ht", "haitian creole"), ("ps", "pashto"), ("tk", "turkmen"), ("nn", "nynorsk"), ("mt", "maltese"), ("sa", "sanskrit"), ("lb", "luxembourgish"), ("my", "myanmar"), ("bo", "tibetan"), ("tl", "tagalog"), ("mg", "malagasy"), ("as", "assamese"), ("tt", "tatar"), ("haw", "hawaiian"), ("ln", "lingala"), ("ha", "hausa"), ("ba", "bashkir"), ("jw", "javanese"), ("su", "sundanese"), ];
candle/candle-wasm-examples/whisper/src/languages.rs/0
{ "file_path": "candle/candle-wasm-examples/whisper/src/languages.rs", "repo_id": "candle", "token_count": 1175 }
use crate::model::{report_detect, report_pose, Bbox, Multiples, YoloV8, YoloV8Pose}; use candle::{DType, Device, Result, Tensor}; use candle_nn::{Module, VarBuilder}; use serde::{Deserialize, Serialize}; use wasm_bindgen::prelude::*; use yew_agent::{HandlerId, Public, WorkerLink}; #[wasm_bindgen] extern "C" { // Use `js_namespace` here to bind `console.log(..)` instead of just // `log(..)` #[wasm_bindgen(js_namespace = console)] pub fn log(s: &str); } #[macro_export] macro_rules! console_log { // Note that this is using the `log` function imported above during // `bare_bones` ($($t:tt)*) => ($crate::worker::log(&format_args!($($t)*).to_string())) } // Communication to the worker happens through bincode, the model weights and configs are fetched // on the main thread and transferred via the following structure. #[derive(Serialize, Deserialize)] pub struct ModelData { pub weights: Vec<u8>, pub model_size: String, } #[derive(Serialize, Deserialize)] pub struct RunData { pub image_data: Vec<u8>, pub conf_threshold: f32, pub iou_threshold: f32, } pub struct Model { model: YoloV8, } impl Model { pub fn run( &self, image_data: Vec<u8>, conf_threshold: f32, iou_threshold: f32, ) -> Result<Vec<Vec<Bbox>>> { console_log!("image data: {}", image_data.len()); let image_data = std::io::Cursor::new(image_data); let original_image = image::ImageReader::new(image_data) .with_guessed_format()? .decode() .map_err(candle::Error::wrap)?; let (width, height) = { let w = original_image.width() as usize; let h = original_image.height() as usize; if w < h { let w = w * 640 / h; // Sizes have to be divisible by 32. (w / 32 * 32, 640) } else { let h = h * 640 / w; (640, h / 32 * 32) } }; let image_t = { let img = original_image.resize_exact( width as u32, height as u32, image::imageops::FilterType::CatmullRom, ); let data = img.to_rgb8().into_raw(); Tensor::from_vec( data, (img.height() as usize, img.width() as usize, 3), &Device::Cpu, )? .permute((2, 0, 1))? }; let image_t = (image_t.unsqueeze(0)?.to_dtype(DType::F32)? * (1. / 255.))?; let predictions = self.model.forward(&image_t)?.squeeze(0)?; console_log!("generated predictions {predictions:?}"); let bboxes = report_detect( &predictions, original_image, width, height, conf_threshold, iou_threshold, )?; Ok(bboxes) } pub fn load_(weights: Vec<u8>, model_size: &str) -> Result<Self> { let multiples = match model_size { "n" => Multiples::n(), "s" => Multiples::s(), "m" => Multiples::m(), "l" => Multiples::l(), "x" => Multiples::x(), _ => Err(candle::Error::Msg( "invalid model size: must be n, s, m, l or x".to_string(), ))?, }; let dev = &Device::Cpu; let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, dev)?; let model = YoloV8::load(vb, multiples, 80)?; Ok(Self { model }) } pub fn load(md: ModelData) -> Result<Self> { Self::load_(md.weights, &md.model_size.to_string()) } } pub struct ModelPose { model: YoloV8Pose, } impl ModelPose { pub fn run( &self, image_data: Vec<u8>, conf_threshold: f32, iou_threshold: f32, ) -> Result<Vec<Bbox>> { console_log!("image data: {}", image_data.len()); let image_data = std::io::Cursor::new(image_data); let original_image = image::ImageReader::new(image_data) .with_guessed_format()? .decode() .map_err(candle::Error::wrap)?; let (width, height) = { let w = original_image.width() as usize; let h = original_image.height() as usize; if w < h { let w = w * 640 / h; // Sizes have to be divisible by 32. 
(w / 32 * 32, 640) } else { let h = h * 640 / w; (640, h / 32 * 32) } }; let image_t = { let img = original_image.resize_exact( width as u32, height as u32, image::imageops::FilterType::CatmullRom, ); let data = img.to_rgb8().into_raw(); Tensor::from_vec( data, (img.height() as usize, img.width() as usize, 3), &Device::Cpu, )? .permute((2, 0, 1))? }; let image_t = (image_t.unsqueeze(0)?.to_dtype(DType::F32)? * (1. / 255.))?; let predictions = self.model.forward(&image_t)?.squeeze(0)?; console_log!("generated predictions {predictions:?}"); let bboxes = report_pose( &predictions, original_image, width, height, conf_threshold, iou_threshold, )?; Ok(bboxes) } pub fn load_(weights: Vec<u8>, model_size: &str) -> Result<Self> { let multiples = match model_size { "n" => Multiples::n(), "s" => Multiples::s(), "m" => Multiples::m(), "l" => Multiples::l(), "x" => Multiples::x(), _ => Err(candle::Error::Msg( "invalid model size: must be n, s, m, l or x".to_string(), ))?, }; let dev = &Device::Cpu; let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, dev)?; let model = YoloV8Pose::load(vb, multiples, 1, (17, 3))?; Ok(Self { model }) } pub fn load(md: ModelData) -> Result<Self> { Self::load_(md.weights, &md.model_size.to_string()) } } pub struct Worker { link: WorkerLink<Self>, model: Option<Model>, } #[derive(Serialize, Deserialize)] pub enum WorkerInput { ModelData(ModelData), RunData(RunData), } #[derive(Serialize, Deserialize)] pub enum WorkerOutput { ProcessingDone(std::result::Result<Vec<Vec<Bbox>>, String>), WeightsLoaded, } impl yew_agent::Worker for Worker { type Input = WorkerInput; type Message = (); type Output = std::result::Result<WorkerOutput, String>; type Reach = Public<Self>; fn create(link: WorkerLink<Self>) -> Self { Self { link, model: None } } fn update(&mut self, _msg: Self::Message) { // no messaging } fn handle_input(&mut self, msg: Self::Input, id: HandlerId) { let output = match msg { WorkerInput::ModelData(md) => match Model::load(md) { Ok(model) => { self.model = Some(model); Ok(WorkerOutput::WeightsLoaded) } Err(err) => Err(format!("model creation error {err:?}")), }, WorkerInput::RunData(rd) => match &mut self.model { None => Err("model has not been set yet".to_string()), Some(model) => { let result = model .run(rd.image_data, rd.conf_threshold, rd.iou_threshold) .map_err(|e| e.to_string()); Ok(WorkerOutput::ProcessingDone(result)) } }, }; self.link.respond(id, output); } fn name_of_resource() -> &'static str { "worker.js" } fn resource_path_is_relative() -> bool { true } }
candle/candle-wasm-examples/yolo/src/worker.rs/0
{ "file_path": "candle/candle-wasm-examples/yolo/src/worker.rs", "repo_id": "candle", "token_count": 4075 }
- local: index title: 🤗 Chat UI - title: Installation sections: - local: installation/local title: Local - local: installation/spaces title: Spaces - local: installation/docker title: Docker - local: installation/helm title: Helm - title: Configuration sections: - local: configuration/overview title: Overview - local: configuration/theming title: Theming - local: configuration/open-id title: OpenID - local: configuration/web-search title: Web Search - local: configuration/metrics title: Metrics - local: configuration/embeddings title: Text Embedding Models - title: Models sections: - local: configuration/models/overview title: Overview - local: configuration/models/multimodal title: Multimodal - local: configuration/models/tools title: Tools - title: Providers sections: - local: configuration/models/providers/anthropic title: Anthropic - local: configuration/models/providers/aws title: AWS - local: configuration/models/providers/cloudflare title: Cloudflare - local: configuration/models/providers/cohere title: Cohere - local: configuration/models/providers/google title: Google - local: configuration/models/providers/langserve title: Langserve - local: configuration/models/providers/llamacpp title: Llama.cpp - local: configuration/models/providers/ollama title: Ollama - local: configuration/models/providers/openai title: OpenAI - local: configuration/models/providers/tgi title: TGI - local: configuration/common-issues title: Common Issues - title: Developing sections: - local: developing/architecture title: Architecture - local: developing/copy-huggingchat title: Copy HuggingChat
chat-ui/docs/source/_toctree.yml/0
{ "file_path": "chat-ui/docs/source/_toctree.yml", "repo_id": "chat-ui", "token_count": 879 }
# Tools

Tool calling instructs the model to generate an output matching a user-defined schema, which may be parsed for invoking external tools. The model simply chooses the tools and their parameters. Currently, only `TGI` and `Cohere` with `Command R+` are supported.

<div class="flex justify-center">
  <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/tools-light.png" height="auto"/>
  <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/tools-dark.png" height="auto"/>
</div>

## TGI Configuration

A custom tokenizer is required to prompt the model to generate tool calls, as well as to prompt it with the tool results. The expected format for these tools and the resulting tool calls is hard-coded for TGI, so it's likely that only the following configuration will work:

```ini
MODELS=`[
  {
    "name" : "CohereForAI/c4ai-command-r-plus",
    "displayName": "Command R+",
    "description": "Command R+ is Cohere's latest LLM and is the first open weight model to beat GPT4 in the Chatbot Arena!",
    "tools": true,
    "tokenizer": "Xenova/c4ai-command-r-v01-tokenizer",
    "modelUrl": "https://huggingface.co/CohereForAI/c4ai-command-r-plus",
    "websiteUrl": "https://docs.cohere.com/docs/command-r-plus",
    "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/cohere-logo.png",
    "parameters": {
      "stop": ["<|END_OF_TURN_TOKEN|>"],
      "truncate" : 28672,
      "max_new_tokens" : 4096,
      "temperature" : 0.3
    }
  }
]`
```

## Cohere Configuration

The Cohere provider supports the endpoint's native method of tool calling. Refer to `endpoints/cohere` for implementation details.

```ini
MODELS=`[
  {
    "name": "command-r-plus",
    "displayName": "Command R+",
    "description": "Command R+ is Cohere's latest LLM and is the first open weight model to beat GPT4 in the Chatbot Arena!",
    "tools": true,
    "websiteUrl": "https://docs.cohere.com/docs/command-r-plus",
    "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/cohere-logo.png",
    "endpoints": [{
      "type": "cohere",
      "apiKey": "YOUR_API_KEY"
    }]
  }
]`
```

## Adding Tools

Tool implementations are placed in `src/lib/server/tools`, with helpers available for easy integration with HuggingFace Zero GPU spaces. In the future, there may be an OpenAPI interface for adding tools.

## Adding Support for Additional Models

The TGI implementation uses a custom tokenizer and a hard-coded schema for supporting tools. The Cohere implementation, on the other hand, uses the native support in the SDK to emit tool calls. This is the recommended way to add support for more models. Please see the `endpoints/cohere` section of the code for implementation details.
chat-ui/docs/source/configuration/models/tools.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/tools.md", "repo_id": "chat-ui", "token_count": 948 }
import readline from "readline"; import minimist from "minimist"; // @ts-expect-error: vite-node makes the var available but the typescript compiler doesn't see them import { env } from "$env/dynamic/private"; import { faker } from "@faker-js/faker"; import { ObjectId } from "mongodb"; // @ts-expect-error: vite-node makes the var available but the typescript compiler doesn't see them import { collections } from "$lib/server/database"; import { models } from "../src/lib/server/models.ts"; import type { User } from "../src/lib/types/User"; import type { Assistant } from "../src/lib/types/Assistant"; import type { Conversation } from "../src/lib/types/Conversation"; import type { Settings } from "../src/lib/types/Settings"; import type { CommunityToolDB, ToolLogoColor, ToolLogoIcon } from "../src/lib/types/Tool"; import { defaultEmbeddingModel } from "../src/lib/server/embeddingModels.ts"; import { Message } from "../src/lib/types/Message.ts"; import { addChildren } from "../src/lib/utils/tree/addChildren.ts"; import { generateSearchTokens } from "../src/lib/utils/searchTokens.ts"; import { ReviewStatus } from "../src/lib/types/Review.ts"; const rl = readline.createInterface({ input: process.stdin, output: process.stdout, }); rl.on("close", function () { process.exit(0); }); const possibleFlags = ["reset", "all", "users", "settings", "assistants", "conversations", "tools"]; const argv = minimist(process.argv.slice(2)); const flags = argv["_"].filter((flag) => possibleFlags.includes(flag)); async function generateMessages(preprompt?: string): Promise<Message[]> { const isLinear = faker.datatype.boolean(0.5); const isInterrupted = faker.datatype.boolean(0.05); const messages: Message[] = []; messages.push({ id: crypto.randomUUID(), from: "system", content: preprompt ?? "", createdAt: faker.date.recent({ days: 30 }), updatedAt: faker.date.recent({ days: 30 }), }); let isUser = true; let lastId = messages[0].id; if (isLinear) { const convLength = faker.number.int({ min: 1, max: 25 }) * 2; // must always be even for (let i = 0; i < convLength; i++) { lastId = addChildren( { messages, rootMessageId: messages[0].id, }, { from: isUser ? "user" : "assistant", content: faker.lorem.sentence({ min: 10, max: isUser ? 50 : 200, }), createdAt: faker.date.recent({ days: 30 }), updatedAt: faker.date.recent({ days: 30 }), interrupted: i === convLength - 1 && isInterrupted, }, lastId ); isUser = !isUser; } } else { const convLength = faker.number.int({ min: 2, max: 200 }); for (let i = 0; i < convLength; i++) { addChildren( { messages, rootMessageId: messages[0].id, }, { from: isUser ? "user" : "assistant", content: faker.lorem.sentence({ min: 10, max: isUser ? 50 : 200, }), createdAt: faker.date.recent({ days: 30 }), updatedAt: faker.date.recent({ days: 30 }), interrupted: i === convLength - 1 && isInterrupted, }, faker.helpers.arrayElement([ messages[0].id, ...messages.filter((m) => m.from === (isUser ? 
"assistant" : "user")).map((m) => m.id), ]) ); isUser = !isUser; } } return messages; } async function seed() { console.log("Seeding..."); const modelIds = models.map((model) => model.id); if (flags.includes("reset")) { console.log("Starting reset of DB"); await collections.users.deleteMany({}); await collections.settings.deleteMany({}); await collections.assistants.deleteMany({}); await collections.conversations.deleteMany({}); await collections.tools.deleteMany({}); await collections.migrationResults.deleteMany({}); await collections.semaphores.deleteMany({}); console.log("Reset done"); } if (flags.includes("users") || flags.includes("all")) { console.log("Creating 100 new users"); const newUsers: User[] = Array.from({ length: 100 }, () => ({ _id: new ObjectId(), createdAt: faker.date.recent({ days: 30 }), updatedAt: faker.date.recent({ days: 30 }), username: faker.internet.userName(), name: faker.person.fullName(), hfUserId: faker.string.alphanumeric(24), avatarUrl: faker.image.avatar(), })); await collections.users.insertMany(newUsers); console.log("Done creating users."); } const users = await collections.users.find().toArray(); if (flags.includes("settings") || flags.includes("all")) { console.log("Updating settings for all users"); users.forEach(async (user) => { const settings: Settings = { userId: user._id, shareConversationsWithModelAuthors: faker.datatype.boolean(0.25), hideEmojiOnSidebar: faker.datatype.boolean(0.25), ethicsModalAcceptedAt: faker.date.recent({ days: 30 }), activeModel: faker.helpers.arrayElement(modelIds), createdAt: faker.date.recent({ days: 30 }), updatedAt: faker.date.recent({ days: 30 }), disableStream: faker.datatype.boolean(0.25), directPaste: faker.datatype.boolean(0.25), customPrompts: {}, assistants: [], }; await collections.settings.updateOne( { userId: user._id }, { $set: { ...settings } }, { upsert: true } ); }); console.log("Done updating settings."); } if (flags.includes("assistants") || flags.includes("all")) { console.log("Creating assistants for all users"); await Promise.all( users.map(async (user) => { const name = faker.animal.insect(); const assistants = faker.helpers.multiple<Assistant>( () => ({ _id: new ObjectId(), name, createdById: user._id, createdByName: user.username, createdAt: faker.date.recent({ days: 30 }), updatedAt: faker.date.recent({ days: 30 }), userCount: faker.number.int({ min: 1, max: 100000 }), review: faker.helpers.enumValue(ReviewStatus), modelId: faker.helpers.arrayElement(modelIds), description: faker.lorem.sentence(), preprompt: faker.hacker.phrase(), exampleInputs: faker.helpers.multiple(() => faker.lorem.sentence(), { count: faker.number.int({ min: 0, max: 4 }), }), searchTokens: generateSearchTokens(name), last24HoursCount: faker.number.int({ min: 0, max: 1000 }), }), { count: faker.number.int({ min: 3, max: 10 }) } ); await collections.assistants.insertMany(assistants); await collections.settings.updateOne( { userId: user._id }, { $set: { assistants: assistants.map((a) => a._id.toString()) } }, { upsert: true } ); }) ); console.log("Done creating assistants."); } if (flags.includes("conversations") || flags.includes("all")) { console.log("Creating conversations for all users"); await Promise.all( users.map(async (user) => { const conversations = faker.helpers.multiple( async () => { const settings = await collections.settings.findOne<Settings>({ userId: user._id }); const assistantId = settings?.assistants && settings.assistants.length > 0 && faker.datatype.boolean(0.1) ? 
faker.helpers.arrayElement<ObjectId>(settings.assistants) : undefined; const preprompt = (assistantId ? await collections.assistants .findOne({ _id: assistantId }) .then((assistant: Assistant) => assistant?.preprompt ?? "") : faker.helpers.maybe(() => faker.hacker.phrase(), { probability: 0.5 })) ?? ""; const messages = await generateMessages(preprompt); const conv = { _id: new ObjectId(), userId: user._id, assistantId, preprompt, createdAt: faker.date.recent({ days: 145 }), updatedAt: faker.date.recent({ days: 145 }), model: faker.helpers.arrayElement(modelIds), title: faker.internet.emoji() + " " + faker.hacker.phrase(), embeddingModel: defaultEmbeddingModel.id, messages, rootMessageId: messages[0].id, } satisfies Conversation; return conv; }, { count: faker.number.int({ min: 10, max: 200 }) } ); await collections.conversations.insertMany(await Promise.all(conversations)); }) ); console.log("Done creating conversations."); } // generate Community Tools if (flags.includes("tools") || flags.includes("all")) { const tools = await Promise.all( faker.helpers.multiple( () => { const _id = new ObjectId(); const displayName = faker.company.catchPhrase(); const description = faker.company.catchPhrase(); const color = faker.helpers.arrayElement([ "purple", "blue", "green", "yellow", "red", ]) satisfies ToolLogoColor; const icon = faker.helpers.arrayElement([ "wikis", "tools", "camera", "code", "email", "cloud", "terminal", "game", "chat", "speaker", "video", ]) satisfies ToolLogoIcon; const baseUrl = faker.helpers.arrayElement([ "stabilityai/stable-diffusion-3-medium", "multimodalart/cosxl", "gokaygokay/SD3-Long-Captioner", "xichenhku/MimicBrush", ]); // keep empty for populate for now const user: User = faker.helpers.arrayElement(users); const createdById = user._id; const createdByName = user.username ?? user.name; return { type: "community" as const, _id, createdById, createdByName, displayName, name: displayName.toLowerCase().replace(" ", "_"), endpoint: "/test", description, color, icon, baseUrl, inputs: [], outputPath: null, outputType: "str" as const, showOutput: false, useCount: faker.number.int({ min: 0, max: 100000 }), last24HoursUseCount: faker.number.int({ min: 0, max: 1000 }), createdAt: faker.date.recent({ days: 30 }), updatedAt: faker.date.recent({ days: 30 }), searchTokens: generateSearchTokens(displayName), review: faker.helpers.enumValue(ReviewStatus), outputComponent: null, outputComponentIdx: null, }; }, { count: faker.number.int({ min: 10, max: 200 }) } ) ); await collections.tools.insertMany(tools satisfies CommunityToolDB[]); } } // run seed (async () => { try { rl.question( "You're about to run a seeding script on the following MONGODB_URL: \x1b[31m" + env.MONGODB_URL + "\x1b[0m\n\n With the following flags: \x1b[31m" + flags.join("\x1b[0m , \x1b[31m") + "\x1b[0m\n \n\n Are you sure you want to continue? (yes/no): ", async (confirm) => { if (confirm !== "yes") { console.log("Not 'yes', exiting."); rl.close(); process.exit(0); } console.log("Starting seeding..."); await seed(); console.log("Seeding done."); rl.close(); } ); } catch (e) { console.error(e); process.exit(1); } })();
chat-ui/scripts/populate.ts/0
{ "file_path": "chat-ui/scripts/populate.ts", "repo_id": "chat-ui", "token_count": 4497 }
<script lang="ts"> import { base } from "$app/paths"; import { page } from "$app/state"; import { env as envPublic } from "$env/dynamic/public"; import LogoHuggingFaceBorderless from "$lib/components/icons/LogoHuggingFaceBorderless.svelte"; import Modal from "$lib/components/Modal.svelte"; import { useSettingsStore } from "$lib/stores/settings"; import { cookiesAreEnabled } from "$lib/utils/cookiesAreEnabled"; import Logo from "./icons/Logo.svelte"; const settings = useSettingsStore(); </script> <Modal on:close> <div class="from-primary-500/40 via-primary-500/10 to-primary-500/0 flex w-full flex-col items-center gap-6 bg-gradient-to-b px-5 pb-8 pt-9 text-center sm:px-6" > <h2 class="flex items-center text-2xl font-semibold text-gray-800"> <Logo classNames="mr-1" /> {envPublic.PUBLIC_APP_NAME} </h2> <p class="text-lg font-semibold leading-snug text-gray-800" style="text-wrap: balance;"> {envPublic.PUBLIC_APP_DESCRIPTION} </p> <p class="text-sm text-gray-500"> {envPublic.PUBLIC_APP_DISCLAIMER_MESSAGE} </p> <div class="flex w-full flex-col items-center gap-2"> <button class="w-full justify-center rounded-full border-2 border-gray-300 bg-black px-5 py-2 text-lg font-semibold text-gray-100 transition-colors hover:bg-gray-900" class:bg-white={page.data.loginEnabled} class:text-gray-800={page.data.loginEnabled} class:hover:bg-slate-100={page.data.loginEnabled} onclick={(e) => { e.preventDefault(); e.stopPropagation(); if (!cookiesAreEnabled()) { window.open(window.location.href, "_blank"); } $settings.ethicsModalAccepted = true; }} > {#if page.data.loginEnabled} {#if page.data.guestMode} Continue as guest {:else} Explore the app {/if} {:else} Start chatting {/if} </button> {#if page.data.loginEnabled} <form action="{base}/login" target="_parent" method="POST" class="w-full"> <button type="submit" class="flex w-full flex-wrap items-center justify-center whitespace-nowrap rounded-full border-2 border-black bg-black px-5 py-2 text-lg font-semibold text-gray-100 transition-colors hover:bg-gray-900" > Sign in {#if envPublic.PUBLIC_APP_NAME === "HuggingChat"} <span class="flex items-center"> &nbsp;with <LogoHuggingFaceBorderless classNames="text-xl mr-1 ml-1.5 flex-none" /> Hugging Face </span> {/if} </button> </form> {/if} </div> </div> </Modal>
chat-ui/src/lib/components/DisclaimerModal.svelte/0
{ "file_path": "chat-ui/src/lib/components/DisclaimerModal.svelte", "repo_id": "chat-ui", "token_count": 1086 }
<script lang="ts"> import { fade } from "svelte/transition"; import { onDestroy, untrack } from "svelte"; import IconChevron from "./icons/IconChevron.svelte"; let visible = $state(false); interface Props { scrollNode: HTMLElement; class?: string; } let { scrollNode, class: className = "" }: Props = $props(); let observer: ResizeObserver | null = $state(null); function updateVisibility() { if (!scrollNode) return; visible = Math.ceil(scrollNode.scrollTop) + 200 < scrollNode.scrollHeight - scrollNode.clientHeight && scrollNode.scrollTop > 200; } function scrollToPrevious() { if (!scrollNode) return; const messages = scrollNode.querySelectorAll("[data-message-id]"); const scrollTop = scrollNode.scrollTop; let previousMessage: Element | null = null; for (let i = messages.length - 1; i >= 0; i--) { const messageTop = messages[i].getBoundingClientRect().top + scrollTop - scrollNode.getBoundingClientRect().top; if (messageTop < scrollTop - 1) { previousMessage = messages[i]; break; } } if (previousMessage) { previousMessage.scrollIntoView({ behavior: "smooth", block: "start" }); } } function destroy() { observer?.disconnect(); scrollNode?.removeEventListener("scroll", updateVisibility); } onDestroy(destroy); $effect(() => { scrollNode && untrack(() => { if (scrollNode) { destroy(); if (window.ResizeObserver) { observer = new ResizeObserver(() => { updateVisibility(); }); observer.observe(scrollNode); } scrollNode.addEventListener("scroll", updateVisibility); } }); }); </script> {#if visible} <button transition:fade={{ duration: 150 }} onclick={scrollToPrevious} class="btn absolute flex h-[41px] w-[41px] rounded-full border bg-white shadow-md transition-all hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-700 dark:shadow-gray-950 dark:hover:bg-gray-600 {className}" > <IconChevron classNames="rotate-180 mt-[2px]" /> </button> {/if}
chat-ui/src/lib/components/ScrollToPreviousBtn.svelte/0
{ "file_path": "chat-ui/src/lib/components/ScrollToPreviousBtn.svelte", "repo_id": "chat-ui", "token_count": 771 }
<script lang="ts"> import { run } from "svelte/legacy"; import type { Message } from "$lib/types/Message"; import { createEventDispatcher, tick } from "svelte"; import { page } from "$app/state"; import CopyToClipBoardBtn from "../CopyToClipBoardBtn.svelte"; import IconLoading from "../icons/IconLoading.svelte"; import CarbonRotate360 from "~icons/carbon/rotate-360"; import CarbonDownload from "~icons/carbon/download"; import CarbonPen from "~icons/carbon/pen"; import UploadedFile from "./UploadedFile.svelte"; import OpenWebSearchResults from "../OpenWebSearchResults.svelte"; import { MessageUpdateType, MessageWebSearchUpdateType, type MessageToolUpdate, type MessageWebSearchSourcesUpdate, type MessageWebSearchUpdate, type MessageFinalAnswerUpdate, type MessageReasoningUpdate, MessageReasoningUpdateType, } from "$lib/types/MessageUpdate"; import { base } from "$app/paths"; import ToolUpdate from "./ToolUpdate.svelte"; import MarkdownRenderer from "./MarkdownRenderer.svelte"; import OpenReasoningResults from "./OpenReasoningResults.svelte"; import Alternatives from "./Alternatives.svelte"; import Vote from "./Vote.svelte"; interface Props { message: Message; loading?: boolean; isAuthor?: boolean; readOnly?: boolean; isTapped?: boolean; alternatives?: Message["id"][]; editMsdgId?: Message["id"] | null; isLast?: boolean; } let { message, loading = false, isAuthor = true, readOnly = false, isTapped = $bindable(false), alternatives = [], editMsdgId = $bindable(null), isLast = false, }: Props = $props(); const dispatch = createEventDispatcher<{ retry: { content?: string; id: Message["id"] }; }>(); let contentEl: HTMLElement | undefined = $state(); let isCopied = $state(false); function handleKeyDown(e: KeyboardEvent) { if (e.key === "Enter" && (e.metaKey || e.ctrlKey)) { editFormEl?.requestSubmit(); } } let editContentEl: HTMLTextAreaElement | undefined = $state(); let editFormEl: HTMLFormElement | undefined = $state(); let searchUpdates = $derived( (message.updates?.filter(({ type }) => type === MessageUpdateType.WebSearch) ?? []) as MessageWebSearchUpdate[] ); let reasoningUpdates = $derived( (message.updates?.filter(({ type }) => type === MessageUpdateType.Reasoning) ?? []) as MessageReasoningUpdate[] ); let messageFinalAnswer = $derived( message.updates?.find( ({ type }) => type === MessageUpdateType.FinalAnswer ) as MessageFinalAnswerUpdate ); // filter all updates with type === "tool" then group them by uuid field let toolUpdates = $derived( message.updates ?.filter(({ type }) => type === "tool") .reduce( (acc, update) => { if (update.type !== "tool") { return acc; } acc[update.uuid] = acc[update.uuid] ?? 
[]; acc[update.uuid].push(update); return acc; }, {} as Record<string, MessageToolUpdate[]> ) ); let urlNotTrailing = $derived(page.url.pathname.replace(/\/$/, "")); let downloadLink = $derived(urlNotTrailing + `/message/${message.id}/prompt`); let webSearchSources = $derived( searchUpdates?.find( (update): update is MessageWebSearchSourcesUpdate => update.subtype === MessageWebSearchUpdateType.Sources )?.sources ); run(() => { if (isCopied) { setTimeout(() => { isCopied = false; }, 1000); } }); let editMode = $derived(editMsdgId === message.id); run(() => { if (editMode) { tick(); if (editContentEl) { editContentEl.value = message.content; editContentEl?.focus(); } } }); </script> {#if message.from === "assistant"} <div class="group relative -mb-4 flex items-start justify-start gap-4 pb-4 leading-relaxed" data-message-id={message.id} data-message-role="assistant" role="presentation" onclick={() => (isTapped = !isTapped)} onkeydown={() => (isTapped = !isTapped)} > {#if page.data?.assistant?.avatar} <img src="{base}/settings/assistants/{page.data.assistant._id}/avatar.jpg" alt="Avatar" class="mt-5 h-3 w-3 flex-none select-none rounded-full shadow-lg" /> {:else} <img alt="" src="https://huggingface.co/avatars/2edb18bd0206c16b433841a47f53fa8e.svg" class="mt-5 h-3 w-3 flex-none select-none rounded-full shadow-lg" /> {/if} <div class="relative min-h-[calc(2rem+theme(spacing[3.5])*2)] min-w-[60px] break-words rounded-2xl border border-gray-100 bg-gradient-to-br from-gray-50 px-5 py-3.5 text-gray-600 prose-pre:my-2 dark:border-gray-800 dark:from-gray-800/40 dark:text-gray-300" > {#if message.files?.length} <div class="flex h-fit flex-wrap gap-x-5 gap-y-2"> {#each message.files as file} <UploadedFile {file} canClose={false} /> {/each} </div> {/if} {#if searchUpdates && searchUpdates.length > 0} <OpenWebSearchResults webSearchMessages={searchUpdates} /> {/if} {#if reasoningUpdates && reasoningUpdates.length > 0 && message.reasoning && message.reasoning.trim().length > 0} {@const summaries = reasoningUpdates .filter((u) => u.subtype === MessageReasoningUpdateType.Status) .map((u) => u.status)} <OpenReasoningResults summary={summaries[summaries.length - 1] || ""} content={message.reasoning || ""} loading={loading && message.content.length === 0} /> {/if} {#if toolUpdates} {#each Object.values(toolUpdates) as tool} {#if tool.length} {#key tool[0].uuid} <ToolUpdate {tool} {loading} /> {/key} {/if} {/each} {/if} <div bind:this={contentEl} class:mt-2={reasoningUpdates.length > 0 || searchUpdates.length > 0} > {#if isLast && loading && message.content.length === 0} <IconLoading classNames="loading inline ml-2 first:ml-0" /> {/if} <div class="prose max-w-none dark:prose-invert max-sm:prose-sm prose-headings:font-semibold prose-h1:text-lg prose-h2:text-base prose-h3:text-base prose-pre:bg-gray-800 dark:prose-pre:bg-gray-900" > <MarkdownRenderer content={message.content} sources={webSearchSources} /> </div> </div> <!-- Web Search sources --> {#if webSearchSources?.length} <div class="mt-4 flex flex-wrap items-center gap-x-2 gap-y-1.5 text-sm"> <div class="text-gray-400">Sources:</div> {#each webSearchSources as { link, title }} <a class="flex items-center gap-2 whitespace-nowrap rounded-lg border bg-white px-2 py-1.5 leading-none hover:border-gray-300 dark:border-gray-800 dark:bg-gray-900 dark:hover:border-gray-700" href={link} target="_blank" > <img class="h-3.5 w-3.5 rounded" src="https://www.google.com/s2/favicons?sz=64&domain_url={new URL(link).hostname || 'placeholder'}" alt="{title} favicon" 
/> <div>{new URL(link).hostname.replace(/^www\./, "")}</div> </a> {/each} </div> {/if} <!-- Endpoint web sources --> {#if messageFinalAnswer?.webSources && messageFinalAnswer.webSources.length} <div class="mt-4 flex flex-wrap items-center gap-x-2 gap-y-1.5 text-sm"> <div class="text-gray-400">Sources:</div> {#each messageFinalAnswer.webSources as { uri, title }} <a class="flex items-center gap-2 whitespace-nowrap rounded-lg border bg-white px-2 py-1.5 leading-none hover:border-gray-300 dark:border-gray-800 dark:bg-gray-900 dark:hover:border-gray-700" href={uri} target="_blank" > <img class="h-3.5 w-3.5 rounded" src="https://www.google.com/s2/favicons?sz=64&domain_url={new URL(uri).hostname || 'placeholder'}" alt="{title} favicon" /> <div>{title}</div> </a> {/each} </div> {/if} </div> {#if !loading && (message.content || toolUpdates)} <div class="absolute -bottom-4 right-0 flex max-md:transition-all md:group-hover:visible md:group-hover:opacity-100 {message.score ? 'visible opacity-100' : 'invisible max-md:-translate-y-4 max-md:opacity-0'} {isTapped || isCopied ? 'max-md:visible max-md:translate-y-0 max-md:opacity-100' : ''} " > {#if isAuthor} <Vote {message} on:vote /> {/if} <button class="btn rounded-sm p-1 text-sm text-gray-400 hover:text-gray-500 focus:ring-0 dark:text-gray-400 dark:hover:text-gray-300" title="Retry" type="button" onclick={() => { dispatch("retry", { id: message.id }); }} > <CarbonRotate360 /> </button> <CopyToClipBoardBtn onClick={() => { isCopied = true; }} classNames="btn rounded-sm p-1 text-sm text-gray-400 hover:text-gray-500 focus:ring-0 dark:text-gray-400 dark:hover:text-gray-300" value={message.content} /> </div> {/if} </div> {#if alternatives.length > 1 && editMsdgId === null} <Alternatives {message} {alternatives} {loading} on:showAlternateMsg /> {/if} {/if} {#if message.from === "user"} <div class="group relative w-full items-start justify-start gap-4 max-sm:text-sm" data-message-id={message.id} data-message-type="user" role="presentation" onclick={() => (isTapped = !isTapped)} onkeydown={() => (isTapped = !isTapped)} > <div class="flex w-full flex-col gap-2"> {#if message.files?.length} <div class="flex w-fit gap-4 px-5"> {#each message.files as file} <UploadedFile {file} canClose={false} /> {/each} </div> {/if} <div class="flex w-full flex-row flex-nowrap"> {#if !editMode} <p class="disabled w-full appearance-none whitespace-break-spaces text-wrap break-words bg-inherit px-5 py-3.5 text-gray-500 dark:text-gray-400" > {message.content.trim()} </p> {:else} <form class="flex w-full flex-col" bind:this={editFormEl} onsubmit={(e) => { e.preventDefault(); dispatch("retry", { content: editContentEl?.value, id: message.id }); editMsdgId = null; }} > <textarea class="w-full whitespace-break-spaces break-words rounded-xl bg-gray-100 px-5 py-3.5 text-gray-500 *:h-max dark:bg-gray-800 dark:text-gray-400" rows="5" bind:this={editContentEl} value={message.content.trim()} onkeydown={handleKeyDown} required ></textarea> <div class="flex w-full flex-row flex-nowrap items-center justify-center gap-2 pt-2"> <button type="submit" class="btn rounded-lg px-3 py-1.5 text-sm {loading ? 
'bg-gray-300 text-gray-400 dark:bg-gray-700 dark:text-gray-600' : 'bg-gray-200 text-gray-600 hover:text-gray-800 focus:ring-0 dark:bg-gray-800 dark:text-gray-300 dark:hover:text-gray-200'} " disabled={loading} > Submit </button> <button type="button" class="btn rounded-sm p-2 text-sm text-gray-400 hover:text-gray-500 focus:ring-0 dark:text-gray-400 dark:hover:text-gray-300" onclick={() => { editMsdgId = null; }} > Cancel </button> </div> </form> {/if} {#if !loading && !editMode} <div class=" max-md:opacity-0' invisible absolute right-0 top-3.5 z-10 h-max max-md:-translate-y-4 max-md:transition-all md:bottom-0 md:group-hover:visible md:group-hover:opacity-100 {isTapped || isCopied ? 'max-md:visible max-md:translate-y-0 max-md:opacity-100' : ''}" > <div class="mx-auto flex flex-row flex-nowrap gap-2"> <a class="rounded-lg border border-gray-100 bg-gray-100 p-1 text-xs text-gray-400 group-hover:block hover:text-gray-500 dark:border-gray-800 dark:bg-gray-800 dark:text-gray-400 dark:hover:text-gray-300 max-sm:!hidden md:hidden" title="Download prompt and parameters" type="button" target="_blank" href={downloadLink} > <CarbonDownload /> </a> {#if !readOnly} <button class="cursor-pointer rounded-lg border border-gray-100 bg-gray-100 p-1 text-xs text-gray-400 group-hover:block hover:text-gray-500 dark:border-gray-800 dark:bg-gray-800 dark:text-gray-400 dark:hover:text-gray-300 md:hidden lg:-right-2" title="Branch" type="button" onclick={() => (editMsdgId = message.id)} > <CarbonPen /> </button> {/if} </div> </div> {/if} </div> {#if alternatives.length > 1 && editMsdgId === null} <Alternatives {message} {alternatives} {loading} on:showAlternateMsg /> {/if} </div> </div> {/if} <style> @keyframes loading { to { stroke-dashoffset: 122.9; } } </style>
chat-ui/src/lib/components/chat/ChatMessage.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/ChatMessage.svelte", "repo_id": "chat-ui", "token_count": 5655 }
<script lang="ts"> interface Props { classNames?: string; } let { classNames = "" }: Props = $props(); </script> <svg class={classNames} xmlns="http://www.w3.org/2000/svg" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" fill="currentColor" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32" ><path d="M19.02 5.57a5.77 5.77 0 1 1 8.56 7.74L16.6 25.45l-.02.01v.01A7.87 7.87 0 0 1 4.92 14.9L12.95 6A1.18 1.18 0 0 1 14.7 7.6l-8.03 8.87a5.51 5.51 0 1 0 8.19 7.4l10.97-12.14a3.41 3.41 0 1 0-5.06-4.58l-9.32 10.3a1.27 1.27 0 1 0 1.88 1.7l6.28-6.94a1.18 1.18 0 0 1 1.75 1.59l-6.28 6.94a3.63 3.63 0 0 1-5.41-4.83l.02-.02 9.33-10.32Z" fill="currentColor" /></svg >
chat-ui/src/lib/components/icons/IconPaperclip.svelte/0
{ "file_path": "chat-ui/src/lib/components/icons/IconPaperclip.svelte", "repo_id": "chat-ui", "token_count": 381 }
import type { Migration } from "."; import { collections } from "$lib/server/database"; import { ObjectId, type WithId } from "mongodb"; import type { Conversation } from "$lib/types/Conversation"; import type { WebSearchSource } from "$lib/types/WebSearch"; import { MessageUpdateStatus, MessageUpdateType, MessageWebSearchUpdateType, type MessageUpdate, type MessageWebSearchFinishedUpdate, } from "$lib/types/MessageUpdate"; import type { Message } from "$lib/types/Message"; import { isMessageWebSearchSourcesUpdate } from "$lib/utils/messageUpdates"; // ----------- // Copy of the previous message update types export type FinalAnswer = { type: "finalAnswer"; text: string; }; export type TextStreamUpdate = { type: "stream"; token: string; }; type WebSearchUpdate = { type: "webSearch"; messageType: "update" | "error" | "sources"; message: string; args?: string[]; sources?: WebSearchSource[]; }; type StatusUpdate = { type: "status"; status: "started" | "pending" | "finished" | "error" | "title"; message?: string; }; type ErrorUpdate = { type: "error"; message: string; name: string; }; type FileUpdate = { type: "file"; sha: string; }; type OldMessageUpdate = | FinalAnswer | TextStreamUpdate | WebSearchUpdate | StatusUpdate | ErrorUpdate | FileUpdate; /** Converts the old message update to the new schema */ function convertMessageUpdate(message: Message, update: OldMessageUpdate): MessageUpdate | null { try { // Text and files if (update.type === "finalAnswer") { return { type: MessageUpdateType.FinalAnswer, text: update.text, interrupted: message.interrupted ?? false, }; } else if (update.type === "stream") { return { type: MessageUpdateType.Stream, token: update.token, }; } else if (update.type === "file") { return { type: MessageUpdateType.File, name: "Unknown", sha: update.sha, // assume jpeg but could be any image. should be harmless mime: "image/jpeg", }; } // Status else if (update.type === "status") { if (update.status === "title") { return { type: MessageUpdateType.Title, title: update.message ?? "New Chat", }; } if (update.status === "pending") return null; const status = update.status === "started" ? MessageUpdateStatus.Started : update.status === "finished" ? MessageUpdateStatus.Finished : MessageUpdateStatus.Error; return { type: MessageUpdateType.Status, status, message: update.message, }; } else if (update.type === "error") { // Treat it as an error status update return { type: MessageUpdateType.Status, status: MessageUpdateStatus.Error, message: update.message, }; } // Web Search else if (update.type === "webSearch") { if (update.messageType === "update") { return { type: MessageUpdateType.WebSearch, subtype: MessageWebSearchUpdateType.Update, message: update.message, args: update.args, }; } else if (update.messageType === "error") { return { type: MessageUpdateType.WebSearch, subtype: MessageWebSearchUpdateType.Error, message: update.message, args: update.args, }; } else if (update.messageType === "sources") { return { type: MessageUpdateType.WebSearch, subtype: MessageWebSearchUpdateType.Sources, message: update.message, sources: update.sources ?? [], }; } } console.warn("Unknown message update during migration:", update); return null; } catch (error) { console.error("Error converting message update during migration. Skipping it... 
Error:", error); return null; } } const updateMessageUpdates: Migration = { _id: new ObjectId("5f9f7f7f7f7f7f7f7f7f7f7f"), name: "Convert message updates to the new schema", up: async () => { const allConversations = collections.conversations.find({}); let conversation: WithId<Pick<Conversation, "messages">> | null = null; while ((conversation = await allConversations.tryNext())) { const messages = conversation.messages.map((message) => { // Convert all of the existing updates to the new schema const updates = message.updates ?.map((update) => convertMessageUpdate(message, update as OldMessageUpdate)) .filter((update): update is MessageUpdate => Boolean(update)); // Add the new web search finished update if the sources update exists and webSearch is defined const webSearchSourcesUpdateIndex = updates?.findIndex(isMessageWebSearchSourcesUpdate); if ( message.webSearch && updates && webSearchSourcesUpdateIndex && webSearchSourcesUpdateIndex !== -1 ) { const webSearchFinishedUpdate: MessageWebSearchFinishedUpdate = { type: MessageUpdateType.WebSearch, subtype: MessageWebSearchUpdateType.Finished, }; updates.splice(webSearchSourcesUpdateIndex + 1, 0, webSearchFinishedUpdate); } return { ...message, updates }; }); // Set the new messages array await collections.conversations.updateOne({ _id: conversation._id }, { $set: { messages } }); } return true; }, runEveryTime: false, }; export default updateMessageUpdates;
chat-ui/src/lib/migrations/routines/04-update-message-updates.ts/0
{ "file_path": "chat-ui/src/lib/migrations/routines/04-update-message-updates.ts", "repo_id": "chat-ui", "token_count": 1830 }
import { env } from "$env/dynamic/private"; import { z } from "zod"; import { sum } from "$lib/utils/sum"; import { embeddingEndpoints, embeddingEndpointSchema, type EmbeddingEndpoint, } from "$lib/server/embeddingEndpoints/embeddingEndpoints"; import { embeddingEndpointTransformersJS } from "$lib/server/embeddingEndpoints/transformersjs/embeddingEndpoints"; import JSON5 from "json5"; const modelConfig = z.object({ /** Used as an identifier in DB */ id: z.string().optional(), /** Used to link to the model page, and for inference */ name: z.string().min(1), displayName: z.string().min(1).optional(), description: z.string().min(1).optional(), websiteUrl: z.string().url().optional(), modelUrl: z.string().url().optional(), endpoints: z.array(embeddingEndpointSchema).nonempty(), chunkCharLength: z.number().positive(), maxBatchSize: z.number().positive().optional(), preQuery: z.string().default(""), prePassage: z.string().default(""), }); // Default embedding model for backward compatibility const rawEmbeddingModelJSON = env.TEXT_EMBEDDING_MODELS || `[ { "name": "Xenova/gte-small", "chunkCharLength": 512, "endpoints": [ { "type": "transformersjs" } ] } ]`; const embeddingModelsRaw = z.array(modelConfig).parse(JSON5.parse(rawEmbeddingModelJSON)); const processEmbeddingModel = async (m: z.infer<typeof modelConfig>) => ({ ...m, id: m.id || m.name, }); const addEndpoint = (m: Awaited<ReturnType<typeof processEmbeddingModel>>) => ({ ...m, getEndpoint: async (): Promise<EmbeddingEndpoint> => { if (!m.endpoints) { return embeddingEndpointTransformersJS({ type: "transformersjs", weight: 1, model: m, }); } const totalWeight = sum(m.endpoints.map((e) => e.weight)); let random = Math.random() * totalWeight; for (const endpoint of m.endpoints) { if (random < endpoint.weight) { const args = { ...endpoint, model: m }; switch (args.type) { case "tei": return embeddingEndpoints.tei(args); case "transformersjs": return embeddingEndpoints.transformersjs(args); case "openai": return embeddingEndpoints.openai(args); case "hfapi": return embeddingEndpoints.hfapi(args); default: throw new Error(`Unknown endpoint type: ${args}`); } } random -= endpoint.weight; } throw new Error(`Failed to select embedding endpoint`); }, }); export const embeddingModels = await Promise.all( embeddingModelsRaw.map((e) => processEmbeddingModel(e).then(addEndpoint)) ); export const defaultEmbeddingModel = embeddingModels[0]; const validateEmbeddingModel = (_models: EmbeddingBackendModel[], key: "id" | "name") => { return z.enum([_models[0][key], ..._models.slice(1).map((m) => m[key])]); }; export const validateEmbeddingModelById = (_models: EmbeddingBackendModel[]) => { return validateEmbeddingModel(_models, "id"); }; export const validateEmbeddingModelByName = (_models: EmbeddingBackendModel[]) => { return validateEmbeddingModel(_models, "name"); }; export type EmbeddingBackendModel = typeof defaultEmbeddingModel;
chat-ui/src/lib/server/embeddingModels.ts/0
{ "file_path": "chat-ui/src/lib/server/embeddingModels.ts", "repo_id": "chat-ui", "token_count": 1115 }
import { z } from "zod"; import { openAICompletionToTextGenerationStream } from "./openAICompletionToTextGenerationStream"; import { openAIChatToTextGenerationStream } from "./openAIChatToTextGenerationStream"; import type { CompletionCreateParamsStreaming } from "openai/resources/completions"; import type { ChatCompletionCreateParamsStreaming, ChatCompletionTool, } from "openai/resources/chat/completions"; import type { FunctionDefinition, FunctionParameters } from "openai/resources/shared"; import { buildPrompt } from "$lib/buildPrompt"; import { env } from "$env/dynamic/private"; import type { Endpoint } from "../endpoints"; import type OpenAI from "openai"; import { createImageProcessorOptionsValidator, makeImageProcessor } from "../images"; import type { MessageFile } from "$lib/types/Message"; import { type Tool } from "$lib/types/Tool"; import type { EndpointMessage } from "../endpoints"; import { v4 as uuidv4 } from "uuid"; function createChatCompletionToolsArray(tools: Tool[] | undefined): ChatCompletionTool[] { const toolChoices = [] as ChatCompletionTool[]; if (tools === undefined) { return toolChoices; } for (const t of tools) { const requiredProperties = [] as string[]; const properties = {} as Record<string, unknown>; for (const idx in t.inputs) { const parameterDefinition = t.inputs[idx]; const parameter = {} as Record<string, unknown>; switch (parameterDefinition.type) { case "str": parameter.type = "string"; break; case "float": case "int": parameter.type = "number"; break; case "bool": parameter.type = "boolean"; break; case "file": throw new Error("File type's currently not supported"); default: throw new Error(`Unknown tool IO type: ${t}`); } if ("description" in parameterDefinition) { parameter.description = parameterDefinition.description; } if (parameterDefinition.paramType == "required") { requiredProperties.push(t.inputs[idx].name); } properties[t.inputs[idx].name] = parameter; } const functionParameters: FunctionParameters = { type: "object", ...(requiredProperties.length > 0 ? 
{ required: requiredProperties } : {}), properties, }; const functionDefinition: FunctionDefinition = { name: t.name, description: t.description, parameters: functionParameters, }; const toolDefinition: ChatCompletionTool = { type: "function", function: functionDefinition, }; toolChoices.push(toolDefinition); } return toolChoices; } export const endpointOAIParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("openai"), baseURL: z.string().url().default("https://api.openai.com/v1"), apiKey: z.string().default(env.OPENAI_API_KEY || env.HF_TOKEN || "sk-"), completion: z .union([z.literal("completions"), z.literal("chat_completions")]) .default("chat_completions"), defaultHeaders: z.record(z.string()).optional(), defaultQuery: z.record(z.string()).optional(), extraBody: z.record(z.any()).optional(), multimodal: z .object({ image: createImageProcessorOptionsValidator({ supportedMimeTypes: [ "image/png", "image/jpeg", "image/webp", "image/avif", "image/tiff", "image/gif", ], preferredMimeType: "image/webp", maxSizeInMB: Infinity, maxWidth: 4096, maxHeight: 4096, }), }) .default({}), /* enable use of max_completion_tokens in place of max_tokens */ useCompletionTokens: z.boolean().default(false), }); export async function endpointOai( input: z.input<typeof endpointOAIParametersSchema> ): Promise<Endpoint> { const { baseURL, apiKey, completion, model, defaultHeaders, defaultQuery, multimodal, extraBody, useCompletionTokens, } = endpointOAIParametersSchema.parse(input); let OpenAI; try { OpenAI = (await import("openai")).OpenAI; } catch (e) { throw new Error("Failed to import OpenAI", { cause: e }); } const openai = new OpenAI({ apiKey: apiKey || "sk-", baseURL, defaultHeaders, defaultQuery, }); const imageProcessor = makeImageProcessor(multimodal.image); if (completion === "completions") { if (model.tools) { throw new Error( "Tools are not supported for 'completions' mode, switch to 'chat_completions' instead" ); } return async ({ messages, preprompt, continueMessage, generateSettings, conversationId }) => { const prompt = await buildPrompt({ messages, continueMessage, preprompt, model, }); const parameters = { ...model.parameters, ...generateSettings }; const body: CompletionCreateParamsStreaming = { model: model.id ?? model.name, prompt, stream: true, max_tokens: parameters?.max_new_tokens, stop: parameters?.stop, temperature: parameters?.temperature, top_p: parameters?.top_p, frequency_penalty: parameters?.repetition_penalty, presence_penalty: parameters?.presence_penalty, }; const openAICompletion = await openai.completions.create(body, { body: { ...body, ...extraBody }, headers: { "ChatUI-Conversation-ID": conversationId?.toString() ?? "", "X-use-cache": "false", }, }); return openAICompletionToTextGenerationStream(openAICompletion); }; } else if (completion === "chat_completions") { return async ({ messages, preprompt, generateSettings, tools, toolResults, conversationId, }) => { let messagesOpenAI: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = await prepareMessages(messages, imageProcessor, !model.tools && model.multimodal); if (messagesOpenAI?.[0]?.role !== "system") { messagesOpenAI = [{ role: "system", content: "" }, ...messagesOpenAI]; } if (messagesOpenAI?.[0]) { messagesOpenAI[0].content = preprompt ?? ""; } // if system role is not supported, convert first message to a user message. 
if (!model.systemRoleSupported && messagesOpenAI?.[0]?.role === "system") { messagesOpenAI[0] = { ...messagesOpenAI[0], role: "user", }; } if (toolResults && toolResults.length > 0) { const toolCallRequests: OpenAI.Chat.Completions.ChatCompletionAssistantMessageParam = { role: "assistant", content: null, tool_calls: [], }; const responses: Array<OpenAI.Chat.Completions.ChatCompletionToolMessageParam> = []; for (const result of toolResults) { const id = uuidv4(); const toolCallResult: OpenAI.Chat.Completions.ChatCompletionMessageToolCall = { type: "function", function: { name: result.call.name, arguments: JSON.stringify(result.call.parameters), }, id, }; toolCallRequests.tool_calls?.push(toolCallResult); const toolCallResponse: OpenAI.Chat.Completions.ChatCompletionToolMessageParam = { role: "tool", content: "", tool_call_id: id, }; if ("outputs" in result) { toolCallResponse.content = JSON.stringify(result.outputs); } responses.push(toolCallResponse); } messagesOpenAI.push(toolCallRequests); messagesOpenAI.push(...responses); } const parameters = { ...model.parameters, ...generateSettings }; const toolCallChoices = createChatCompletionToolsArray(tools); const body: ChatCompletionCreateParamsStreaming = { model: model.id ?? model.name, messages: messagesOpenAI, stream: true, ...(useCompletionTokens ? { max_completion_tokens: parameters?.max_new_tokens } : { max_tokens: parameters?.max_new_tokens }), stop: parameters?.stop, temperature: parameters?.temperature, top_p: parameters?.top_p, frequency_penalty: parameters?.repetition_penalty, presence_penalty: parameters?.presence_penalty, ...(toolCallChoices.length > 0 ? { tools: toolCallChoices, tool_choice: "auto" } : {}), }; const openChatAICompletion = await openai.chat.completions.create(body, { body: { ...body, ...extraBody }, headers: { "ChatUI-Conversation-ID": conversationId?.toString() ?? "", "X-use-cache": "false", }, }); return openAIChatToTextGenerationStream(openChatAICompletion); }; } else { throw new Error("Invalid completion type"); } } async function prepareMessages( messages: EndpointMessage[], imageProcessor: ReturnType<typeof makeImageProcessor>, isMultimodal: boolean ): Promise<OpenAI.Chat.Completions.ChatCompletionMessageParam[]> { return Promise.all( messages.map(async (message) => { if (message.from === "user" && isMultimodal) { return { role: message.from, content: [ ...(await prepareFiles(imageProcessor, message.files ?? [])), { type: "text", text: message.content }, ], }; } return { role: message.from, content: message.content, }; }) ); } async function prepareFiles( imageProcessor: ReturnType<typeof makeImageProcessor>, files: MessageFile[] ): Promise<OpenAI.Chat.Completions.ChatCompletionContentPartImage[]> { const processedFiles = await Promise.all( files.filter((file) => file.mime.startsWith("image/")).map(imageProcessor) ); return processedFiles.map((file) => ({ type: "image_url" as const, image_url: { url: `data:${file.mime};base64,${file.image.toString("base64")}`, }, })); }
chat-ui/src/lib/server/endpoints/openai/endpointOai.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/openai/endpointOai.ts", "repo_id": "chat-ui", "token_count": 3562 }
import type { ConfigTool } from "$lib/types/Tool"; import { ObjectId } from "mongodb"; import vm from "node:vm"; const calculator: ConfigTool = { _id: new ObjectId("00000000000000000000000C"), type: "config", description: "Calculate the result of a mathematical expression", color: "blue", icon: "code", displayName: "Calculator", name: "calculator", endpoint: null, inputs: [ { name: "equation", type: "str", description: "A mathematical expression to be evaluated. The result of the expression will be returned.", paramType: "required", }, ], outputComponent: null, outputComponentIdx: null, showOutput: false, async *call({ equation }) { try { const blocks = String(equation).split("\n"); const query = blocks[blocks.length - 1].replace(/[^-()\d/*+.]/g, ""); return { outputs: [{ calculator: `${query} = ${vm.runInNewContext(query)}` }], }; } catch (cause) { throw new Error("Invalid expression", { cause }); } }, }; export default calculator;
chat-ui/src/lib/server/tools/calculator.ts/0
{ "file_path": "chat-ui/src/lib/server/tools/calculator.ts", "repo_id": "chat-ui", "token_count": 360 }
import type { SerializedHTMLElement } from "../../scrape/types"; import { MarkdownElementType, type MarkdownElement } from "../types"; // --- Markdown Elements --- /** Converts markdown element to a string with formatting */ export function stringifyMarkdownElement(elem: MarkdownElement): string { const content = elem.content.trim(); if (elem.type === MarkdownElementType.Header) return `${"#".repeat(elem.level)} ${content}\n\n`; if (elem.type === MarkdownElementType.BlockQuote) { return `${"> ".repeat(elem.depth)}${content}\n\n`; } if (elem.type === MarkdownElementType.CodeBlock) return `\`\`\`\n${content}\n\`\`\`\n\n`; if (elem.type === MarkdownElementType.UnorderedListItem) return `- ${content}\n`; if (elem.type === MarkdownElementType.OrderedListItem) { const siblings = elem.parent?.children ?? [elem]; const currentIndex = siblings.indexOf(elem); const lastAdjacentIndex = siblings .slice(currentIndex + 1) .findLastIndex((child) => child.type === MarkdownElementType.OrderedListItem); const order = currentIndex - lastAdjacentIndex + 1; return `${order}. ${content}\n`; } return `${content}\n\n`; } /** Converts a tree of markdown elements to a string with formatting */ export function stringifyMarkdownElementTree(elem: MarkdownElement): string { const stringified = stringifyMarkdownElement(elem); if (!("children" in elem)) return stringified; return stringified + elem.children.map(stringifyMarkdownElementTree).join(""); } // ----- HTML Elements ----- /** Ignores all non-inline tag types and grabs their text. Converts inline tags to markdown */ export function stringifyHTMLElements(elems: (SerializedHTMLElement | string)[]): string { return elems.map(stringifyHTMLElement).join("").trim(); } /** Ignores all non-inline tag types and grabs their text. Converts inline tags to markdown */ export function stringifyHTMLElement(elem: SerializedHTMLElement | string): string { if (typeof elem === "string") return elem; if (elem.tagName === "br") return "\n"; const content = elem.content.map(stringifyHTMLElement).join(""); if (content.length === 0) return content; if (elem.tagName === "strong" || elem.tagName === "b") return `**${content}**`; if (elem.tagName === "em" || elem.tagName === "i") return `*${content}*`; if (elem.tagName === "s" || elem.tagName === "strike") return `~~${content}~~`; if (elem.tagName === "code" || elem.tagName === "var" || elem.tagName === "tt") { return `\`${content}\``; } if (elem.tagName === "sup") return `<sup>${content}</sup>`; if (elem.tagName === "sub") return `<sub>${content}</sub>`; if (elem.tagName === "a" && content.trim().length > 0) { const href = elem.attributes.href; if (!href) return elem.content.map(stringifyHTMLElement).join(""); return `[${elem.content.map(stringifyHTMLElement).join("")}](${href})`; } return elem.content.map(stringifyHTMLElement).join(""); } /** Grabs all text content directly, ignoring HTML tags */ export function stringifyHTMLElementsUnformatted( elems: (SerializedHTMLElement | string)[] ): string { return elems.map(stringifyHTMLElementUnformatted).join(""); } /** Grabs all text content directly, ignoring HTML tags */ function stringifyHTMLElementUnformatted(elem: SerializedHTMLElement | string): string { if (typeof elem === "string") return elem; return elem.content.map(stringifyHTMLElementUnformatted).join(""); }
chat-ui/src/lib/server/websearch/markdown/utils/stringify.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/markdown/utils/stringify.ts", "repo_id": "chat-ui", "token_count": 1149 }