# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.models.seggpt.modeling_seggpt import SegGptImageSegmentationOutput

if is_vision_available():
    from PIL import Image

    from transformers import SegGptImageProcessor


class SegGptImageProcessingTester:
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }

    def expected_output_image_shape(self, images):
        return self.num_channels, self.size["height"], self.size["width"]

    def expected_post_processed_shape(self):
        return self.size["height"] // 2, self.size["width"]

    def get_fake_image_segmentation_output(self):
        torch.manual_seed(42)
        return SegGptImageSegmentationOutput(
            pred_masks=torch.rand(self.batch_size, self.num_channels, self.size["height"], self.size["width"])
        )

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


def prepare_mask():
    ds = load_dataset("EduardoPacheco/seggpt-example-data")["train"]
    return ds[0]["mask"].convert("L")


def prepare_img():
    ds = load_dataset("EduardoPacheco/seggpt-example-data")["train"]
    images = [image.convert("RGB") for image in ds["image"]]
    masks = [image.convert("RGB") for image in ds["mask"]]
    return images, masks


@require_torch
@require_vision
class SegGptImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = SegGptImageProcessor if is_vision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = SegGptImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_palette(self):
        num_labels = 3
        image_processing = self.image_processing_class(**self.image_processor_dict)
        palette = image_processing.get_palette(num_labels)
        self.assertEqual(len(palette), num_labels + 1)
        self.assertEqual(palette[0], (0, 0, 0))

    def test_mask_equivalence(self):
        image_processor = SegGptImageProcessor()

        mask_binary = prepare_mask()
        mask_rgb = mask_binary.convert("RGB")

        inputs_binary = image_processor(images=None, prompt_masks=mask_binary, return_tensors="pt")
        inputs_rgb = image_processor(images=None, prompt_masks=mask_rgb, return_tensors="pt", do_convert_rgb=False)

        self.assertTrue((inputs_binary["prompt_masks"] == inputs_rgb["prompt_masks"]).all().item())

    def test_mask_to_rgb(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)

        mask = prepare_mask()
        mask = np.array(mask)
        mask = (mask > 0).astype(np.uint8)

        def check_two_colors(image, color1=(0, 0, 0), color2=(255, 255, 255)):
            pixels = image.transpose(1, 2, 0).reshape(-1, 3)
            unique_colors = np.unique(pixels, axis=0)
            if len(unique_colors) == 2 and (color1 in unique_colors) and (color2 in unique_colors):
                return True
            else:
                return False

        num_labels = 1
        palette = image_processing.get_palette(num_labels)

        # Should only duplicate repeat class indices map, hence only (0,0,0) and (1,1,1)
        mask_duplicated = image_processing.mask_to_rgb(mask)
        # Mask using palette, since only 1 class is present we have colors (0,0,0) and (255,255,255)
        mask_painted = image_processing.mask_to_rgb(mask, palette=palette)

        self.assertTrue(check_two_colors(mask_duplicated, color2=(1, 1, 1)))
        self.assertTrue(check_two_colors(mask_painted, color2=(255, 255, 255)))

    def test_post_processing_semantic_segmentation(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        outputs = self.image_processor_tester.get_fake_image_segmentation_output()
        post_processed = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(post_processed), self.image_processor_tester.batch_size)

        expected_semantic_map_shape = self.image_processor_tester.expected_post_processed_shape()
        self.assertEqual(post_processed[0].shape, expected_semantic_map_shape)

    @slow
    def test_pixel_values(self):
        images, masks = prepare_img()
        input_image = images[1]
        prompt_image = images[0]
        prompt_mask = masks[0]

        image_processor = SegGptImageProcessor.from_pretrained("BAAI/seggpt-vit-large")

        inputs = image_processor(
            images=input_image,
            prompt_images=prompt_image,
            prompt_masks=prompt_mask,
            return_tensors="pt",
            do_convert_rgb=False,
        )

        # Verify pixel values
        expected_prompt_pixel_values = torch.tensor(
            [
                [[-0.6965, -0.6965, -0.6965], [-0.6965, -0.6965, -0.6965], [-0.6965, -0.6965, -0.6965]],
                [[1.6583, 1.6583, 1.6583], [1.6583, 1.6583, 1.6583], [1.6583, 1.6583, 1.6583]],
                [[2.3088, 2.3088, 2.3088], [2.3088, 2.3088, 2.3088], [2.3088, 2.3088, 2.3088]],
            ]
        )

        expected_pixel_values = torch.tensor(
            [
                [[1.6324, 1.6153, 1.5810], [1.6153, 1.5982, 1.5810], [1.5810, 1.5639, 1.5639]],
                [[1.2731, 1.2556, 1.2206], [1.2556, 1.2381, 1.2031], [1.2206, 1.2031, 1.1681]],
                [[1.6465, 1.6465, 1.6465], [1.6465, 1.6465, 1.6465], [1.6291, 1.6291, 1.6291]],
            ]
        )

        expected_prompt_masks = torch.tensor(
            [
                [[-2.1179, -2.1179, -2.1179], [-2.1179, -2.1179, -2.1179], [-2.1179, -2.1179, -2.1179]],
                [[-2.0357, -2.0357, -2.0357], [-2.0357, -2.0357, -2.0357], [-2.0357, -2.0357, -2.0357]],
                [[-1.8044, -1.8044, -1.8044], [-1.8044, -1.8044, -1.8044], [-1.8044, -1.8044, -1.8044]],
            ]
        )

        torch.testing.assert_close(inputs.pixel_values[0, :, :3, :3], expected_pixel_values, rtol=1e-4, atol=1e-4)
        torch.testing.assert_close(
            inputs.prompt_pixel_values[0, :, :3, :3], expected_prompt_pixel_values, rtol=1e-4, atol=1e-4
        )
        torch.testing.assert_close(inputs.prompt_masks[0, :, :3, :3], expected_prompt_masks, rtol=1e-4, atol=1e-4)

    def test_prompt_mask_equivalence(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        image_size = self.image_processor_tester.image_size

        # Single Mask Examples
        expected_single_shape = [1, 3, image_size, image_size]

        # Single Semantic Map (2D)
        image_np_2d = np.ones((image_size, image_size))
        image_pt_2d = torch.ones((image_size, image_size))
        image_pil_2d = Image.fromarray(image_np_2d)

        inputs_np_2d = image_processor(images=None, prompt_masks=image_np_2d, return_tensors="pt")
        inputs_pt_2d = image_processor(images=None, prompt_masks=image_pt_2d, return_tensors="pt")
        inputs_pil_2d = image_processor(images=None, prompt_masks=image_pil_2d, return_tensors="pt")

        self.assertTrue((inputs_np_2d["prompt_masks"] == inputs_pt_2d["prompt_masks"]).all().item())
        self.assertTrue((inputs_np_2d["prompt_masks"] == inputs_pil_2d["prompt_masks"]).all().item())
        self.assertEqual(list(inputs_np_2d["prompt_masks"].shape), expected_single_shape)

        # Single RGB Images (3D)
        image_np_3d = np.ones((3, image_size, image_size))
        image_pt_3d = torch.ones((3, image_size, image_size))
        image_pil_3d = Image.fromarray(image_np_3d.transpose(1, 2, 0).astype(np.uint8))

        inputs_np_3d = image_processor(
            images=None, prompt_masks=image_np_3d, return_tensors="pt", do_convert_rgb=False
        )
        inputs_pt_3d = image_processor(
            images=None, prompt_masks=image_pt_3d, return_tensors="pt", do_convert_rgb=False
        )
        inputs_pil_3d = image_processor(
            images=None, prompt_masks=image_pil_3d, return_tensors="pt", do_convert_rgb=False
        )

        self.assertTrue((inputs_np_3d["prompt_masks"] == inputs_pt_3d["prompt_masks"]).all().item())
        self.assertTrue((inputs_np_3d["prompt_masks"] == inputs_pil_3d["prompt_masks"]).all().item())
        self.assertEqual(list(inputs_np_3d["prompt_masks"].shape), expected_single_shape)

        # Batched Examples
        expected_batched_shape = [2, 3, image_size, image_size]

        # Batched Semantic Maps (3D)
        image_np_2d_batched = np.ones((2, image_size, image_size))
        image_pt_2d_batched = torch.ones((2, image_size, image_size))

        inputs_np_2d_batched = image_processor(images=None, prompt_masks=image_np_2d_batched, return_tensors="pt")
        inputs_pt_2d_batched = image_processor(images=None, prompt_masks=image_pt_2d_batched, return_tensors="pt")

        self.assertTrue((inputs_np_2d_batched["prompt_masks"] == inputs_pt_2d_batched["prompt_masks"]).all().item())
        self.assertEqual(list(inputs_np_2d_batched["prompt_masks"].shape), expected_batched_shape)

        # Batched RGB images
        image_np_4d = np.ones((2, 3, image_size, image_size))
        image_pt_4d = torch.ones((2, 3, image_size, image_size))

        inputs_np_4d = image_processor(
            images=None, prompt_masks=image_np_4d, return_tensors="pt", do_convert_rgb=False
        )
        inputs_pt_4d = image_processor(
            images=None, prompt_masks=image_pt_4d, return_tensors="pt", do_convert_rgb=False
        )

        self.assertTrue((inputs_np_4d["prompt_masks"] == inputs_pt_4d["prompt_masks"]).all().item())
        self.assertEqual(list(inputs_np_4d["prompt_masks"].shape), expected_batched_shape)

        # Comparing Single and Batched Examples
        self.assertTrue((inputs_np_2d["prompt_masks"][0] == inputs_np_3d["prompt_masks"][0]).all().item())
        self.assertTrue((inputs_np_2d_batched["prompt_masks"][0] == inputs_np_2d["prompt_masks"][0]).all().item())
        self.assertTrue((inputs_np_2d_batched["prompt_masks"][0] == inputs_np_3d["prompt_masks"][0]).all().item())
        self.assertTrue((inputs_np_2d_batched["prompt_masks"][0] == inputs_np_4d["prompt_masks"][0]).all().item())
        self.assertTrue((inputs_np_2d_batched["prompt_masks"][0] == inputs_np_3d["prompt_masks"][0]).all().item())
transformers/tests/models/seggpt/test_image_processing_seggpt.py/0
{ "file_path": "transformers/tests/models/seggpt/test_image_processing_seggpt.py", "repo_id": "transformers", "token_count": 6065 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the TensorFlow Speech2Text model.""" from __future__ import annotations import inspect import unittest from transformers import Speech2TextConfig from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property, is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import Speech2TextProcessor, TFSpeech2TextForConditionalGeneration, TFSpeech2TextModel def prepare_speech_to_text_inputs_dict( config, input_features, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = tf.math.not_equal(input_features, 0) if decoder_attention_mask is None: decoder_attention_mask = tf.math.not_equal(decoder_input_ids, config.pad_token_id) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_features": input_features, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class TFSpeech2TextModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=32, input_feat_per_channel=24, input_channels=1, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=20, max_target_positions=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, scale_embedding=False, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.num_conv_layers = num_conv_layers self.conv_kernel_sizes = conv_kernel_sizes self.conv_channels = conv_channels self.input_feat_per_channel = input_feat_per_channel self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = 
attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.scale_embedding = scale_embedding def prepare_config_and_inputs(self): input_features = floats_tensor( [self.batch_size, self.seq_length, self.input_feat_per_channel], self.vocab_size ) attention_mask = tf.ones([self.batch_size, self.seq_length], dtype=tf.int64) decoder_input_ids = tf.math.maximum(ids_tensor([self.batch_size, self.seq_length], self.vocab_size), 2) config = self.get_config() inputs_dict = prepare_speech_to_text_inputs_dict( config, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, ) return config, inputs_dict def get_config(self): return Speech2TextConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, num_conv_layers=self.num_conv_layers, conv_kernel_sizes=self.conv_kernel_sizes, conv_channels=self.conv_channels, input_feat_per_channel=self.input_feat_per_channel, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_source_positions=self.max_source_positions, max_target_positions=self.max_target_positions, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, scale_embedding=self.scale_embedding, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_subsampled_output_lengths(self, input_lengths): """ Computes the output length of the convolutional layers """ for _ in range(self.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFSpeech2TextModel(config=config).get_decoder() input_ids = inputs_dict["decoder_input_ids"] attention_mask = inputs_dict["decoder_attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) _, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = tf.math.maximum(ids_tensor((self.batch_size, 3), config.vocab_size), 2) next_attn_mask = ids_tensor((self.batch_size, 3), 2, dtype=tf.int64) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, 
output_from_no_past_slice, atol=1e-2) @require_tf class TFSpeech2TextModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFSpeech2TextModel, TFSpeech2TextForConditionalGeneration) if is_tf_available() else () all_generative_model_classes = (TFSpeech2TextForConditionalGeneration,) if is_tf_available() else () pipeline_model_mapping = {"feature-extraction": TFSpeech2TextModel} if is_tf_available() else {} is_encoder_decoder = True test_pruning = False test_missing_keys = False test_onnx = False input_name = "input_ids" def setUp(self): self.model_tester = TFSpeech2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Speech2TextConfig) self.maxDiff = 3000 def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) # not implemented currently def test_inputs_embeds(self): pass # training is not supported yet def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_generate_fp16(self): pass def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model._get_feat_extract_output_lengths(seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length 
= getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) subsampled_encoder_seq_length = model._get_feat_extract_output_lengths(encoder_seq_length) subsampled_encoder_key_length = model._get_feat_extract_output_lengths(encoder_key_length) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_resize_token_embeddings(self): # Overwritten method from parent; see `test_resize_embeddings_untied` pass def test_resize_tokens_embeddings(self): # see `test_resize_embeddings_untied` pass def test_resize_embeddings_untied(self): # TODO: copy test from PT. Not working at the moment because the test relies on `model.resize_token_embeddings`, # whose TF implementation assumes the use of `TFWrappedEmbeddings`. 
But with a `TFWrappedEmbeddings` we can't # load the weights from PT (also, it induces TF1 behavior, so we might want to rework how # `model.resize_token_embeddings` operates). pass def test_generate_without_input_ids(self): pass def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, seq_length = input_ids.shape[:2] subsampled_seq_length = self.model_tester.get_subsampled_output_lengths(seq_length) num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length ) # scores self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) # Attentions # encoder self._check_encoder_attention_for_generate( output.encoder_attentions, batch_size, config, subsampled_seq_length ) # decoder self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # Hidden States # encoder self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, subsampled_seq_length ) # decoder self._check_hidden_states_for_generate( num_sequences_in_output, output.decoder_hidden_states, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # overwritten from parent due to the inability to work when non-text inputs are not passed AND because the input is # `input_features` def test_lm_head_model_random_no_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_features = inputs_dict.get("input_features", None) # iterate over all generative models for model_class in self.all_generative_model_classes: model = model_class(config) if config.bos_token_id is None: # if bos token id is not defined model needs input_features with self.assertRaises(AssertionError): model.generate(do_sample=True, max_length=5) # num_return_sequences = 1 self._check_generated_ids(model.generate(input_features, do_sample=True)) with self.assertRaises(ValueError): # generating multiple sequences when no beam search generation # is not allowed as it would always generate the same sequences model.generate(input_features, do_sample=False, num_return_sequences=2) # num_return_sequences > 1, sample self._check_generated_ids(model.generate(input_features, do_sample=True, num_return_sequences=2)) # check bad words tokens language generation # create list of 1-seq bad token and list of 2-seq of bad tokens bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_features, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2 ) # only count generated tokens generated_ids = output_tokens[:, input_features.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) # overwritten from parent due to the inability to work when non-text inputs are not passed AND because the input is # `input_features` def test_lm_head_model_random_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_features = inputs_dict.get("input_features", None) for model_class in self.all_generative_model_classes: model = model_class(config) if config.bos_token_id is None: # if bos token id is not defined model needs input_ids, num_return_sequences = 1 
self._check_generated_ids(model.generate(input_features, do_sample=True, num_beams=2)) with self.assertRaises(ValueError): # generating more sequences than having beams leads is not possible model.generate(input_features, do_sample=False, num_return_sequences=3, num_beams=2) # num_return_sequences > 1, sample self._check_generated_ids( model.generate( input_features, do_sample=True, num_beams=2, num_return_sequences=2, ) ) # num_return_sequences > 1, greedy self._check_generated_ids( model.generate(input_features, do_sample=False, num_beams=2, num_return_sequences=2) ) # check bad words tokens language generation # create list of 1-seq bad token and list of 2-seq of bad tokens bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_features, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2 ) # only count generated tokens generated_ids = output_tokens[:, input_features.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) # overwritten from parent -- the input is `input_features`, not `input_ids` def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_features", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_pt_tf_model_equivalence(self, allow_missing_keys=True): # Allow missing keys since TF doesn't cache the sinusoidal embeddings in an attribute super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys) @require_tf @require_sentencepiece @require_tokenizers @slow class TFSpeech2TextModelIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr") def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_generation_librispeech(self): model = TFSpeech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr") processor = self.default_processor input_speech = self._load_datasamples(1) input_features = processor(input_speech, return_tensors="tf").input_features generated_ids = model.generate(input_features) generated_transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) EXPECTED_TRANSCRIPTIONS = [ "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel" ] self.assertListEqual(generated_transcript, EXPECTED_TRANSCRIPTIONS) def test_generation_librispeech_batched(self): model = TFSpeech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr") processor = self.default_processor input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="tf", padding=True) generated_ids = model.generate(inputs.input_features, attention_mask=inputs.attention_mask) generated_transcripts = 
processor.batch_decode(generated_ids, skip_special_tokens=True) EXPECTED_TRANSCRIPTIONS = [ "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel", "nor is mister cultar's manner less interesting than his matter", "he tells us that at this festive season of the year with christmas and roast beef looming before us" " similes drawn from eating and its results occur most readily to the mind", "he has grave doubts whether sir frederick leyton's work is really greek after all and can discover in it" " but little of rocky ithaca", ] self.assertListEqual(generated_transcripts, EXPECTED_TRANSCRIPTIONS)
transformers/tests/models/speech_to_text/test_modeling_tf_speech_to_text.py/0
{ "file_path": "transformers/tests/models/speech_to_text/test_modeling_tf_speech_to_text.py", "repo_id": "transformers", "token_count": 11558 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Swin2SR model.""" import unittest from transformers import Swin2SRConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import Swin2SRForImageSuperResolution, Swin2SRModel if is_vision_available(): from PIL import Image from transformers import Swin2SRImageProcessor class Swin2SRModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=1, num_channels=3, num_channels_out=1, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=False, upscale=2, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_channels_out = num_channels_out self.embed_dim = embed_dim self.depths = depths self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.patch_norm = patch_norm self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.is_training = is_training self.scope = scope self.use_labels = use_labels self.upscale = upscale # here we set some attributes to make tests pass self.num_hidden_layers = len(depths) self.hidden_size = embed_dim self.seq_length = (image_size // patch_size) ** 2 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return Swin2SRConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_channels_out=self.num_channels_out, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, 
hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, upscale=self.upscale, ) def create_and_check_model(self, config, pixel_values, labels): model = Swin2SRModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.embed_dim, self.image_size, self.image_size) ) def create_and_check_for_image_super_resolution(self, config, pixel_values, labels): model = Swin2SRForImageSuperResolution(config) model.to(torch_device) model.eval() result = model(pixel_values) expected_image_size = self.image_size * self.upscale self.parent.assertEqual( result.reconstruction.shape, (self.batch_size, self.num_channels_out, expected_image_size, expected_image_size), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class Swin2SRModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Swin2SRModel, Swin2SRForImageSuperResolution) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": Swin2SRModel, "image-to-image": Swin2SRForImageSuperResolution} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False def setUp(self): self.model_tester = Swin2SRModelTester(self) self.config_tester = ConfigTester( self, config_class=Swin2SRConfig, embed_dim=37, has_text_modality=False, common_properties=["image_size", "patch_size", "num_channels"], ) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_for_image_super_resolution(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_super_resolution(*config_and_inputs) # TODO: check if this works again for PyTorch 2.x.y @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="Swin2SR does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Swin2SR does not support training yet") def test_training(self): pass @unittest.skip(reason="Swin2SR does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) @slow def 
test_model_from_pretrained(self): model_name = "caidas/swin2SR-classical-sr-x2-64" model = Swin2SRModel.from_pretrained(model_name) self.assertIsNotNone(model) # overwriting because of `logit_scale` parameter def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "logit_scale" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = len(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True window_size_squared = config.window_size**2 model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) @require_vision @require_torch @slow class Swin2SRModelIntegrationTest(unittest.TestCase): def test_inference_image_super_resolution_head(self): processor = Swin2SRImageProcessor() model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-classical-sr-x2-64").to(torch_device) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size([1, 3, 976, 1296]) self.assertEqual(outputs.reconstruction.shape, expected_shape) expected_slice = torch.tensor( [[0.5458, 0.5546, 0.5638], [0.5526, 0.5565, 0.5651], [0.5396, 0.5426, 0.5621]] ).to(torch_device) torch.testing.assert_close(outputs.reconstruction[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) def test_inference_fp16(self): processor = Swin2SRImageProcessor() model = Swin2SRForImageSuperResolution.from_pretrained( "caidas/swin2SR-classical-sr-x2-64", torch_dtype=torch.float16 ).to(torch_device) image = 
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor(images=image, return_tensors="pt").to(model.dtype).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size([1, 3, 976, 1296]) self.assertEqual(outputs.reconstruction.shape, expected_shape) expected_slice = torch.tensor( [[0.5454, 0.5542, 0.5640], [0.5518, 0.5562, 0.5649], [0.5391, 0.5425, 0.5620]], dtype=model.dtype ).to(torch_device) torch.testing.assert_close(outputs.reconstruction[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/swin2sr/test_modeling_swin2sr.py/0
{ "file_path": "transformers/tests/models/swin2sr/test_modeling_swin2sr.py", "repo_id": "transformers", "token_count": 6153 }
# coding=utf-8 # Copyright 2023 The Intel Team Authors, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.image_transforms import PaddingMode from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import TvpImageProcessor class TvpImageProcessingTester: def __init__( self, parent, do_resize: bool = True, size: Dict[str, int] = {"longest_edge": 40}, do_center_crop: bool = False, crop_size: Dict[str, int] = None, do_rescale: bool = False, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: Dict[str, int] = {"height": 80, "width": 80}, fill: int = None, pad_mode: PaddingMode = None, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073], image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711], batch_size=2, min_resolution=40, max_resolution=80, num_channels=3, num_frames=2, ): self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad self.pad_size = pad_size self.fill = fill self.pad_mode = pad_mode self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.num_frames = num_frames def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "do_center_crop": self.do_center_crop, "do_pad": self.do_pad, "pad_size": self.pad_size, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to TvpImageProcessor, assuming do_resize is set to True with a scalar size. 
""" if not batched: return (int(self.pad_size["height"]), int(self.pad_size["width"])) else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def prepare_video_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_video_inputs( batch_size=self.batch_size, num_frames=self.num_frames, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class TvpImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = TvpImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = TvpImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "do_pad")) self.assertTrue(hasattr(image_processing, "pad_size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"longest_edge": 40}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size={"longest_edge": 12}) self.assertEqual(image_processor.size, {"longest_edge": 12}) def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL videos video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], Image.Image) # Test not batched input expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) # Test batched expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, 
numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) # Test not batched input expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) # Test batched expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_numpy_4_channels(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) # Test not batched input expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) encoded_videos = image_processing( video_inputs[0], return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) # Test batched expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) encoded_videos = image_processing( video_inputs, return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) self.image_processor_tester.num_channels = 3 def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, torchify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], torch.Tensor) # Test not batched input expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) # Test batched expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), )
transformers/tests/models/tvp/test_image_processing_tvp.py/0
{ "file_path": "transformers/tests/models/tvp/test_image_processing_tvp.py", "repo_id": "transformers", "token_count": 5459 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch UperNet framework.""" import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import ( require_timm, require_torch, require_torch_multi_gpu, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UperNetModelTester: def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 1, 1], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_stages = num_stages self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.out_features = out_features self.num_labels = num_labels self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_backbone_config(self): return ConvNextConfig( num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, ) def get_config(self): return UperNetConfig( backbone_config=self.get_backbone_config(), backbone=None, hidden_size=64, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=32, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels, ) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels): model = UperNetForSemanticSegmentation(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def 
prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as UperNet does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else () pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False has_attentions = False def setUp(self): self.model_tester = UperNetModelTester(self) self.config_tester = ConfigTester( self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37, common_properties=["hidden_size"], ) def test_config(self): self.config_tester.run_common_tests() def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) @unittest.skip(reason="UperNet does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="UperNet does not support input and output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="UperNet does not have a base model") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="UperNet does not have a base model") def test_save_load_fast_init_to_base(self): pass @require_torch_multi_gpu @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`") def test_multi_gpu_data_parallel_forward(self): pass def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages self.assertEqual(len(hidden_states), expected_num_stages + 1) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not 
properly initialized", ) @require_timm def test_backbone_selection(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() config.backbone_config = None config.backbone_kwargs = {"out_indices": [1, 2, 3]} config.use_pretrained_backbone = True # Load a timm backbone # We can't load transformer checkpoint with timm backbone, as we can't specify features_only and out_indices config.backbone = "resnet18" config.use_timm_backbone = True for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() if model.__class__.__name__ == "UperNetForUniversalSegmentation": self.assertEqual(model.backbone.out_indices, [1, 2, 3]) # Load a HF backbone config.backbone = "microsoft/resnet-18" config.use_timm_backbone = False for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() if model.__class__.__name__ == "UperNetForUniversalSegmentation": self.assertEqual(model.backbone.out_indices, [1, 2, 3]) @unittest.skip(reason="UperNet does not have tied weights") def test_tied_model_weights_key_ignore(self): pass @slow def test_model_from_pretrained(self): model_name = "openmmlab/upernet-convnext-tiny" model = UperNetForSemanticSegmentation.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of ADE20k def prepare_img(): filepath = hf_hub_download( repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg" ) image = Image.open(filepath).convert("RGB") return image @require_torch @require_vision @slow class UperNetModelIntegrationTest(unittest.TestCase): def test_inference_swin_backbone(self): processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny") model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device) image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, model.config.num_labels, 512, 512)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(torch_device) torch.testing.assert_close(outputs.logits[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) def test_inference_convnext_backbone(self): processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny") model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device) image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, model.config.num_labels, 512, 512)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(torch_device) torch.testing.assert_close(outputs.logits[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
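# Hedged illustrative sketch (not part of the test classes above): the integration tests only
# assert on raw logit slices, so the helper below shows one generic way to turn the
# (batch_size, num_labels, height, width) logits of UperNetForSemanticSegmentation into a
# per-pixel label map. The checkpoint name and prepare_img() helper come from this file;
# the interpolate + argmax post-processing and the function name are assumptions made for
# illustration, not necessarily how downstream code post-processes these outputs.
def _example_upernet_label_map():
    """Minimal sketch: upsample UperNet logits to the input resolution and take argmax."""
    import torch

    from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")

    image = prepare_img()  # helper defined above in this file
    inputs = processor(images=image, return_tensors="pt")

    with torch.no_grad():
        logits = model(**inputs).logits  # (1, num_labels, 512, 512), as asserted above

    # Resize to the original (height, width) and pick the most likely class per pixel.
    upsampled = torch.nn.functional.interpolate(
        logits, size=image.size[::-1], mode="bilinear", align_corners=False
    )
    label_map = upsampled.argmax(dim=1)[0]  # (height, width) tensor of class indices
    return label_map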
transformers/tests/models/upernet/test_modeling_upernet.py/0
{ "file_path": "transformers/tests/models/upernet/test_modeling_upernet.py", "repo_id": "transformers", "token_count": 5294 }
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import unittest from transformers import XGLMConfig, is_torch_available from transformers.testing_utils import ( cleanup, require_torch, require_torch_accelerator, require_torch_fp16, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import XGLMForCausalLM, XGLMModel, XGLMTokenizer class XGLMModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = d_model self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.ffn_dim = ffn_dim self.activation_function = activation_function self.activation_dropout = activation_dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = None self.bos_token_id = 0 self.eos_token_id = 2 self.pad_token_id = 1 def get_large_model_config(self): return XGLMConfig.from_pretrained("facebook/xglm-564M") def prepare_config_and_inputs( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config(gradient_checkpointing=gradient_checkpointing) head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, input_mask, head_mask, ) def get_config( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): return XGLMConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, gradient_checkpointing=gradient_checkpointing, ) def 
create_and_check_xglm_model(self, config, input_ids, input_mask, head_mask, *args): model = XGLMModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, head_mask=head_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.num_hidden_layers) def create_and_check_xglm_model_past(self, config, input_ids, input_mask, head_mask, *args): model = XGLMModel(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and token_type_ids next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_xglm_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, *args): model = XGLMModel(config=config) model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = self.seq_length // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past = model(input_ids, attention_mask=attn_mask).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.zeros((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_xglm_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, *args): model = XGLMModel(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, attention_mask=input_mask, use_cache=True) output, past = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=1) # append to next input_ids next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) 
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past)[ "last_hidden_state" ] self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1]) # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, *args): model = XGLMForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, head_mask, *args, gradient_checkpointing=False ): model = XGLMForCausalLM(config) model.to(torch_device) if gradient_checkpointing: model.gradient_checkpointing_enable() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def create_and_check_xglm_weight_initialization(self, config, *args): model = XGLMModel(config) model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, head_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class XGLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (XGLMModel, XGLMForCausalLM) if is_torch_available() else () all_generative_model_classes = (XGLMForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": XGLMModel, "text-generation": XGLMForCausalLM} if is_torch_available() else {} ) fx_compatible = True test_missing_keys = False test_pruning = False def setUp(self): self.model_tester = XGLMModelTester(self) self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_xglm_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xglm_model(*config_and_inputs) def test_xglm_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xglm_model_past(*config_and_inputs) def test_xglm_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xglm_model_attention_mask_past(*config_and_inputs) def test_xglm_model_past_large_inputs(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xglm_model_past_large_inputs(*config_and_inputs) def test_xglm_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_xglm_gradient_checkpointing(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True) def test_xglm_weight_initialization(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xglm_weight_initialization(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "facebook/xglm-564M" model = XGLMModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.") def test_model_parallelism(self): super().test_model_parallelism() @require_torch class XGLMModelLanguageGenerationTest(unittest.TestCase): def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch cleanup(torch_device, gc_collect=True) def _test_lm_generate_xglm_helper( self, gradient_checkpointing=False, verify_outputs=True, ): model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M") if gradient_checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(torch_device) input_ids = torch.tensor([[2, 268, 9865]], dtype=torch.long, device=torch_device) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581] # fmt: skip output_ids = model.generate(input_ids, do_sample=False, num_beams=1) if verify_outputs: self.assertListEqual(output_ids[0].tolist(), expected_output_ids) @slow def test_batch_generation(self): model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M") model.to(torch_device) tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M") tokenizer.padding_side = "left" # use different length sentences to test batching sentences = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. 
When", "Hello, my dog is a little", ] inputs = tokenizer(sentences, return_tensors="pt", padding=True) input_ids = inputs["input_ids"].to(torch_device) outputs = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), max_new_tokens=12 ) inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device) output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12) inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device) output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) @slow def test_lm_generate_xglm(self): self._test_lm_generate_xglm_helper() @slow def test_lm_generate_xglm_with_gradient_checkpointing(self): self._test_lm_generate_xglm_helper(gradient_checkpointing=True) @slow def test_xglm_sample(self): tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M") model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M") torch.manual_seed(0) tokenized = tokenizer("Today is a nice day and", return_tensors="pt") input_ids = tokenized.input_ids output_ids = model.generate(input_ids, do_sample=True, num_beams=1) output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True) EXPECTED_OUTPUT_STRS = [ # TODO: remove this once we move to torch 2.0 # torch 1.13.1 + cu116 "Today is a nice day and the sun is shining. A nice day with warm rainy", # torch 2.0 + cu117 "Today is a nice day and the water is still cold. We just stopped off for some fresh", ] self.assertIn(output_str, EXPECTED_OUTPUT_STRS) @require_torch_accelerator @require_torch_fp16 def test_batched_nan_fp16(self): model_name = "facebook/xglm-564M" tokenizer = XGLMTokenizer.from_pretrained(model_name, use_fast=False, padding_side="left") model = XGLMForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, use_cache=True).to(torch_device) model = model.eval() batch = tokenizer(["Who are you?", "Joe Biden is the president of"], padding=True, return_tensors="pt") input_ids = batch["input_ids"].to(torch_device) attention_mask = batch["attention_mask"].to(torch_device) with torch.no_grad(): outputs = model(input_ids, attention_mask=attention_mask) self.assertFalse( torch.isnan(outputs.logits[0]).any().item() ) # the first logits could contain NaNs if it fails
transformers/tests/models/xglm/test_modeling_xglm.py/0
{ "file_path": "transformers/tests/models/xglm/test_modeling_xglm.py", "repo_id": "transformers", "token_count": 8225 }
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, get_wsd_schedule, ) def unwrap_schedule(scheduler, num_steps=10): lrs = [] for _ in range(num_steps): lrs.append(scheduler.get_lr()[0]) scheduler.step() return lrs def unwrap_and_save_reload_schedule(scheduler, num_steps=10): lrs = [] for step in range(num_steps): lrs.append(scheduler.get_lr()[0]) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: file_name = os.path.join(tmpdirname, "schedule.bin") torch.save(scheduler.state_dict(), file_name) state_dict = torch.load(file_name, weights_only=False) scheduler.load_state_dict(state_dict) return lrs @require_torch class OptimizationTest(unittest.TestCase): def assertListAlmostEqual(self, list1, list2, tol): self.assertEqual(len(list1), len(list2)) for a, b in zip(list1, list2): self.assertAlmostEqual(a, b, delta=tol) def test_adam_w(self): w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True) target = torch.tensor([0.4, 0.2, -0.5]) criterion = nn.MSELoss() # No warmup, constant schedule, no gradient clipping optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0) for _ in range(100): loss = criterion(w, target) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2) def test_adafactor(self): w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True) target = torch.tensor([0.4, 0.2, -0.5]) criterion = nn.MSELoss() # No warmup, constant schedule, no gradient clipping optimizer = Adafactor( params=[w], lr=1e-2, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, ) for _ in range(1000): loss = criterion(w, target) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2) @require_torch class ScheduleInitTest(unittest.TestCase): m = nn.Linear(50, 50) if is_torch_available() else None optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None num_steps = 10 def assertListAlmostEqual(self, list1, list2, tol, msg=None): self.assertEqual(len(list1), len(list2)) for a, b in zip(list1, list2): self.assertAlmostEqual(a, b, delta=tol, msg=msg) def test_schedulers(self): common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) scheds = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), get_wsd_schedule: ( {"num_warmup_steps": 2, "num_stable_steps": 2, "num_decay_steps": 3, "min_lr_ratio": 0.1}, [0.0, 5.0, 10.0, 10.0, 10.0, 7.75, 3.25, 1.0, 1.0, 1.0], ), } for scheduler_func, data in scheds.items(): kwargs, expected_learning_rates = data scheduler = scheduler_func(self.optimizer, **kwargs) self.assertEqual(len([scheduler.get_lr()[0]]), 1) lrs_1 = unwrap_schedule(scheduler, self.num_steps) self.assertListAlmostEqual( lrs_1, expected_learning_rates, tol=1e-2, msg=f"failed for {scheduler_func} in normal scheduler", ) scheduler = scheduler_func(self.optimizer, **kwargs) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(scheduler) # wrap to test picklability of the schedule lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps) self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload") def test_get_scheduler(self): test_params = [ { "name": "warmup_stable_decay", "optimizer": self.optimizer, "num_warmup_steps": 2, "scheduler_specific_kwargs": {"num_stable_steps": 1, "num_decay_steps": 3}, }, { "name": "warmup_stable_decay", "optimizer": self.optimizer, "num_warmup_steps": 2, "num_training_steps": 10, "scheduler_specific_kwargs": {"num_stable_steps": 1, "num_decay_steps": 3}, }, {"name": "cosine", "optimizer": self.optimizer, "num_warmup_steps": 2, "num_training_steps": 10}, ] for param in test_params: self.assertTrue(get_scheduler(**param), msg=f"failed for {param['name']} in get_scheduler") class LambdaScheduleWrapper: """See https://github.com/huggingface/transformers/issues/21689""" def __init__(self, fn): self.fn = fn def __call__(self, *args, **kwargs): return self.fn(*args, **kwargs) @classmethod def wrap_scheduler(cls, scheduler): scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
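# Hedged illustrative sketch (not part of the test classes above): it shows where the
# expected values for get_linear_schedule_with_warmup in ScheduleInitTest come from. With
# lr=10, 2 warmup steps and 10 training steps, the learning rate ramps 0 -> 10 during warmup
# and then decays linearly towards 0. The throwaway parameter tensor and the function name
# are assumptions made purely for illustration.
def _example_linear_warmup_schedule():
    """Minimal sketch reproducing [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]."""
    import torch

    from transformers import get_linear_schedule_with_warmup

    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.AdamW([param], lr=10.0)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)

    lrs = []
    for _ in range(10):
        lrs.append(round(scheduler.get_last_lr()[0], 2))
        optimizer.step()  # step the optimizer before the scheduler to avoid a PyTorch warning
        scheduler.step()
    return lrs  # matches the expected_learning_rates entry for this scheduler above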
transformers/tests/optimization/test_optimization.py/0
{ "file_path": "transformers/tests/optimization/test_optimization.py", "repo_id": "transformers", "token_count": 4047 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import requests from huggingface_hub import ImageToTextOutput from transformers import MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, is_vision_available from transformers.pipelines import ImageToTextPipeline, pipeline from transformers.testing_utils import ( compare_pipeline_output_to_hub_spec, is_pipeline_test, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision class ImageToTextPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING tf_model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING def get_test_pipeline( self, model, tokenizer=None, image_processor=None, feature_extractor=None, processor=None, torch_dtype="float32", ): pipe = ImageToTextPipeline( model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, image_processor=image_processor, processor=processor, torch_dtype=torch_dtype, ) examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "./tests/fixtures/tests_samples/COCO/000000039769.png", ] return pipe, examples def run_pipeline_test(self, pipe, examples): outputs = pipe(examples) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}], [{"generated_text": ANY(str)}], ], ) @require_tf def test_small_model_tf(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-vit-gpt2", framework="tf") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual( outputs, [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" }, ], ) outputs = pipe([image, image]) self.assertEqual( outputs, [ [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], ], ) outputs = pipe(image, max_new_tokens=1) self.assertEqual( outputs, [{"generated_text": "growth"}], ) for single_output in outputs: compare_pipeline_output_to_hub_spec(single_output, ImageToTextOutput) @require_torch def test_small_model_pt(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-vit-gpt2") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual( outputs, [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" }, ], ) outputs = pipe([image, image]) self.assertEqual( outputs, [ [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], [ { "generated_text": 
"growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], ], ) @require_torch def test_small_model_pt_conditional(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-BlipForConditionalGeneration") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" prompt = "a photo of" outputs = pipe(image, prompt=prompt) self.assertTrue(outputs[0]["generated_text"].startswith(prompt)) @require_torch def test_consistent_batching_behaviour(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-BlipForConditionalGeneration") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" prompt = "a photo of" outputs = pipe([image, image], prompt=prompt) self.assertTrue(outputs[0][0]["generated_text"].startswith(prompt)) self.assertTrue(outputs[1][0]["generated_text"].startswith(prompt)) outputs = pipe([image, image], prompt=prompt, batch_size=2) self.assertTrue(outputs[0][0]["generated_text"].startswith(prompt)) self.assertTrue(outputs[1][0]["generated_text"].startswith(prompt)) from torch.utils.data import Dataset class MyDataset(Dataset): def __len__(self): return 5 def __getitem__(self, i): return "./tests/fixtures/tests_samples/COCO/000000039769.png" dataset = MyDataset() for batch_size in (1, 2, 4): outputs = pipe(dataset, prompt=prompt, batch_size=batch_size if batch_size > 1 else None) self.assertTrue(list(outputs)[0][0]["generated_text"].startswith(prompt)) self.assertTrue(list(outputs)[1][0]["generated_text"].startswith(prompt)) @slow @require_torch def test_large_model_pt(self): pipe = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}]) outputs = pipe([image, image]) self.assertEqual( outputs, [ [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], ], ) @slow @require_torch def test_generation_pt_blip(self): pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base") url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png" image = Image.open(requests.get(url, stream=True).raw) outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a pink pokemon pokemon with a blue shirt and a blue shirt"}]) @slow @require_torch def test_generation_pt_git(self): pipe = pipeline("image-to-text", model="microsoft/git-base-coco") url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png" image = Image.open(requests.get(url, stream=True).raw) outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cartoon of a purple character."}]) @slow @require_torch def test_conditional_generation_pt_blip(self): pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "a photography of" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "a photography of a volcano"}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @slow @require_torch def test_conditional_generation_pt_git(self): pipe = 
pipeline("image-to-text", model="microsoft/git-base-coco") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "a photo of a" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "a photo of a tent with a tent and a tent in the background."}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @slow @require_torch def test_conditional_generation_pt_pix2struct(self): pipe = pipeline("image-to-text", model="google/pix2struct-ai2d-base") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "ash cloud"}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @slow @require_tf def test_large_model_tf(self): pipe = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en", framework="tf") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}]) outputs = pipe([image, image]) self.assertEqual( outputs, [ [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], ], ) @slow @require_torch def test_conditional_generation_llava(self): pipe = pipeline("image-to-text", model="llava-hf/bakLlava-v1-hf") prompt = ( "<image>\nUSER: What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud?\nASSISTANT:" ) outputs = pipe( "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg", prompt=prompt, generate_kwargs={"max_new_tokens": 200}, ) self.assertEqual( outputs, [ { "generated_text": "\nUSER: What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud?\nASSISTANT: Lava" } ], ) @slow @require_torch def test_nougat(self): pipe = pipeline("image-to-text", "facebook/nougat-base") outputs = pipe("https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/nougat_paper.png") self.assertEqual( outputs, [{"generated_text": "# Nougat: Neural Optical Understanding for Academic Documents\n\n Lukas Blec"}], )
transformers/tests/pipelines/test_pipelines_image_to_text.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_image_to_text.py", "repo_id": "transformers", "token_count": 5436 }
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from huggingface_hub import ZeroShotImageClassificationOutputElement from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( compare_pipeline_output_to_hub_spec, is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision class ZeroShotImageClassificationPipelineTests(unittest.TestCase): # Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping, # and only CLIP would be there for now. # model_mapping = {CLIPConfig: CLIPModel} # def get_test_pipeline(self, model, tokenizer, processor): # if tokenizer is None: # # Side effect of no Fast Tokenizer class for these model, so skipping # # But the slow tokenizer test should still run as they're quite small # self.skipTest(reason="No tokenizer available") # return # # return None, None # image_classifier = ZeroShotImageClassificationPipeline( # model=model, tokenizer=tokenizer, feature_extractor=processor # ) # # test with a raw waveform # image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") # image2 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") # return image_classifier, [image, image2] # def run_pipeline_test(self, pipe, examples): # image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") # outputs = pipe(image, candidate_labels=["A", "B"]) # self.assertEqual(outputs, {"text": ANY(str)}) # # Batching # outputs = pipe([image] * 3, batch_size=2, candidate_labels=["A", "B"]) @require_torch def test_small_model_pt(self, torch_dtype="float32"): image_classifier = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", torch_dtype=torch_dtype ) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["a", "b", "c"]) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(output), [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], [{"score": 0.333, "label": "b"}, {"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}], ], ) output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2) self.assertEqual( nested_simplify(output), # Pipeline outputs are supposed to be deterministic and # So we could in theory have real values "A", "B", "C" instead # of ANY(str). 
# However it seems that in this particular case, the floating # scores are so close, we enter floating error approximation # and the order is not guaranteed anymore with batching. [ [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], ], ) for single_output in output: compare_pipeline_output_to_hub_spec(single_output, ZeroShotImageClassificationOutputElement) @require_torch def test_small_model_pt_fp16(self): self.test_small_model_pt(torch_dtype="float16") @require_tf def test_small_model_tf(self): image_classifier = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" ) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["a", "b", "c"]) self.assertEqual( nested_simplify(output), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], ) output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2) self.assertEqual( nested_simplify(output), # Pipeline outputs are supposed to be deterministic and # So we could in theory have real values "A", "B", "C" instead # of ANY(str). # However it seems that in this particular case, the floating # scores are so close, we enter floating error approximation # and the order is not guaranteed anymore with batching. 
[ [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], ], ) @slow @require_torch def test_large_model_pt(self): image_classifier = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", ) # This is an image of 2 cats with remotes and no planes image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["cat", "plane", "remote"]) self.assertEqual( nested_simplify(output), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, ) @slow @require_tf def test_large_model_tf(self): image_classifier = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" ) # This is an image of 2 cats with remotes and no planes image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["cat", "plane", "remote"]) self.assertEqual( nested_simplify(output), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, ) @slow @require_torch def test_siglip_model_pt(self): image_classifier = pipeline( task="zero-shot-image-classification", model="google/siglip-base-patch16-224", ) # This is an image of 2 cats with remotes and no planes image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["2 cats", "a plane", "a remote"]) self.assertEqual( nested_simplify(output), [ {"score": 0.198, "label": "2 cats"}, {"score": 0.0, "label": "a remote"}, {"score": 0.0, "label": "a plane"}, ], ) output = image_classifier([image] * 5, candidate_labels=["2 cats", "a plane", "a remote"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 0.198, "label": "2 cats"}, {"score": 0.0, "label": "a remote"}, {"score": 0.0, "label": "a plane"}, ] ] * 5, ) @slow @require_torch def test_blip2_model_pt(self): image_classifier = pipeline( task="zero-shot-image-classification", model="Salesforce/blip2-itm-vit-g", ) # This is an image of 2 cats with remotes and no planes image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier( image, candidate_labels=["2 cats", "a plane", "a remote"], tokenizer_kwargs={"return_token_type_ids": False}, ) self.assertEqual( nested_simplify(output), [ {"score": 0.369, "label": "2 cats"}, {"score": 0.333, "label": "a remote"}, {"score": 
0.297, "label": "a plane"}, ], ) output = image_classifier( [image] * 5, candidate_labels=["2 cats", "a plane", "a remote"], batch_size=2, tokenizer_kwargs={"return_token_type_ids": False}, ) self.assertEqual( nested_simplify(output), [ [ {"score": 0.369, "label": "2 cats"}, {"score": 0.333, "label": "a remote"}, {"score": 0.297, "label": "a plane"}, ] ] * 5, )
transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py", "repo_id": "transformers", "token_count": 6227 }
# coding=utf-8 # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import unittest from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, VptqConfig from transformers.testing_utils import ( require_accelerate, require_torch_gpu, require_torch_multi_gpu, require_vptq, slow, torch_device, ) from transformers.utils import is_accelerate_available, is_torch_available if is_torch_available(): import torch if is_accelerate_available(): from accelerate import init_empty_weights class VptqConfigTest(unittest.TestCase): def test_to_dict(self): """ Makes sure the config format is properly set """ quantization_config = VptqConfig() vptq_orig_config = quantization_config.to_dict() self.assertEqual(vptq_orig_config["quant_method"], quantization_config.quant_method) @slow @require_torch_gpu @require_vptq @require_accelerate class VptqTest(unittest.TestCase): model_name = "VPTQ-community/Meta-Llama-3.1-8B-Instruct-v12-k65536-4096-woft" input_text = "Hello my name is" max_new_tokens = 32 EXPECTED_OUTPUT = "Hello my name is Sarah and I am a 25 year old woman from the United States. I am a college graduate and I am currently working as a marketing specialist for a small" device_map = "cuda" # called only once for all test in this class @classmethod def setUpClass(cls): """ Setup quantized model """ cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, device_map=cls.device_map, ) def tearDown(self): gc.collect() torch.cuda.empty_cache() gc.collect() def test_quantized_model(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_raise_if_non_quantized(self): model_id = "facebook/opt-125m" quantization_config = VptqConfig() with self.assertRaises(ValueError): _ = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config) def test_save_pretrained(self): """ Simple test that checks if the quantized model is working properly after being saved and loaded """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map) input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @require_torch_multi_gpu def test_quantized_model_multi_gpu(self): """ Simple test that checks if the quantized model is working properly with multiple GPUs """ input_ids = 
self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto") self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1}) output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_quantized_model_conversion(self): """ Simple test that checks if the quantized model has been converted properly """ from vptq import VQuantLinear from transformers.integrations import replace_with_vptq_linear model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") modules_to_not_convert = ["lm_head"] names = [ "q_proj", "k_proj", "v_proj", "out_proj", "fc1", "fc2", ] value = { "enable_norm": True, "enable_perm": True, "group_num": 1, "group_size": 128, "indices_as_float": False, "num_centroids": [-1, 128], "num_res_centroids": [-1, 128], "outlier_size": 0, "vector_lens": [-1, 12], } shared_layer_config = {} for name in names: shared_layer_config[name] = value for i in range(24): modules_to_not_convert.append("model.decoder.layers.{layer_idx}.fc1".format(layer_idx=i)) layer_configs = {} layer_configs["model.decoder.project_out"] = value layer_configs["model.decoder.project_in"] = value quantization_config = VptqConfig(config_for_layers=layer_configs, shared_layer_config=shared_layer_config) with init_empty_weights(): model = AutoModelForCausalLM.from_config(config) nb_linears = 0 for module in model.modules(): if isinstance(module, torch.nn.Linear): nb_linears += 1 model, _ = replace_with_vptq_linear(model, quantization_config=quantization_config) nb_vptq_linear = 0 for module in model.modules(): if isinstance(module, VQuantLinear): nb_vptq_linear += 1 self.assertEqual(nb_linears - 1, nb_vptq_linear) # Try with `linear_weights_not_to_quantize` with init_empty_weights(): model = AutoModelForCausalLM.from_config(config) quantization_config = VptqConfig(config_for_layers=layer_configs, shared_layer_config=shared_layer_config) model, _ = replace_with_vptq_linear( model, quantization_config=quantization_config, modules_to_not_convert=modules_to_not_convert ) nb_vptq_linear = 0 for module in model.modules(): if isinstance(module, VQuantLinear): nb_vptq_linear += 1 # 25 comes from 24 decoder.layers.{layer_idx}.fc1 # and the last lm_head self.assertEqual(nb_linears - 25, nb_vptq_linear)
transformers/tests/quantization/vptq_integration/test_vptq.py/0
{ "file_path": "transformers/tests/quantization/vptq_integration/test_vptq.py", "repo_id": "transformers", "token_count": 3072 }
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True, reason="Skipping test because should only be run when releasing minor transformers version", ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert/distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert/distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert/distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ] ) class MultiNodeTest(unittest.TestCase): def setUp(self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, ) assert hasattr(self, "env") def create_estimator(self, instance_count): job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}" # distributed data settings distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", ) def save_results_as_csv(self, job_name): TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") # @parameterized.expand([(2,), (4,),]) @parameterized.expand([(2,)]) def test_script(self, instance_count): # create estimator estimator = self.create_estimator(instance_count) # run training estimator.fit() # result dataframe result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping train_runtime = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json", "w") as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
transformers/tests/sagemaker/test_multi_node_data_parallel.py/0
{ "file_path": "transformers/tests/sagemaker/test_multi_node_data_parallel.py", "repo_id": "transformers", "token_count": 1917 }
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import concurrent.futures import json import os import shutil import tempfile import unittest from transformers import AutoTokenizer, PreTrainedTokenizerFast from transformers.testing_utils import require_tokenizers from ..test_tokenization_common import TokenizerTesterMixin @require_tokenizers class PreTrainedTokenizationFastTest(TokenizerTesterMixin, unittest.TestCase): rust_tokenizer_class = PreTrainedTokenizerFast test_slow_tokenizer = False test_rust_tokenizer = True from_pretrained_vocab_key = "tokenizer_file" def setUp(self): self.test_rust_tokenizer = False # because we don't have pretrained_vocab_files_map super().setUp() self.test_rust_tokenizer = True model_paths = ["robot-test/dummy-tokenizer-fast", "robot-test/dummy-tokenizer-wordlevel"] self.bytelevel_bpe_model_name = "SaulLu/dummy-tokenizer-bytelevel-bpe" # Inclusion of 2 tokenizers to test different types of models (Unigram and WordLevel for the moment) self.tokenizers_list = [(PreTrainedTokenizerFast, model_path, {}) for model_path in model_paths] tokenizer = PreTrainedTokenizerFast.from_pretrained(model_paths[0]) tokenizer.save_pretrained(self.tmpdirname) @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_tokenizer_mismatch_warning(self): pass @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_encode_decode_with_spaces(self): pass @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_added_tokens_serialization(self): pass @unittest.skip( "We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model" ) def test_additional_special_tokens_serialization(self): pass @unittest.skip(reason="PreTrainedTokenizerFast is the only tokenizer that is not linked to any model") def test_prepare_for_model(self): pass @unittest.skip(reason="PreTrainedTokenizerFast doesn't have tokenizer_file in its signature") def test_rust_tokenizer_signature(self): pass def test_training_new_tokenizer(self): tmpdirname_orig = self.tmpdirname # Here we want to test the 2 available tokenizers that use 2 different types of models: Unigram and WordLevel. 
for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): try: self.tmpdirname = tempfile.mkdtemp() tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer.save_pretrained(self.tmpdirname) super().test_training_new_tokenizer() finally: # Even if the test fails, we must be sure that the folder is deleted and that the default tokenizer # is restored shutil.rmtree(self.tmpdirname) self.tmpdirname = tmpdirname_orig def test_training_new_tokenizer_with_special_tokens_change(self): tmpdirname_orig = self.tmpdirname # Here we want to test the 2 available tokenizers that use 2 different types of models: Unigram and WordLevel. for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): try: self.tmpdirname = tempfile.mkdtemp() tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer.save_pretrained(self.tmpdirname) super().test_training_new_tokenizer_with_special_tokens_change() finally: # Even if the test fails, we must be sure that the folder is deleted and that the default tokenizer # is restored shutil.rmtree(self.tmpdirname) self.tmpdirname = tmpdirname_orig def test_training_new_tokenizer_with_bytelevel(self): tokenizer = self.rust_tokenizer_class.from_pretrained(self.bytelevel_bpe_model_name) toy_text_iterator = ("a" for _ in range(1000)) new_tokenizer = tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50) encoding_ids = new_tokenizer.encode("a🤗") self.assertEqual(encoding_ids, [64, 172, 253, 97, 245]) def test_init_from_tokenizers_model(self): from tokenizers import Tokenizer sentences = ["Hello, y'all!", "How are you 😁 ? 
There should not be any issue right?"] tokenizer = Tokenizer.from_pretrained("google-t5/t5-base") # Enable padding tokenizer.enable_padding(pad_id=0, pad_token="<pad>", length=512, pad_to_multiple_of=8) self.assertEqual( tokenizer.padding, { "length": 512, "pad_to_multiple_of": 8, "pad_id": 0, "pad_token": "<pad>", "pad_type_id": 0, "direction": "right", }, ) fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer) tmpdirname = tempfile.mkdtemp() fast_tokenizer.save_pretrained(tmpdirname) fast_from_saved = PreTrainedTokenizerFast.from_pretrained(tmpdirname) for tok in [fast_tokenizer, fast_from_saved]: self.assertEqual(tok.pad_token_id, 0) self.assertEqual(tok.padding_side, "right") self.assertEqual(tok.pad_token, "<pad>") self.assertEqual(tok.init_kwargs["max_length"], 512) self.assertEqual(tok.init_kwargs["pad_to_multiple_of"], 8) self.assertEqual(tok(sentences, padding = True), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1, 0, 0, 0, 0,0, 0, 0, 0],[ 571, 33, 25, 3, 2, 3, 58, 290, 225, 59, 36, 136, 962, 269, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}) # fmt: skip tokenizer.enable_truncation(8, stride=0, strategy="longest_first", direction="right") self.assertEqual( tokenizer.truncation, {"max_length": 8, "stride": 0, "strategy": "longest_first", "direction": "right"} ) fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer) tmpdirname = tempfile.mkdtemp() fast_tokenizer.save_pretrained(tmpdirname) fast_from_saved = PreTrainedTokenizerFast.from_pretrained(tmpdirname) for tok in [fast_tokenizer, fast_from_saved]: self.assertEqual(tok.truncation_side, "right") self.assertEqual(tok.init_kwargs["truncation_strategy"], "longest_first") self.assertEqual(tok.init_kwargs["max_length"], 8) self.assertEqual(tok.init_kwargs["stride"], 0) # NOTE even if the model has a default max_length, it is not used... # thus tok(sentences, truncation = True) does nothing and does not warn either self.assertEqual(tok(sentences, truncation = True, max_length = 8), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1],[ 571, 33, 25, 3, 2, 3, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1]]}) # fmt: skip @require_tokenizers class TokenizerVersioningTest(unittest.TestCase): def test_local_versioning(self): tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased") json_tokenizer = json.loads(tokenizer._tokenizer.to_str()) json_tokenizer["model"]["vocab"]["huggingface"] = len(tokenizer) with tempfile.TemporaryDirectory() as tmp_dir: # Hack to save this in the tokenizer_config.json tokenizer.init_kwargs["fast_tokenizer_files"] = ["tokenizer.4.0.0.json"] tokenizer.save_pretrained(tmp_dir) json.dump(json_tokenizer, open(os.path.join(tmp_dir, "tokenizer.4.0.0.json"), "w")) # This should pick the new tokenizer file as the version of Transformers is > 4.0.0 new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir) self.assertEqual(len(new_tokenizer), len(tokenizer) + 1) json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str()) self.assertIn("huggingface", json_tokenizer["model"]["vocab"]) # Will need to be adjusted if we reach v42 and this test is still here. 
# Should pick the old tokenizer file as the version of Transformers is < 4.0.0 shutil.move(os.path.join(tmp_dir, "tokenizer.4.0.0.json"), os.path.join(tmp_dir, "tokenizer.42.0.0.json")) tokenizer.init_kwargs["fast_tokenizer_files"] = ["tokenizer.42.0.0.json"] tokenizer.save_pretrained(tmp_dir) new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir) self.assertEqual(len(new_tokenizer), len(tokenizer)) json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str()) self.assertNotIn("huggingface", json_tokenizer["model"]["vocab"]) def test_repo_versioning(self): # This repo has two tokenizer files, one for v4.0.0 and above with an added token, one for versions lower. repo = "hf-internal-testing/test-two-tokenizers" # This should pick the new tokenizer file as the version of Transformers is > 4.0.0 tokenizer = AutoTokenizer.from_pretrained(repo) self.assertEqual(len(tokenizer), 28997) json_tokenizer = json.loads(tokenizer._tokenizer.to_str()) self.assertIn("huggingface", json_tokenizer["model"]["vocab"]) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers old_transformers.tokenization_utils_base.__version__ = "3.0.0" old_tokenizer = old_transformers.models.auto.AutoTokenizer.from_pretrained(repo) self.assertEqual(len(old_tokenizer), 28996) json_tokenizer = json.loads(old_tokenizer._tokenizer.to_str()) self.assertNotIn("huggingface", json_tokenizer["model"]["vocab"]) @require_tokenizers class ReduceMutableBorrowTests(unittest.TestCase): def test_async_share_tokenizer(self): # See https://github.com/huggingface/transformers/pull/12550 # and https://github.com/huggingface/tokenizers/issues/537 tokenizer = PreTrainedTokenizerFast.from_pretrained("robot-test/dummy-tokenizer-wordlevel") text = "The Matrix is a 1999 science fiction action film." with concurrent.futures.ThreadPoolExecutor() as executor: futures = [executor.submit(self.fetch, tokenizer, text) for i in range(10)] return_value = [future.result() for future in futures] self.assertEqual(return_value, [[1, 10, 0, 8, 0, 18, 0, 0, 0, 2] for i in range(10)]) def fetch(self, tokenizer, text): return tokenizer.encode(text, truncation="longest_first", padding="longest")
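The `TokenizerVersioningTest` above relies on version-suffixed files such as `tokenizer.4.0.0.json`, which are only picked up when the installed `transformers` version is at least the version encoded in the file name. A simplified, hypothetical re-implementation of that selection rule (illustration only; the helper name and the hard-coded current version are assumptions, not the library's actual code):

```python
import re

from packaging import version


def pick_tokenizer_file(candidates, current_version="4.40.0"):
    # Keep the highest versioned file whose version is <= the installed version,
    # otherwise fall back to the plain tokenizer.json.
    current = version.parse(current_version)
    best_name, best_version = "tokenizer.json", None
    for name in candidates:
        match = re.fullmatch(r"tokenizer\.(\d+\.\d+\.\d+)\.json", name)
        if match is None:
            continue
        v = version.parse(match.group(1))
        if v <= current and (best_version is None or v > best_version):
            best_name, best_version = name, v
    return best_name


print(pick_tokenizer_file(["tokenizer.json", "tokenizer.4.0.0.json"]))   # tokenizer.4.0.0.json
print(pick_tokenizer_file(["tokenizer.json", "tokenizer.42.0.0.json"]))  # tokenizer.json
```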
transformers/tests/tokenization/test_tokenization_fast.py/0
{ "file_path": "transformers/tests/tokenization/test_tokenization_fast.py", "repo_id": "transformers", "token_count": 5106 }
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import warnings import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, filter_out_non_signature_kwargs, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class GenericTester(unittest.TestCase): def test_flatten_dict(self): input_dict = { "task_specific_params": { "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4}, "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4}, "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6}, } } expected_dict = { "task_specific_params.summarization.length_penalty": 1.0, "task_specific_params.summarization.max_length": 128, "task_specific_params.summarization.min_length": 12, "task_specific_params.summarization.num_beams": 4, "task_specific_params.summarization_cnn.length_penalty": 2.0, "task_specific_params.summarization_cnn.max_length": 142, "task_specific_params.summarization_cnn.min_length": 56, "task_specific_params.summarization_cnn.num_beams": 4, "task_specific_params.summarization_xsum.length_penalty": 1.0, "task_specific_params.summarization_xsum.max_length": 62, "task_specific_params.summarization_xsum.min_length": 11, "task_specific_params.summarization_xsum.num_beams": 6, } self.assertEqual(flatten_dict(input_dict), expected_dict) def test_transpose_numpy(self): x = np.random.randn(3, 4) self.assertTrue(np.allclose(transpose(x), x.transpose())) x = np.random.randn(3, 4, 5) self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0)))) @require_torch def test_transpose_torch(self): x = np.random.randn(3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(transpose(x), transpose(t).numpy())) x = np.random.randn(3, 4, 5) t = torch.tensor(x) self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy())) @require_tf def test_transpose_tf(self): x = np.random.randn(3, 4) t = tf.constant(x) self.assertTrue(np.allclose(transpose(x), transpose(t).numpy())) x = np.random.randn(3, 4, 5) t = tf.constant(x) self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy())) @require_flax def test_transpose_flax(self): x = np.random.randn(3, 4) t = jnp.array(x) self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t)))) x = np.random.randn(3, 4, 5) t = jnp.array(x) self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0))))) def test_reshape_numpy(self): x = np.random.randn(3, 4) self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3)))) x = np.random.randn(3, 4, 5) self.assertTrue(np.allclose(reshape(x, (12, 
5)), np.reshape(x, (12, 5)))) @require_torch def test_reshape_torch(self): x = np.random.randn(3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy())) x = np.random.randn(3, 4, 5) t = torch.tensor(x) self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy())) @require_tf def test_reshape_tf(self): x = np.random.randn(3, 4) t = tf.constant(x) self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy())) x = np.random.randn(3, 4, 5) t = tf.constant(x) self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy())) @require_flax def test_reshape_flax(self): x = np.random.randn(3, 4) t = jnp.array(x) self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3))))) x = np.random.randn(3, 4, 5) t = jnp.array(x) self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5))))) def test_squeeze_numpy(self): x = np.random.randn(1, 3, 4) self.assertTrue(np.allclose(squeeze(x), np.squeeze(x))) x = np.random.randn(1, 4, 1, 5) self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2))) @require_torch def test_squeeze_torch(self): x = np.random.randn(1, 3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy())) x = np.random.randn(1, 4, 1, 5) t = torch.tensor(x) self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy())) @require_tf def test_squeeze_tf(self): x = np.random.randn(1, 3, 4) t = tf.constant(x) self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy())) x = np.random.randn(1, 4, 1, 5) t = tf.constant(x) self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy())) @require_flax def test_squeeze_flax(self): x = np.random.randn(1, 3, 4) t = jnp.array(x) self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t)))) x = np.random.randn(1, 4, 1, 5) t = jnp.array(x) self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2)))) def test_expand_dims_numpy(self): x = np.random.randn(3, 4) self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1))) @require_torch def test_expand_dims_torch(self): x = np.random.randn(3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy())) @require_tf def test_expand_dims_tf(self): x = np.random.randn(3, 4) t = tf.constant(x) self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy())) @require_flax def test_expand_dims_flax(self): x = np.random.randn(3, 4) t = jnp.array(x) self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1)))) class ValidationDecoratorTester(unittest.TestCase): def test_cases_no_warning(self): with warnings.catch_warnings(record=True) as raised_warnings: warnings.simplefilter("always") # basic test @filter_out_non_signature_kwargs() def func1(a): return a result = func1(1) self.assertEqual(result, 1) # include extra kwarg @filter_out_non_signature_kwargs(extra=["extra_arg"]) def func2(a, **kwargs): return a, kwargs a, kwargs = func2(1) self.assertEqual(a, 1) self.assertEqual(kwargs, {}) a, kwargs = func2(1, extra_arg=2) self.assertEqual(a, 1) self.assertEqual(kwargs, {"extra_arg": 2}) # multiple extra kwargs @filter_out_non_signature_kwargs(extra=["extra_arg", "extra_arg2"]) def func3(a, **kwargs): return a, kwargs a, kwargs = func3(2) self.assertEqual(a, 2) self.assertEqual(kwargs, {}) a, kwargs = func3(3, extra_arg2=3) self.assertEqual(a, 3) self.assertEqual(kwargs, {"extra_arg2": 3}) a, kwargs = 
func3(1, extra_arg=2, extra_arg2=3) self.assertEqual(a, 1) self.assertEqual(kwargs, {"extra_arg": 2, "extra_arg2": 3}) # Check that no warnings were raised self.assertEqual(len(raised_warnings), 0, f"Warning raised: {[w.message for w in raised_warnings]}") def test_cases_with_warnings(self): @filter_out_non_signature_kwargs() def func1(a): return a with self.assertWarns(UserWarning): func1(1, extra_arg=2) @filter_out_non_signature_kwargs(extra=["extra_arg"]) def func2(a, **kwargs): return kwargs with self.assertWarns(UserWarning): kwargs = func2(1, extra_arg=2, extra_arg2=3) self.assertEqual(kwargs, {"extra_arg": 2}) @filter_out_non_signature_kwargs(extra=["extra_arg", "extra_arg2"]) def func3(a, **kwargs): return kwargs with self.assertWarns(UserWarning): kwargs = func3(1, extra_arg=2, extra_arg2=3, extra_arg3=4) self.assertEqual(kwargs, {"extra_arg": 2, "extra_arg2": 3})
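The `ValidationDecoratorTester` above exercises `filter_out_non_signature_kwargs`, which drops keyword arguments that are neither in the wrapped function's signature nor in the whitelisted `extra` list, emitting a `UserWarning` for anything it removes. A simplified stand-in showing the mechanism (an illustration only, not the actual `transformers.utils` implementation):

```python
import inspect
import warnings
from functools import wraps


def filter_kwargs_demo(extra=None):
    allowed_extra = set(extra or [])

    def decorator(func):
        params = inspect.signature(func).parameters
        # Only named parameters count; *args / **kwargs placeholders do not whitelist anything.
        allowed = {
            name for name, p in params.items() if p.kind in (p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY)
        } | allowed_extra

        @wraps(func)
        def wrapper(*args, **kwargs):
            dropped = sorted(k for k in kwargs if k not in allowed)
            if dropped:
                warnings.warn(f"Ignoring unexpected keyword arguments: {dropped}", UserWarning)
            kept = {k: v for k, v in kwargs.items() if k in allowed}
            return func(*args, **kept)

        return wrapper

    return decorator


@filter_kwargs_demo(extra=["extra_arg"])
def func(a, **kwargs):
    return a, kwargs


print(func(1, extra_arg=2, bogus=3))  # (1, {'extra_arg': 2}), with a UserWarning about 'bogus'
```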
transformers/tests/utils/test_generic.py/0
{ "file_path": "transformers/tests/utils/test_generic.py", "repo_id": "transformers", "token_count": 4758 }
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # # this test validates that we can stack skip decorators in groups and whether # they work correctly with other decorators # # since the decorators have already built their decision params (like checking # env[], we can't mock the env and test each of the combinations), so ideally # the following 4 should be run. But since we have different CI jobs running # different configs, all combinations should get covered # # RUN_SLOW=1 pytest -rA tests/test_skip_decorators.py # RUN_SLOW=1 CUDA_VISIBLE_DEVICES="" pytest -rA tests/test_skip_decorators.py # RUN_SLOW=0 pytest -rA tests/test_skip_decorators.py # RUN_SLOW=0 CUDA_VISIBLE_DEVICES="" pytest -rA tests/test_skip_decorators.py import os import unittest import pytest from parameterized import parameterized from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device # skipping in unittest tests params = [(1,)] # test that we can stack our skip decorators with 3rd party decorators def check_slow(): run_slow = bool(os.getenv("RUN_SLOW", 0)) if run_slow: assert True else: assert False, "should have been skipped" # test that we can stack our skip decorators def check_slow_torch_cuda(): run_slow = bool(os.getenv("RUN_SLOW", 0)) if run_slow and torch_device == "cuda": assert True else: assert False, "should have been skipped" @require_torch class SkipTester(unittest.TestCase): @slow @require_torch_gpu def test_2_skips_slow_first(self): check_slow_torch_cuda() @require_torch_gpu @slow def test_2_skips_slow_last(self): check_slow_torch_cuda() # The combination of any skip decorator, followed by parameterized fails to skip the tests # 1. @slow manages to correctly skip `test_param_slow_first` # 2. but then `parameterized` creates new tests, with a unique name for each parameter groups. # It has no idea that they are to be skipped and so they all run, ignoring @slow # Therefore skip decorators must come after `parameterized` # # @slow # @parameterized.expand(params) # def test_param_slow_first(self, param=None): # check_slow() # This works as expected: # 1. `parameterized` creates new tests with unique names # 2. each of them gets an opportunity to be skipped @parameterized.expand(params) @slow def test_param_slow_last(self, param=None): check_slow() # skipping in non-unittest tests # no problem at all here @slow @require_torch_gpu def test_pytest_2_skips_slow_first(): check_slow_torch_cuda() @require_torch_gpu @slow def test_pytest_2_skips_slow_last(): check_slow_torch_cuda() @slow @pytest.mark.parametrize("param", [1]) def test_pytest_param_slow_first(param): check_slow() @pytest.mark.parametrize("param", [1]) @slow def test_pytest_param_slow_last(param): check_slow()
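The comments above boil down to one ordering rule: `@parameterized.expand` must be the outermost decorator so that the per-parameter test copies it creates still carry the skip marker applied underneath. A minimal sketch of the safe pattern, using a plain `unittest.skipUnless` as a stand-in for `@slow`:

```python
import unittest

from parameterized import parameterized

RUN_SLOW = False  # stand-in for the RUN_SLOW environment check


class OrderingExample(unittest.TestCase):
    # Skip decorator closest to the function, parameterized.expand on top:
    # each generated test then inherits the skip condition.
    @parameterized.expand([(1,), (2,)])
    @unittest.skipUnless(RUN_SLOW, "slow test, set RUN_SLOW=1 to run")
    def test_param_then_skip(self, param):
        self.assertGreater(param, 0)
```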
transformers/tests/utils/test_skip_decorators.py/0
{ "file_path": "transformers/tests/utils/test_skip_decorators.py", "repo_id": "transformers", "token_count": 1214 }
import argparse import difflib import glob import logging from io import StringIO from create_dependency_mapping import find_priority_list # Console for rich printing from modular_model_converter import convert_modular_file from rich.console import Console from rich.syntax import Syntax logging.basicConfig() logging.getLogger().setLevel(logging.ERROR) console = Console() def process_file(modular_file_path, generated_modeling_content, file_type="modeling_", fix_and_overwrite=False): file_name_prefix = file_type.split("*")[0] file_name_suffix = file_type.split("*")[-1] if "*" in file_type else "" file_path = modular_file_path.replace("modular_", f"{file_name_prefix}_").replace(".py", f"{file_name_suffix}.py") # Read the actual modeling file with open(file_path, "r") as modeling_file: content = modeling_file.read() output_buffer = StringIO(generated_modeling_content[file_type][0]) output_buffer.seek(0) output_content = output_buffer.read() diff = difflib.unified_diff( output_content.splitlines(), content.splitlines(), fromfile=f"{file_path}_generated", tofile=f"{file_path}", lineterm="", ) diff_list = list(diff) # Check for differences if diff_list: if fix_and_overwrite: with open(file_path, "w") as modeling_file: modeling_file.write(generated_modeling_content[file_type][0]) console.print(f"[bold blue]Overwritten {file_path} with the generated content.[/bold blue]") else: console.print(f"\n[bold red]Differences found between the generated code and {file_path}:[/bold red]\n") diff_text = "\n".join(diff_list) syntax = Syntax(diff_text, "diff", theme="ansi_dark", line_numbers=True) console.print(syntax) return 1 else: console.print(f"[bold green]No differences found for {file_path}.[/bold green]") return 0 def compare_files(modular_file_path, fix_and_overwrite=False): # Generate the expected modeling content generated_modeling_content = convert_modular_file(modular_file_path) diff = 0 for file_type in generated_modeling_content.keys(): diff += process_file(modular_file_path, generated_modeling_content, file_type, fix_and_overwrite) return diff if __name__ == "__main__": parser = argparse.ArgumentParser(description="Compare modular_xxx.py files with modeling_xxx.py files.") parser.add_argument( "--files", default=["all"], type=list, nargs="+", help="List of modular_xxx.py files to compare." ) parser.add_argument( "--fix_and_overwrite", action="store_true", help="Overwrite the modeling_xxx.py file if differences are found." ) args = parser.parse_args() if args.files == ["all"]: args.files = glob.glob("src/transformers/models/**/modular_*.py", recursive=True) non_matching_files = 0 for modular_file_path in find_priority_list(args.files): non_matching_files += compare_files(modular_file_path, args.fix_and_overwrite) if non_matching_files and not args.fix_and_overwrite: raise ValueError("Some diff and their modeling code did not match.")
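At its core, `process_file` above compares the generated source against the file on disk with a plain `difflib.unified_diff`. A self-contained illustration of that comparison, with made-up file contents:

```python
import difflib

generated = "def forward(self, x):\n    return self.proj(x)\n"
on_disk = "def forward(self, x):\n    return self.projection(x)\n"

diff = difflib.unified_diff(
    generated.splitlines(),
    on_disk.splitlines(),
    fromfile="modeling_dummy.py_generated",
    tofile="modeling_dummy.py",
    lineterm="",
)
print("\n".join(diff))  # empty output would mean the generated code and the file match
```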
transformers/utils/check_modular_conversion.py/0
{ "file_path": "transformers/utils/check_modular_conversion.py", "repo_id": "transformers", "token_count": 1229 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append(".") r""" The argument `test_file` in this file refers to a model test file. This should be a string of the from `tests/models/*/test_modeling_*.py`. """ def get_module_path(test_file): """Return the module path of a model test file.""" components = test_file.split(os.path.sep) if components[0:2] != ["tests", "models"]: raise ValueError( "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got " f"{test_file} instead." ) test_fn = components[-1] if not test_fn.endswith("py"): raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.") if not test_fn.startswith("test_modeling_"): raise ValueError( f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." ) components = components[:-1] + [test_fn.replace(".py", "")] test_module_path = ".".join(components) return test_module_path def get_test_module(test_file): """Get the module of a model test file.""" test_module_path = get_module_path(test_file) try: test_module = importlib.import_module(test_module_path) except AttributeError as exc: # e.g. if you have a `tests` folder in `site-packages`, created by another package, when trying to import # `tests.models...` raise ValueError( f"Could not import module {test_module_path}. Confirm that you don't have a package with the same root " "name installed or in your environment's `site-packages`." ) from exc return test_module def get_tester_classes(test_file): """Get all classes in a model test file whose names ends with `ModelTester`.""" tester_classes = [] test_module = get_test_module(test_file) for attr in dir(test_module): if attr.endswith("ModelTester"): tester_classes.append(getattr(test_module, attr)) # sort with class names return sorted(tester_classes, key=lambda x: x.__name__) def get_test_classes(test_file): """Get all [test] classes in a model test file with attribute `all_model_classes` that are non-empty. These are usually the (model) test classes containing the (non-slow) tests to run and are subclasses of one of the classes `ModelTesterMixin`, `TFModelTesterMixin` or `FlaxModelTesterMixin`, as well as a subclass of `unittest.TestCase`. Exceptions include `RagTestMixin` (and its subclasses). """ test_classes = [] test_module = get_test_module(test_file) for attr in dir(test_module): attr_value = getattr(test_module, attr) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). 
model_classes = getattr(attr_value, "all_model_classes", []) if len(model_classes) > 0: test_classes.append(attr_value) # sort with class names return sorted(test_classes, key=lambda x: x.__name__) def get_model_classes(test_file): """Get all model classes that appear in `all_model_classes` attributes in a model test file.""" test_classes = get_test_classes(test_file) model_classes = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes) # sort with class names return sorted(model_classes, key=lambda x: x.__name__) def get_model_tester_from_test_class(test_class): """Get the model tester class of a model test class.""" test = test_class() if hasattr(test, "setUp"): test.setUp() model_tester = None if hasattr(test, "model_tester"): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. if test.model_tester is not None: model_tester = test.model_tester.__class__ return model_tester def get_test_classes_for_model(test_file, model_class): """Get all [test] classes in `test_file` that have `model_class` in their `all_model_classes`.""" test_classes = get_test_classes(test_file) target_test_classes = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(test_class) # sort with class names return sorted(target_test_classes, key=lambda x: x.__name__) def get_tester_classes_for_model(test_file, model_class): """Get all model tester classes in `test_file` that are associated to `model_class`.""" test_classes = get_test_classes_for_model(test_file, model_class) tester_classes = [] for test_class in test_classes: tester_class = get_model_tester_from_test_class(test_class) if tester_class is not None: tester_classes.append(tester_class) # sort with class names return sorted(tester_classes, key=lambda x: x.__name__) def get_test_to_tester_mapping(test_file): """Get a mapping from [test] classes to model tester classes in `test_file`. This uses `get_test_classes` which may return classes that are NOT subclasses of `unittest.TestCase`. """ test_classes = get_test_classes(test_file) test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes} return test_tester_mapping def get_model_to_test_mapping(test_file): """Get a mapping from model classes to test classes in `test_file`.""" model_classes = get_model_classes(test_file) model_test_mapping = { model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes } return model_test_mapping def get_model_to_tester_mapping(test_file): """Get a mapping from model classes to model tester classes in `test_file`.""" model_classes = get_model_classes(test_file) model_to_tester_mapping = { model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes } return model_to_tester_mapping def to_json(o): """Make the information succinct and easy to read. Avoid the full class representation like `<class 'transformers.models.bert.modeling_bert.BertForMaskedLM'>` when displaying the results. Instead, we use class name (`BertForMaskedLM`) for the readability. """ if isinstance(o, str): return o elif isinstance(o, type): return o.__name__ elif isinstance(o, (list, tuple)): return [to_json(x) for x in o] elif isinstance(o, dict): return {to_json(k): to_json(v) for k, v in o.items()} else: return o
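A possible usage sketch for the helpers above, assuming it is run from the root of the transformers repository and that the BERT test file exists at the usual path (both are assumptions for illustration):

```python
import sys

sys.path.append("utils")  # assumption: executed from the repository root
from get_test_info import get_model_to_tester_mapping, get_test_to_tester_mapping, to_json

test_file = "tests/models/bert/test_modeling_bert.py"  # example test file path
print(to_json(get_test_to_tester_mapping(test_file)))
print(to_json(get_model_to_tester_mapping(test_file)))
```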
transformers/utils/get_test_info.py/0
{ "file_path": "transformers/utils/get_test_info.py", "repo_id": "transformers", "token_count": 2737 }
"""A simple script to set flexibly CUDA_VISIBLE_DEVICES in GitHub Actions CI workflow files.""" import argparse import os if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--test_folder", type=str, default=None, help="The test folder name of the model being tested. For example, `models/cohere`.", ) args = parser.parse_args() # `test_eager_matches_sdpa_generate` for `cohere` needs a lot of GPU memory! # This depends on the runners. At this moment we are targeting our AWS CI runners. if args.test_folder == "models/cohere": cuda_visible_devices = "0,1,2,3" elif "CUDA_VISIBLE_DEVICES" in os.environ: cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES") else: cuda_visible_devices = "0" print(cuda_visible_devices)
transformers/utils/set_cuda_devices_for_ci.py/0
{ "file_path": "transformers/utils/set_cuda_devices_for_ci.py", "repo_id": "transformers", "token_count": 338 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility that updates the metadata of the Transformers library in the repository `huggingface/transformers-metadata`. Usage for an update (as used by the GitHub action `update_metadata`): ```bash python utils/update_metadata.py --token <token> --commit_sha <commit_sha> ``` Usage to check all pipelines are properly defined in the constant `PIPELINE_TAGS_AND_AUTO_MODELS` of this script, so that new pipelines are properly added as metadata (as used in `make repo-consistency`): ```bash python utils/update_metadata.py --check-only ``` """ import argparse import collections import os import re import tempfile from typing import Dict, List, Tuple import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py TRANSFORMERS_PATH = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. transformers_module = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. _re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") _re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. 
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration|ForRetrieval)") # Fill this with tuples (pipeline_tag, model_mapping, auto_model) PIPELINE_TAGS_AND_AUTO_MODELS = [ ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"), ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"), ("image-feature-extraction", "MODEL_FOR_IMAGE_MAPPING_NAMES", "AutoModel"), ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"), ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"), ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"), ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"), ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"), ("image-text-to-text", "MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES", "AutoModelForImageTextToText"), ("image-to-image", "MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES", "AutoModelForImageToImage"), ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"), ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"), ( "zero-shot-object-detection", "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForZeroShotObjectDetection", ), ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"), ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"), ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"), ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"), ( "table-question-answering", "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForTableQuestionAnswering", ), ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"), ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"), ( "next-sentence-prediction", "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES", "AutoModelForNextSentencePrediction", ), ( "audio-frame-classification", "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioFrameClassification", ), ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"), ( "document-question-answering", "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForDocumentQuestionAnswering", ), ( "visual-question-answering", "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForVisualQuestionAnswering", ), ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"), ( "zero-shot-image-classification", "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForZeroShotImageClassification", ), ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"), ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"), ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"), ("text-to-audio", "MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES", "AutoModelForTextToSpectrogram"), ("text-to-audio", "MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES", "AutoModelForTextToWaveform"), ] def camel_case_split(identifier: str) -> List[str]: """ Split a camel-cased name into 
words. Args: identifier (`str`): The camel-cased name to parse. Returns: `List[str]`: The list of words in the identifier (as seprated by capital letters). Example: ```py >>> camel_case_split("CamelCasedClass") ["Camel", "Cased", "Class"] ``` """ # Regex thanks to https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier) return [m.group(0) for m in matches] def get_frameworks_table() -> pd.DataFrame: """ Generates a dataframe containing the supported auto classes for each model type, using the content of the auto modules. """ # Dictionary model names to config. config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES model_prefix_to_model_type = { config.replace("Config", ""): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. pt_models = collections.defaultdict(bool) tf_models = collections.defaultdict(bool) flax_models = collections.defaultdict(bool) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(transformers_module): lookup_dict = None if _re_tf_models.match(attr_name) is not None: lookup_dict = tf_models attr_name = _re_tf_models.match(attr_name).groups()[0] elif _re_flax_models.match(attr_name) is not None: lookup_dict = flax_models attr_name = _re_flax_models.match(attr_name).groups()[0] elif _re_pt_models.match(attr_name) is not None: lookup_dict = pt_models attr_name = _re_pt_models.match(attr_name).groups()[0] if lookup_dict is not None: while len(attr_name) > 0: if attr_name in model_prefix_to_model_type: lookup_dict[model_prefix_to_model_type[attr_name]] = True break # Try again after removing the last word in the name attr_name = "".join(camel_case_split(attr_name)[:-1]) all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys())) all_models = list(all_models) all_models.sort() data = {"model_type": all_models} data["pytorch"] = [pt_models[t] for t in all_models] data["tensorflow"] = [tf_models[t] for t in all_models] data["flax"] = [flax_models[t] for t in all_models] # Now let's find the right processing class for each model. In order we check if there is a Processor, then a # Tokenizer, then a FeatureExtractor, then an ImageProcessor processors = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: processors[t] = "AutoProcessor" elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: processors[t] = "AutoTokenizer" elif t in transformers_module.models.auto.image_processing_auto.IMAGE_PROCESSOR_MAPPING_NAMES: processors[t] = "AutoImageProcessor" elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: processors[t] = "AutoFeatureExtractor" else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. processors[t] = "AutoTokenizer" data["processor"] = [processors[t] for t in all_models] return pd.DataFrame(data) def update_pipeline_and_auto_class_table(table: Dict[str, Tuple[str, str]]) -> Dict[str, Tuple[str, str]]: """ Update the table maping models to pipelines and auto classes without removing old keys if they don't exist anymore. 
Args: table (`Dict[str, Tuple[str, str]]`): The existing table mapping model names to a tuple containing the pipeline tag and the auto-class name with which they should be used. Returns: `Dict[str, Tuple[str, str]]`: The updated table in the same format. """ auto_modules = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"] auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"] # Loop through all three frameworks for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings): # The type of pipeline may not exist in this framework if not hasattr(module, mapping): continue # First extract all model_names model_names = [] for name in getattr(module, mapping).values(): if isinstance(name, str): model_names.append(name) else: model_names.extend(list(name)) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names}) return table def update_metadata(token: str, commit_sha: str): """ Update the metadata for the Transformers repo in `huggingface/transformers-metadata`. Args: token (`str`): A valid token giving write access to `huggingface/transformers-metadata`. commit_sha (`str`): The commit SHA on Transformers corresponding to this update. """ frameworks_table = get_frameworks_table() frameworks_dataset = Dataset.from_pandas(frameworks_table) resolved_tags_file = hf_hub_download( "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token ) tags_dataset = Dataset.from_json(resolved_tags_file) table = { tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"]) for i in range(len(tags_dataset)) } table = update_pipeline_and_auto_class_table(table) # Sort the model classes to avoid some nondeterministic updates to create false update commits. 
model_classes = sorted(table.keys()) tags_table = pd.DataFrame( { "model_class": model_classes, "pipeline_tag": [table[m][0] for m in model_classes], "auto_class": [table[m][1] for m in model_classes], } ) tags_dataset = Dataset.from_pandas(tags_table) hub_frameworks_json = hf_hub_download( repo_id="huggingface/transformers-metadata", filename="frameworks.json", repo_type="dataset", token=token, ) with open(hub_frameworks_json) as f: hub_frameworks_json = f.read() hub_pipeline_tags_json = hf_hub_download( repo_id="huggingface/transformers-metadata", filename="pipeline_tags.json", repo_type="dataset", token=token, ) with open(hub_pipeline_tags_json) as f: hub_pipeline_tags_json = f.read() with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json")) tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json")) with open(os.path.join(tmp_dir, "frameworks.json")) as f: frameworks_json = f.read() with open(os.path.join(tmp_dir, "pipeline_tags.json")) as f: pipeline_tags_json = f.read() frameworks_equal = hub_frameworks_json == frameworks_json hub_pipeline_tags_equal = hub_pipeline_tags_json == pipeline_tags_json if frameworks_equal and hub_pipeline_tags_equal: print("No updates on the Hub, not pushing the metadata files.") return if commit_sha is not None: commit_message = ( f"Update with commit {commit_sha}\n\nSee: " f"https://github.com/huggingface/transformers/commit/{commit_sha}" ) else: commit_message = "Update" upload_folder( repo_id="huggingface/transformers-metadata", folder_path=tmp_dir, repo_type="dataset", token=token, commit_message=commit_message, ) def check_pipeline_tags(): """ Check all pipeline tags are properly defined in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant of this script. """ in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS missing = [] for key in pipeline_tasks: if key not in in_table: model = pipeline_tasks[key]["pt"] if isinstance(model, (list, tuple)): model = model[0] model = model.__name__ if model not in in_table.values(): missing.append(key) if len(missing) > 0: msg = ", ".join(missing) raise ValueError( "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside " f"`utils/update_metadata.py`: {msg}. Please add them!" ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.") parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.") parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.") args = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
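The framework detection in `get_frameworks_table` hinges on the three regexes defined near the top of the script. A standalone illustration of how they classify class names (the patterns are repeated so the snippet is self-contained; the class names are just example strings):

```python
import re

# Same patterns as in the script above.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration|ForRetrieval)")

for name in ["TFBertModel", "FlaxT5ForConditionalGeneration", "GPT2Model"]:
    if _re_tf_models.match(name) is not None:
        print(name, "-> tensorflow, prefix:", _re_tf_models.match(name).groups()[0])
    elif _re_flax_models.match(name) is not None:
        print(name, "-> flax, prefix:", _re_flax_models.match(name).groups()[0])
    elif _re_pt_models.match(name) is not None:
        print(name, "-> pytorch, prefix:", _re_pt_models.match(name).groups()[0])
```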
transformers/utils/update_metadata.py/0
{ "file_path": "transformers/utils/update_metadata.py", "repo_id": "transformers", "token_count": 6448 }
# Training customization

TRL is designed with modularity in mind so that users are able to efficiently customize the training loop for their needs. Below are some examples of how you can apply and test different techniques.

Note: Although these examples use the DPOTrainer, the customization applies to most (if not all) trainers.

## Train on multiple GPUs / nodes

The trainers in TRL use 🤗 Accelerate to enable distributed training across multiple GPUs or nodes. To do so, first create an 🤗 Accelerate config file by running

```bash
accelerate config
```

and answering the questions according to your multi-GPU / multi-node setup. You can then launch distributed training by running:

```bash
accelerate launch your_script.py
```

We also provide config files in the [examples folder](https://github.com/huggingface/trl/tree/main/examples/accelerate_configs) that can be used as templates. To use these templates, simply pass the path to the config file when launching a job, e.g.:

```shell
accelerate launch --config_file=examples/accelerate_configs/multi_gpu.yaml --num_processes {NUM_GPUS} path_to_script.py --all_arguments_of_the_script
```

Refer to the [examples page](https://github.com/huggingface/trl/tree/main/examples) for more details.

### Distributed training with DeepSpeed

All of the trainers in TRL can be run on multiple GPUs together with DeepSpeed ZeRO-{1,2,3} for efficient sharding of the optimizer states, gradients, and model weights. To do so, run:

```shell
accelerate launch --config_file=examples/accelerate_configs/deepspeed_zero{1,2,3}.yaml --num_processes {NUM_GPUS} path_to_your_script.py --all_arguments_of_the_script
```

Note that for ZeRO-3, a small tweak is needed to initialize your reward model on the correct device via the `zero3_init_context_manager()` context manager. In particular, this is needed to avoid DeepSpeed hanging after a fixed number of training steps. Here is a snippet of what is involved from the [`sentiment_tuning`](https://github.com/huggingface/trl/blob/main/examples/scripts/ppo.py) example:

```python
ds_plugin = ppo_trainer.accelerator.state.deepspeed_plugin
if ds_plugin is not None and ds_plugin.is_zero3_init_enabled():
    with ds_plugin.zero3_init_context_manager(enable=False):
        sentiment_pipe = pipeline("sentiment-analysis", model="lvwerra/distilbert-imdb", device=device)
else:
    sentiment_pipe = pipeline("sentiment-analysis", model="lvwerra/distilbert-imdb", device=device)
```

Consult the 🤗 Accelerate [documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more information about the DeepSpeed plugin.

## Use different optimizers and schedulers

By default, the `DPOTrainer` creates a `torch.optim.AdamW` optimizer.
You can create and define a different optimizer and pass it to `DPOTrainer` as follows:

```python
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from torch import optim
from trl import DPOConfig, DPOTrainer

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train")
training_args = DPOConfig(output_dir="Qwen2.5-0.5B-DPO")

optimizer = optim.SGD(model.parameters(), lr=training_args.learning_rate)

trainer = DPOTrainer(
    model=model,
    args=training_args,
    train_dataset=dataset,
    tokenizer=tokenizer,
    optimizers=(optimizer, None),
)
trainer.train()
```

### Add a learning rate scheduler

You can also play with your training by adding learning rate schedulers.

```python
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from torch import optim
from trl import DPOConfig, DPOTrainer

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train")
training_args = DPOConfig(output_dir="Qwen2.5-0.5B-DPO")

optimizer = optim.AdamW(model.parameters(), lr=training_args.learning_rate)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)

trainer = DPOTrainer(
    model=model,
    args=training_args,
    train_dataset=dataset,
    tokenizer=tokenizer,
    optimizers=(optimizer, lr_scheduler),
)
trainer.train()
```

## Memory efficient fine-tuning by sharing layers

Another tool you can use for more memory efficient fine-tuning is to share layers between the reference model and the model you want to train.

```python
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import create_reference_model, DPOConfig, DPOTrainer

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
ref_model = create_reference_model(model, num_shared_layers=6)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train[:1%]")
training_args = DPOConfig(output_dir="Qwen2.5-0.5B-DPO")

trainer = DPOTrainer(
    model=model,
    ref_model=ref_model,
    args=training_args,
    train_dataset=dataset,
    tokenizer=tokenizer,
)
trainer.train()
```

## Pass 8-bit reference models

Since `trl` supports all keyword arguments when loading a model from `transformers` using `from_pretrained`, you can also leverage `load_in_8bit` from `transformers` for more memory efficient fine-tuning.

Read more about 8-bit model loading in `transformers` [here](https://huggingface.co/docs/transformers/en/peft#load-in-8bit-or-4bit).
```python
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from trl import DPOConfig, DPOTrainer

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
ref_model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct", quantization_config=quantization_config)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train")
training_args = DPOConfig(output_dir="Qwen2.5-0.5B-DPO")

trainer = DPOTrainer(
    model=model,
    ref_model=ref_model,
    args=training_args,
    train_dataset=dataset,
    tokenizer=tokenizer,
)
trainer.train()
```

## Use the CUDA cache optimizer

When training large models, you can manage the CUDA cache more effectively by clearing it iteratively. To do so, simply pass `optimize_cuda_cache=True` to `DPOConfig`:

```python
training_args = DPOConfig(..., optimize_cuda_cache=True)
```
trl/docs/source/customization.md/0
{ "file_path": "trl/docs/source/customization.md", "repo_id": "trl", "token_count": 2231 }
# Learning Tools (Experimental 🧪) Using Large Language Models (LLMs) with tools has been a popular topic recently with awesome works such as [ToolFormer](https://huggingface.co/papers/2302.04761) and [ToolBench](https://huggingface.co/papers/2305.16504). In TRL, we provide a simple example of how to teach LLM to use tools with reinforcement learning. Here's an overview of the scripts in the [trl repository](https://github.com/lvwerra/trl/tree/main/examples/research_projects/tools): | File | Description | |---|---| | [`calculator.py`](https://github.com/lvwerra/trl/blob/main/examples/research_projects/tools/calculator.py) | Script to train LLM to use a calculator with reinforcement learning. | | [`triviaqa.py`](https://github.com/lvwerra/trl/blob/main/examples/research_projects/tools/triviaqa.py) | Script to train LLM to use a wiki tool to answer questions. | | [`python_interpreter.py`](https://github.com/lvwerra/trl/blob/main/examples/research_projects/tools/python_interpreter.py) | Script to train LLM to use python interpreter to solve math puzzles. | <Tip warning={true}> Note that the scripts above rely heavily on the `TextEnvironment` API which is still under active development. The API may change in the future. Please see [`TextEnvironment`](text_environment) for the related docs. </Tip> ## Learning to Use a Calculator The rough idea is as follows: 1. Load a tool such as [ybelkada/simple-calculator](https://huggingface.co/spaces/ybelkada/simple-calculator) that parse a text calculation like `"14 + 34"` and return the calculated number: ```python from transformers import AutoTokenizer, load_tool tool = load_tool("ybelkada/simple-calculator") tool_fn = lambda text: str(round(float(tool(text)), 2)) # rounding to 2 decimal places ``` 1. Define a reward function that returns a positive reward if the tool returns the correct answer. In the script we create a dummy reward function like `reward_fn = lambda x: 1`, but we override the rewards directly later. 1. Create a prompt on how to use the tools ```python # system prompt prompt = """\ What is 13.1-3? <request><SimpleCalculatorTool>13.1-3<call>10.1<response> Result=10.1<submit> What is 4*3? <request><SimpleCalculatorTool>4*3<call>12<response> Result=12<submit> What is 12.1+1? <request><SimpleCalculatorTool>12.1+1<call>13.1<response> Result=13.1<submit> What is 12.1-20? <request><SimpleCalculatorTool>12.1-20<call>-7.9<response> Result=-7.9<submit>""" ``` 3. Create a `trl.TextEnvironment` with the model ```python env = TextEnvironment( model, tokenizer, {"SimpleCalculatorTool": tool_fn}, reward_fn, prompt, generation_kwargs=generation_kwargs, ) ``` 4. Then generate some data such as `tasks = ["\n\nWhat is 13.1-3?", "\n\nWhat is 4*3?"]` and run the environment with `queries, responses, masks, rewards, histories = env.run(tasks)`. The environment will look for the `<call>` token in the prompt and append the tool output to the response; it will also return the mask associated with the response. You can further use the `histories` to visualize the interaction between the model and the tool; `histories[0].show_text()` will show the text with color-coded tool output and `histories[0].show_tokens(tokenizer)` will show visualize the tokens. ![](https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/learning_tools.png) 1. Finally, we can train the model with `train_stats = ppo_trainer.step(queries, responses, rewards, masks)`. 
## Experiment results

We trained a model with the above script for 10 random seeds. You can reproduce the run with the following command. Feel free to remove the `--slurm-*` arguments if you don't have access to a slurm cluster.

```
WANDB_TAGS="calculator_final" python benchmark/benchmark.py \
    --command "python examples/research_projects/tools/calculator.py" \
    --num-seeds 10 \
    --start-seed 1 \
    --workers 10 \
    --slurm-gpus-per-task 1 \
    --slurm-ntasks 1 \
    --slurm-total-cpus 8 \
    --slurm-template-path benchmark/trl.slurm_template
```

We can then use [`openrlbenchmark`](https://github.com/openrlbenchmark/openrlbenchmark), which generates the following plot.

```
# pip install openrlbenchmark==0.2.1a5
python -m openrlbenchmark.rlops_multi_metrics \
    --filters '?we=openrlbenchmark&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.tracker_project_name&cen=trl_ppo_trainer_config.value.log_with&metrics=env/reward_mean&metrics=objective/kl' \
        'wandb?tag=calculator_final&cl=calculator_mask' \
    --env-ids trl \
    --check-empty-runs \
    --pc.ncols 2 \
    --pc.ncols-legend 1 \
    --output-filename static/0compare \
    --scan-history
```

![](https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/learning_tools_chart.png)

As we can see, while 1-2 experiments crashed for some reason, most of the runs obtained near-perfect proficiency in the calculator task.

## (Early Experiments 🧪): learning to use a wiki tool for question answering

The [ToolFormer](https://huggingface.co/papers/2302.04761) paper showcases an interesting use case that utilizes a Wikipedia search tool to help answer questions. In this section, we attempt to perform similar experiments but use RL instead to teach the model to use a wiki tool on the [TriviaQA](https://nlp.cs.washington.edu/triviaqa/) dataset.

<Tip warning={true}>

**Note that many settings are different, so the results are not directly comparable.**

</Tip>

### Building a search index

Since [ToolFormer](https://huggingface.co/papers/2302.04761) was not open-sourced, we needed to first replicate the search index. Their paper mentions that the authors built the search index using a BM25 retriever that indexes the Wikipedia dump from [KILT](https://github.com/facebookresearch/KILT).

Fortunately, [`pyserini`](https://github.com/castorini/pyserini) already implements the BM25 retriever and provides a prebuilt index for the KILT Wikipedia dump. We can use the following code to search the index.

```python
from pyserini.search.lucene import LuceneSearcher
import json

searcher = LuceneSearcher.from_prebuilt_index('wikipedia-kilt-doc')

def search(query):
    hits = searcher.search(query, k=1)
    hit = hits[0]
    contents = json.loads(hit.raw)['contents']
    return contents

print(search("tennis racket"))
```
```
Racket (sports equipment)

A racket or racquet is a sports implement consisting of a handled frame with an open hoop across which a network of strings or catgut is stretched tightly. It is used for striking a ball or shuttlecock in games such as squash, tennis, racquetball, and badminton. Collectively, these games are known as racket sports. Racket design and manufacturing has changed considerably over the centuries.

The frame of rackets for all sports was traditionally made of solid wood (later laminated wood) and the strings of animal intestine known as catgut. The traditional racket size was limited by the strength and weight of the wooden frame which had to be strong enough to hold the strings and stiff enough to hit the ball or shuttle. Manufacturers started adding non-wood laminates to wood rackets to improve stiffness. Non-wood rackets were made first of steel, then of aluminum, and then carbon fiber composites. Wood is still used for real tennis, rackets, and xare. Most rackets are now made of composite materials including carbon fiber or fiberglass, metals such as titanium alloys, or ceramics.
...
```
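
As noted in the experiment settings below, only the first paragraph of the search result is handed back to the model, so that tool responses stay short. The wrapper below is a minimal sketch of one way to do that truncation; the `max_chars` cutoff is an illustrative choice, not a value taken from the scripts.

```python
# Illustrative sketch: keep only the first paragraph of the search result
# (in the actual scripts the TextEnvironment additionally bounds the tool response length).
def search_first_paragraph(query, max_chars=600):
    contents = search(query)  # `search` as defined above
    first_paragraph = contents.split("\n\n")[0]
    return first_paragraph[:max_chars]
```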
We then basically deployed this snippet as a Hugging Face space [here](https://huggingface.co/spaces/vwxyzjn/pyserini-wikipedia-kilt-doc), so that we can use the space as a `transformers.Tool` later.

![](https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/pyserini.png)

### Experiment settings

We use the following settings:

* use the `bigcode/starcoderbase` model as the base model
* use the `pyserini-wikipedia-kilt-doc` space as the wiki tool and only use the first paragraph of the search result, allowing the `TextEnvironment` to obtain at most `max_tool_reponse=400` response tokens from the tool.
* test if the response contains the answer string; if so, give a reward of 1, otherwise give a reward of 0.
    * note that this is a simplified evaluation criterion. In [ToolFormer](https://huggingface.co/papers/2302.04761), the authors check if the first 20 words of the response contain the correct answer.
* use the following prompt that demonstrates the usage of the wiki tool.

```python
prompt = """\
Answer the following question:

Q: In which branch of the arts is Patricia Neary famous?
A: Ballets
A2: <request><Wiki>Patricia Neary<call>Patricia Neary (born October 27, 1942) is an American ballerina, choreographer and ballet director, who has been particularly active in Switzerland. She has also been a highly successful ambassador for the Balanchine Trust, bringing George Balanchine's ballets to 60 cities around the globe.<response>
Result=Ballets<submit>

Q: Who won Super Bowl XX?
A: Chicago Bears
A2: <request><Wiki>Super Bowl XX<call>Super Bowl XX was an American football game between the National Football Conference (NFC) champion Chicago Bears and the American Football Conference (AFC) champion New England Patriots to decide the National Football League (NFL) champion for the 1985 season. The Bears defeated the Patriots by the score of 46–10, capturing their first NFL championship (and Chicago's first overall sports victory) since 1963, three years prior to the birth of the Super Bowl. Super Bowl XX was played on January 26, 1986 at the Louisiana Superdome in New Orleans.<response>
Result=Chicago Bears<submit>

Q: """
```

### Result and Discussion

Our experiments show that the agent can learn to use the wiki tool to answer questions. The learning curves mostly trend upward, but one of the experiments did crash.

![](https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/triviaqa_learning_curves.png)

The Wandb report is [here](https://wandb.ai/costa-huang/cleanRL/reports/TriviaQA-Final-Experiments--Vmlldzo1MjY0ODk5) for further inspection.

Note that the correct rate of the trained model is on the low end, which could be due to the following reasons:

* **incorrect searches:** When given the question `"What is Bruce Willis' real first name?"`, if the model searches for `Bruce Willis`, our wiki tool returns "Patrick Poivey (born 18 February 1948) is a French actor.
He is especially known for his voice: he is the French dub voice of Bruce Willis since 1988." But a correct search should return "Walter Bruce Willis (born March 19, 1955) is an American former actor. He achieved fame with a leading role on the comedy-drama series Moonlighting (1985–1989) and appeared in over a hundred films, gaining recognition as an action hero after his portrayal of John McClane in the Die Hard franchise (1988–2013) and other roles."

![](https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/real_first_name.png)

* **unnecessarily long response:** The wiki tool by default sometimes outputs very long sequences. E.g., when the wiki tool searches for "Brown Act":
    * Our wiki tool returns "The Ralph M. Brown Act, located at California Government Code 54950 "et seq.", is an act of the California State Legislature, authored by Assemblymember Ralph M. Brown and passed in 1953, that guarantees the public's right to attend and participate in meetings of local legislative bodies."
    * [ToolFormer](https://huggingface.co/papers/2302.04761)'s wiki tool returns "The Ralph M. Brown Act is an act of the California State Legislature that guarantees the public's right to attend and participate in meetings of local legislative bodies.", which is more succinct.

![](https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/brown_act.png)

## (Early Experiments 🧪): solving math puzzles with a python interpreter

In this section, we attempt to teach the model to use a Python interpreter to solve math puzzles. The rough idea is to give the agent a prompt like the following:

```python
prompt = """\
Example of using a Python API to solve math questions.

Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?

<request><PythonInterpreter>
def solution():
    money_initial = 23
    bagels = 5
    bagel_cost = 3
    money_spent = bagels * bagel_cost
    money_left = money_initial - money_spent
    result = money_left
    return result
print(solution())
<call>8<response>

Result = 8 <submit>

Q: """
```

The training experiment can be found at https://wandb.ai/lvwerra/trl-gsm8k/runs/a5odv01y

![](https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/gms8k_learning_curve.png)
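
As with the calculator task, the reward here ultimately boils down to checking whether the number produced by the interpreter matches the reference answer. The snippet below is only an illustrative sketch of such a check, not the exact logic used in `python_interpreter.py`:

```python
import re

# Illustrative sketch (not the exact logic in python_interpreter.py):
# extract the last number in the generated text and compare it with the reference answer.
def math_reward(generated_text: str, reference_answer: str) -> float:
    numbers = re.findall(r"-?\d+(?:\.\d+)?", generated_text)
    if not numbers:
        return 0.0
    try:
        return 1.0 if abs(float(numbers[-1]) - float(reference_answer)) < 1e-4 else 0.0
    except ValueError:
        return 0.0
```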
trl/docs/source/learning_tools.md/0
{ "file_path": "trl/docs/source/learning_tools.md", "repo_id": "trl", "token_count": 3870 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from dataclasses import dataclass, field from typing import Optional from datasets import load_dataset from huggingface_hub import ModelCard from transformers import HfArgumentParser @dataclass class ScriptArguments: r""" Arguments for the script. Args: push_to_hub (`bool`, *optional*, defaults to `False`): Whether to push the dataset to the Hugging Face Hub. repo_id (`str`, *optional*, defaults to `"trl-lib/hh-rlhf-helpful-base"`): Hugging Face repository ID to push the dataset to. dataset_num_proc (`int` or `None`, *optional*, defaults to `None`): Number of workers to use for dataset processing. """ push_to_hub: bool = field( default=False, metadata={"help": "Whether to push the dataset to the Hugging Face Hub."}, ) repo_id: str = field( default="trl-lib/hh-rlhf-helpful-base", metadata={"help": "Hugging Face repository ID to push the dataset to."} ) dataset_num_proc: Optional[int] = field( default=None, metadata={"help": "Number of workers to use for dataset processing."} ) def common_start(str1: str, str2: str) -> str: # Zip the two strings and iterate over them together common_chars = [] for c1, c2 in zip(str1, str2): if c1 == c2: common_chars.append(c1) else: break # Join the common characters and return as a string return "".join(common_chars) def extract_dialogue(example: str) -> list[dict[str, str]]: # Extract the prompt, which corresponds to the common start of the chosen and rejected dialogues prompt_text = common_start(example["chosen"], example["rejected"]) # The chosen and rejected may share a common start, so we need to remove the common part if not prompt_text.endswith("\n\nAssistant: "): prompt_text = prompt_text[: prompt_text.rfind("\n\nAssistant: ")] + "\n\nAssistant: " # Extract the chosen and rejected lines chosen_line = example["chosen"][len(prompt_text) :] rejected_line = example["rejected"][len(prompt_text) :] # Remove the generation prompt ("\n\nAssistant: ") from the prompt prompt_text = prompt_text[: -len("\n\nAssistant: ")] # Split the string at every occurrence of "Human: " or "Assistant: " prompt_lines = re.split(r"(\n\nAssistant: |\n\nHuman: )", prompt_text) # Remove the first element as it's empty prompt_lines = prompt_lines[1:] prompt = [] for idx in range(0, len(prompt_lines), 2): role = "user" if prompt_lines[idx] == "\n\nHuman: " else "assistant" content = prompt_lines[idx + 1] prompt.append({"role": role, "content": content}) # Remove the prompt from the chosen and rejected dialogues chosen = [{"role": "assistant", "content": chosen_line}] rejected = [{"role": "assistant", "content": rejected_line}] return {"prompt": prompt, "chosen": chosen, "rejected": rejected} model_card = ModelCard(""" --- tags: [trl] --- # HH-RLHF-Helpful-Base Dataset ## Summary The HH-RLHF-Helpful-Base dataset is a processed version of [Anthropic's HH-RLHF](https://huggingface.co/datasets/Anthropic/hh-rlhf) dataset, specifically curated to train models using the [TRL 
library](https://github.com/huggingface/trl) for preference learning and alignment tasks. It contains pairs of text samples, each labeled as either "chosen" or "rejected," based on human preferences regarding the helpfulness of the responses. This dataset enables models to learn human preferences in generating helpful responses, enhancing their ability to assist users effectively. ## Data Structure - **Format**: [Conversational](https://huggingface.co/docs/trl/main/dataset_formats#conversational) - **Type**: [Preference](https://huggingface.co/docs/trl/main/dataset_formats#preference) Columns: - `"prompt"`: The user query. - `"chosen"`: A response deemed helpful by human evaluators. - `"rejected"`: A response considered less helpful or unhelpful. This structure allows models to learn to prefer the _chosen_ response over the _rejected_ one, thereby aligning with human preferences in helpfulness. ## Generation script The script used to generate this dataset can be found [here](https://github.com/huggingface/trl/blob/main/examples/datasets/hh-rlhf-helpful-base.py). """) if __name__ == "__main__": parser = HfArgumentParser(ScriptArguments) script_args = parser.parse_args_into_dataclasses()[0] dataset = load_dataset("Anthropic/hh-rlhf", data_dir="helpful-base") dataset = dataset.map(extract_dialogue, num_proc=script_args.dataset_num_proc) if script_args.push_to_hub: dataset.push_to_hub(script_args.repo_id) model_card.push_to_hub(script_args.repo_id, repo_type="dataset")
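
# Illustrative example (toy strings, not taken from the actual dataset) of what `extract_dialogue`
# returns for an hh-rlhf style row:
#
# >>> extract_dialogue({
# ...     "chosen": "\n\nHuman: Hi\n\nAssistant: Hello!",
# ...     "rejected": "\n\nHuman: Hi\n\nAssistant: Go away.",
# ... })
# {'prompt': [{'role': 'user', 'content': 'Hi'}],
#  'chosen': [{'role': 'assistant', 'content': 'Hello!'}],
#  'rejected': [{'role': 'assistant', 'content': 'Go away.'}]}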
trl/examples/datasets/hh-rlhf-helpful-base.py/0
{ "file_path": "trl/examples/datasets/hh-rlhf-helpful-base.py", "repo_id": "trl", "token_count": 1843 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Optional import torch from peft import PeftConfig, PeftModel from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser @dataclass class ScriptArguments: """ The input names representing the Adapter and Base model fine-tuned with PEFT, and the output name representing the merged model. """ adapter_model_name: Optional[str] = field(default=None, metadata={"help": "the adapter name"}) base_model_name: Optional[str] = field(default=None, metadata={"help": "the base model name"}) output_name: Optional[str] = field(default=None, metadata={"help": "the merged model name"}) parser = HfArgumentParser(ScriptArguments) script_args = parser.parse_args_into_dataclasses()[0] assert script_args.adapter_model_name is not None, "please provide the name of the Adapter you would like to merge" assert script_args.base_model_name is not None, "please provide the name of the Base model" assert script_args.output_name is not None, "please provide the output name of the merged model" peft_config = PeftConfig.from_pretrained(script_args.adapter_model_name) if peft_config.task_type == "SEQ_CLS": # The sequence classification task is used for the reward model in PPO model = AutoModelForSequenceClassification.from_pretrained( script_args.base_model_name, num_labels=1, torch_dtype=torch.bfloat16 ) else: model = AutoModelForCausalLM.from_pretrained( script_args.base_model_name, return_dict=True, torch_dtype=torch.bfloat16 ) tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name) # Load the PEFT model model = PeftModel.from_pretrained(model, script_args.adapter_model_name) model.eval() model = model.merge_and_unload() model.save_pretrained(f"{script_args.output_name}") tokenizer.save_pretrained(f"{script_args.output_name}") model.push_to_hub(f"{script_args.output_name}", use_temp_dir=False)
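
# Example invocation (the model names and paths below are placeholders, not defaults of this script):
#
#   python merge_peft_adapter.py \
#       --adapter_model_name=path/to/peft_adapter \
#       --base_model_name=meta-llama/Llama-2-7b-hf \
#       --output_name=merged-model
#
# The merged weights and the tokenizer are saved under `--output_name`, and the model is also
# pushed to the Hub under that name.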
trl/examples/research_projects/stack_llama/scripts/merge_peft_adapter.py/0
{ "file_path": "trl/examples/research_projects/stack_llama/scripts/merge_peft_adapter.py", "repo_id": "trl", "token_count": 814 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil from accelerate import PartialState from datasets import load_dataset from transformers import ( AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser, ) from trl import ModelConfig, RLOOConfig, RLOOTrainer, ScriptArguments from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE """ python examples/scripts/rloo/rloo_tldr.py \ --dataset_name trl-internal-testing/tldr-preference-sft-trl-style \ --dataset_test_split validation \ --learning_rate 3e-6 \ --output_dir models/minimal/ppo \ --per_device_train_batch_size 1 \ --gradient_accumulation_steps 64 \ --total_episodes 30000 \ --model_name_or_path EleutherAI/pythia-1b-deduped \ --sft_model_path cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr \ --reward_model_path cleanrl/EleutherAI_pythia-1b-deduped__reward__tldr \ --missing_eos_penalty 1.0 \ --stop_token eos \ --response_length 53 accelerate launch --config_file examples/accelerate_configs/deepspeed_zero2.yaml \ examples/scripts/rloo/rloo_tldr.py \ --dataset_name trl-internal-testing/tldr-preference-sft-trl-style \ --dataset_test_split validation \ --output_dir models/minimal/rloo_tldr \ --num_ppo_epochs 1 \ --num_mini_batches 1 \ --learning_rate 3e-6 \ --per_device_train_batch_size 16 \ --gradient_accumulation_steps 4 \ --total_episodes 1000000 \ --model_name_or_path EleutherAI/pythia-1b-deduped \ --sft_model_path cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr \ --reward_model_path cleanrl/EleutherAI_pythia-1b-deduped__reward__tldr \ --local_rollout_forward_batch_size 16 \ --missing_eos_penalty 1.0 \ --stop_token eos """ if __name__ == "__main__": parser = HfArgumentParser((ScriptArguments, RLOOConfig, ModelConfig)) script_args, training_args, model_args = parser.parse_args_into_dataclasses() # remove output_dir if exists shutil.rmtree(training_args.output_dir, ignore_errors=True) ################ # Model & Tokenizer ################ tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, padding_side="left", trust_remote_code=model_args.trust_remote_code ) tokenizer.add_special_tokens({"pad_token": "[PAD]"}) if tokenizer.chat_template is None: tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE reward_model = AutoModelForSequenceClassification.from_pretrained( training_args.reward_model_path, trust_remote_code=model_args.trust_remote_code, num_labels=1 ) ref_policy = AutoModelForCausalLM.from_pretrained( training_args.sft_model_path, trust_remote_code=model_args.trust_remote_code ) policy = AutoModelForCausalLM.from_pretrained( training_args.sft_model_path, trust_remote_code=model_args.trust_remote_code ) ################ # Dataset ################ dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config) train_dataset = dataset[script_args.dataset_train_split] eval_dataset = dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None def prepare_dataset(dataset, tokenizer): 
"""pre-tokenize the dataset before training; only collate during training""" def tokenize(element): input_ids = tokenizer.apply_chat_template( element["messages"][:1], padding=False, add_generation_prompt=True, ) return {"input_ids": input_ids, "lengths": len(input_ids)} return dataset.map( tokenize, remove_columns=dataset.column_names, num_proc=training_args.dataset_num_proc, ) # Compute that only on the main process for faster data processing. # see: https://github.com/huggingface/trl/pull/1255 with PartialState().local_main_process_first(): train_dataset = prepare_dataset(train_dataset, tokenizer) eval_dataset = prepare_dataset(eval_dataset, tokenizer) # filtering train_dataset = train_dataset.filter(lambda x: x["lengths"] <= 512, num_proc=training_args.dataset_num_proc) eval_dataset = eval_dataset.filter(lambda x: x["lengths"] <= 512, num_proc=training_args.dataset_num_proc) assert train_dataset[0]["input_ids"][-1] != tokenizer.eos_token_id, "The last token should not be an EOS token" ################ # Training ################ trainer = RLOOTrainer( config=training_args, processing_class=tokenizer, policy=policy, ref_policy=ref_policy, reward_model=reward_model, train_dataset=train_dataset, eval_dataset=eval_dataset, ) trainer.train() # Save and push to hub trainer.save_model(training_args.output_dir) if training_args.push_to_hub: trainer.push_to_hub(dataset_name=script_args.dataset_name) trainer.generate_completions()
trl/examples/scripts/rloo/rloo_tldr.py/0
{ "file_path": "trl/examples/scripts/rloo/rloo_tldr.py", "repo_id": "trl", "token_count": 2235 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import torch from transformers.utils import is_peft_available from trl import is_diffusers_available from .testing_utils import require_diffusers if is_diffusers_available() and is_peft_available(): from trl import DDPOConfig, DDPOTrainer, DefaultDDPOStableDiffusionPipeline def scorer_function(images, prompts, metadata): return torch.randn(1) * 3.0, {} def prompt_function(): return ("cabbages", {}) @require_diffusers class DDPOTrainerTester(unittest.TestCase): """ Test the DDPOTrainer class. """ def setUp(self): self.training_args = DDPOConfig( num_epochs=2, train_gradient_accumulation_steps=1, per_prompt_stat_tracking_buffer_size=32, sample_num_batches_per_epoch=2, sample_batch_size=2, mixed_precision=None, save_freq=1000000, ) pretrained_model = "hf-internal-testing/tiny-stable-diffusion-torch" pretrained_revision = "main" pipeline = DefaultDDPOStableDiffusionPipeline( pretrained_model, pretrained_model_revision=pretrained_revision, use_lora=False ) self.trainer = DDPOTrainer(self.training_args, scorer_function, prompt_function, pipeline) return super().setUp() def tearDown(self) -> None: gc.collect() def test_loss(self): advantage = torch.tensor([-1.0]) clip_range = 0.0001 ratio = torch.tensor([1.0]) loss = self.trainer.loss(advantage, clip_range, ratio) self.assertEqual(loss.item(), 1.0) def test_generate_samples(self): samples, output_pairs = self.trainer._generate_samples(1, 2) self.assertEqual(len(samples), 1) self.assertEqual(len(output_pairs), 1) self.assertEqual(len(output_pairs[0][0]), 2) def test_calculate_loss(self): samples, _ = self.trainer._generate_samples(1, 2) sample = samples[0] latents = sample["latents"][0, 0].unsqueeze(0) next_latents = sample["next_latents"][0, 0].unsqueeze(0) log_probs = sample["log_probs"][0, 0].unsqueeze(0) timesteps = sample["timesteps"][0, 0].unsqueeze(0) prompt_embeds = sample["prompt_embeds"] advantage = torch.tensor([1.0], device=prompt_embeds.device) self.assertTupleEqual(latents.shape, (1, 4, 64, 64)) self.assertTupleEqual(next_latents.shape, (1, 4, 64, 64)) self.assertTupleEqual(log_probs.shape, (1,)) self.assertTupleEqual(timesteps.shape, (1,)) self.assertTupleEqual(prompt_embeds.shape, (2, 77, 32)) loss, approx_kl, clipfrac = self.trainer.calculate_loss( latents, timesteps, next_latents, log_probs, advantage, prompt_embeds ) self.assertTrue(torch.isfinite(loss.cpu())) @require_diffusers class DDPOTrainerWithLoRATester(DDPOTrainerTester): """ Test the DDPOTrainer class. 
""" def setUp(self): self.training_args = DDPOConfig( num_epochs=2, train_gradient_accumulation_steps=1, per_prompt_stat_tracking_buffer_size=32, sample_num_batches_per_epoch=2, sample_batch_size=2, mixed_precision=None, save_freq=1000000, ) pretrained_model = "hf-internal-testing/tiny-stable-diffusion-torch" pretrained_revision = "main" pipeline = DefaultDDPOStableDiffusionPipeline( pretrained_model, pretrained_model_revision=pretrained_revision, use_lora=True ) self.trainer = DDPOTrainer(self.training_args, scorer_function, prompt_function, pipeline) return super().setUp()
trl/tests/test_ddpo_trainer.py/0
{ "file_path": "trl/tests/test_ddpo_trainer.py", "repo_id": "trl", "token_count": 1846 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import torch from datasets import Dataset, load_dataset from transformers import AutoModelForSequenceClassification, AutoTokenizer from transformers.testing_utils import require_peft from transformers.utils import is_peft_available from trl import RewardConfig, RewardTrainer, maybe_apply_chat_template from trl.trainer.reward_trainer import _tokenize if is_peft_available(): from peft import LoraConfig, TaskType class RewardTrainerTester(unittest.TestCase): def setUp(self): self.model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) self.model = AutoModelForSequenceClassification.from_pretrained(self.model_id) self.model.config.pad_token_id = self.tokenizer.pad_token_id def test_preprocessing_conversational(self): with tempfile.TemporaryDirectory() as tmp_dir: dummy_dataset = load_dataset("trl-internal-testing/zen", "conversational_preference", split="train") training_args = RewardConfig(output_dir=tmp_dir, report_to="none") trainer = RewardTrainer( model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset ) dummy_dataset = dummy_dataset.map(maybe_apply_chat_template, fn_kwargs={"tokenizer": self.tokenizer}) dummy_dataset = dummy_dataset.map(_tokenize, batched=True, fn_kwargs={"tokenizer": self.tokenizer}) self.assertDictEqual(trainer.train_dataset[:], dummy_dataset[:]) def test_preprocessing_standard(self): # No chat template, so we load a fresh tokenizer tokenizer = AutoTokenizer.from_pretrained(self.model_id) with tempfile.TemporaryDirectory() as tmp_dir: dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_preference", split="train") training_args = RewardConfig(output_dir=tmp_dir, report_to="none") trainer = RewardTrainer( model=self.model, args=training_args, processing_class=tokenizer, train_dataset=dummy_dataset ) dummy_dataset = dummy_dataset.map(_tokenize, batched=True, fn_kwargs={"tokenizer": tokenizer}) self.assertDictEqual(trainer.train_dataset[:], dummy_dataset[:]) def test_train_full(self): with tempfile.TemporaryDirectory() as tmp_dir: dummy_dataset = load_dataset("trl-internal-testing/zen", "conversational_preference", split="train") training_args = RewardConfig(output_dir=tmp_dir, max_steps=3, report_to="none") trainer = RewardTrainer( model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset ) previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) # Check that the parameters have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) if param.sum() != 0: # ignore 0 biases self.assertFalse(torch.allclose(param, new_param, rtol=1e-12, atol=1e-12)) def test_train_full_pretokenized(self): with tempfile.TemporaryDirectory() as 
tmp_dir: dummy_dataset = load_dataset("trl-internal-testing/zen", "conversational_preference", split="train") dummy_dataset = dummy_dataset.map(maybe_apply_chat_template, fn_kwargs={"tokenizer": self.tokenizer}) dummy_dataset = dummy_dataset.map(_tokenize, batched=True, fn_kwargs={"tokenizer": self.tokenizer}) training_args = RewardConfig(output_dir=tmp_dir, max_steps=3, report_to="none") trainer = RewardTrainer( model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset ) previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) # Check that the parameters have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) if param.sum() != 0: # ignore 0 biases self.assertFalse(torch.allclose(param, new_param, rtol=1e-12, atol=1e-12)) @require_peft def test_train_lora(self): peft_config = LoraConfig( task_type=TaskType.SEQ_CLS, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1, ) with tempfile.TemporaryDirectory() as tmp_dir: dummy_dataset = load_dataset("trl-internal-testing/zen", "conversational_preference", split="train") training_args = RewardConfig(output_dir=tmp_dir, max_steps=3, report_to="none") trainer = RewardTrainer( model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset, peft_config=peft_config, ) previous_trainable_params = {} previous_non_trainable_params = {} # due to a change in the way the modules to save are dealt in PEFT. trainable_params_name = ["lora", "modules_to_save"] # check gradients are not None for n, param in trainer.model.named_parameters(): if any(t in n for t in trainable_params_name): previous_trainable_params[n] = param.clone() else: previous_non_trainable_params[n] = param.clone() trainer.train() self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) # Check that the parameters have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) self.assertFalse(torch.allclose(param, new_param, atol=1e-12, rtol=1e-12)) # Check that the non trainable parameters have not changed for n, param in previous_non_trainable_params.items(): new_param = trainer.model.get_parameter(n) self.assertTrue(torch.allclose(param, new_param, atol=1e-12, rtol=1e-12)) @require_peft def test_train_lora_pretokenized(self): peft_config = LoraConfig( task_type=TaskType.SEQ_CLS, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1, ) with tempfile.TemporaryDirectory() as tmp_dir: dummy_dataset = load_dataset("trl-internal-testing/zen", "conversational_preference", split="train") dummy_dataset = dummy_dataset.map(maybe_apply_chat_template, fn_kwargs={"tokenizer": self.tokenizer}) dummy_dataset = dummy_dataset.map(_tokenize, batched=True, fn_kwargs={"tokenizer": self.tokenizer}) training_args = RewardConfig(output_dir=tmp_dir, max_steps=3, report_to="none") trainer = RewardTrainer( model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset, peft_config=peft_config, ) previous_trainable_params = {} previous_non_trainable_params = {} # due to a change in the way the modules to save are dealt in PEFT. 
trainable_params_name = ["lora", "modules_to_save"] # check gradients are not None for n, param in trainer.model.named_parameters(): if any(t in n for t in trainable_params_name): previous_trainable_params[n] = param.clone() else: previous_non_trainable_params[n] = param.clone() trainer.train() self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) # Check that the parameters have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) self.assertFalse(torch.allclose(param, new_param, atol=1e-12, rtol=1e-12)) # Check that the non trainable parameters have not changed for n, param in previous_non_trainable_params.items(): new_param = trainer.model.get_parameter(n) self.assertTrue(torch.allclose(param, new_param, atol=1e-12, rtol=1e-12)) def test_margin(self): with tempfile.TemporaryDirectory() as tmp_dir: dummy_dataset_dict = { "input_ids_chosen": [ torch.LongTensor([0, 1, 2]), ], "attention_mask_chosen": [ torch.LongTensor([1, 1, 1]), ], "input_ids_rejected": [ torch.LongTensor([0, 2]), ], "attention_mask_rejected": [ torch.LongTensor([1, 1]), ], "margin": [ torch.FloatTensor([1.0]), ], } dummy_dataset = Dataset.from_dict(dummy_dataset_dict) training_args = RewardConfig(output_dir=tmp_dir, report_to="none") trainer = RewardTrainer( model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset ) batch = [dummy_dataset[0]] batch = trainer.data_collator(batch) batch = {k: v.to(trainer.model.device) if isinstance(v, torch.Tensor) else v for k, v in batch.items()} loss, outputs = trainer.compute_loss(trainer.model, batch, return_outputs=True) l_val = -torch.nn.functional.logsigmoid( outputs["rewards_chosen"] - outputs["rewards_rejected"] - batch["margin"] ).mean() self.assertLess(abs(loss - l_val), 1e-6) def test_tags(self): with tempfile.TemporaryDirectory() as tmp_dir: dummy_dataset = load_dataset("trl-internal-testing/zen", "conversational_preference", split="train") training_args = RewardConfig(output_dir=tmp_dir, report_to="none") trainer = RewardTrainer( model=self.model, args=training_args, processing_class=self.tokenizer, train_dataset=dummy_dataset ) self.assertEqual(trainer.model.model_tags, trainer._tag_names)
trl/tests/test_reward_trainer.py/0
{ "file_path": "trl/tests/test_reward_trainer.py", "repo_id": "trl", "token_count": 5202 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Callable, Optional, Union import torch from transformers import GenerationConfig, PreTrainedTokenizer, PreTrainedTokenizerFast, set_seed from ..models import SUPPORTED_ARCHITECTURES, PreTrainedModelWrapper class BestOfNSampler: def __init__( self, model: PreTrainedModelWrapper, tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], queries_to_scores: Callable[[list[str]], list[float]], length_sampler: Any, sample_size: int = 4, seed: Optional[int] = None, n_candidates: int = 1, generation_config: Optional[GenerationConfig] = None, ) -> None: r""" Initialize the sampler for best-of-n generation Args: model (`PreTrainedModelWrapper`): The pretrained model to use for generation tokenizer (`PreTrainedTokenizer` or `PreTrainedTokenizerFast`): Tokenizer associated with the pretrained model queries_to_scores (`Callable[[list[str]], list[float]]`): Callable that takes a list of generated texts and returns the associated reward scores length_sampler (`Any`): Sampler used to sample the length of the generated text sample_size (`int`): Number of samples to generate for each query seed (`int`, *optional*): Random seed used to control generation n_candidates (`int`): Number of candidates to return for each query generation_config (`GenerationConfig`, *optional*): Generation config passed to the underlying model's `generate` method. 
See `GenerationConfig` (https://huggingface.co/docs/transformers/v4.29.1/en/main_classes/text_generation#transformers.GenerationConfig) for more details """ if seed is not None: set_seed(seed) if not isinstance(tokenizer, (PreTrainedTokenizer, PreTrainedTokenizerFast)): raise ValueError( f"tokenizer must be a PreTrainedTokenizer or PreTrainedTokenizerFast, got {type(tokenizer)}" ) if not isinstance(model, (SUPPORTED_ARCHITECTURES)): raise ValueError( f"model must be a PreTrainedModelWrapper, got {type(model)} - supported architectures are: {SUPPORTED_ARCHITECTURES}" ) self.model = model self.tokenizer = tokenizer self.queries_to_scores = queries_to_scores self.length_sampler = length_sampler self.gen_config = generation_config self.sample_size = sample_size self.n_candidates = n_candidates def generate( self, tokenized_query: Union[list[int], torch.Tensor, list[torch.Tensor], list[list[int]]], skip_special_tokens: bool = True, device: Optional[Union[str, torch.device]] = None, **generation_kwargs, ) -> list[list[str]]: r""" Generate the best of n samples for input queries Args: tokenized_query (`list[int]` or `torch.Tensor` or `list[torch.Tensor]` or `list[int]`): represents either a single tokenized query (a single tensor or a list of integers) or a batch of tokenized queries (a list of tensors or a list of lists of integers) skip_special_tokens (`bool`): Whether to remove the special tokens from the output device (`str` or `torch.device`, *optional*): The device on which the model will be loaded **generation_kwargs (`dict`, *optional*): Additional keyword arguments passed along to the underlying model's `generate` method. This is used to override generation config Returns: list[list[str]]: A list of lists of generated texts """ queries = None if isinstance(tokenized_query, torch.Tensor) and tokenized_query.ndim == 1: queries = tokenized_query.unsqueeze(0) elif isinstance(tokenized_query, list): element_type = type(tokenized_query[0]) if element_type is int: queries = torch.tensor(tokenized_query).unsqueeze(0) elif element_type is torch.Tensor: queries = [tensor.reshape((1, -1)) for tensor in tokenized_query] else: queries = [torch.tensor(query).reshape((1, -1)) for query in tokenized_query] result = [] for query in queries: queries = query.repeat((self.sample_size, 1)) output = self.model.generate( queries.to(device), max_new_tokens=self.length_sampler(), generation_config=self.gen_config, **generation_kwargs, ).squeeze() output = self.tokenizer.batch_decode(output, skip_special_tokens=skip_special_tokens) scores = torch.tensor(self.queries_to_scores(output)) output = [output[i] for i in scores.topk(self.n_candidates).indices] result.append(output) return result
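
# Illustrative usage sketch (the model/tokenizer names and the toy scorer are placeholders;
# `LengthSampler` is assumed to be available from `trl.core`):
#
#   from transformers import AutoTokenizer
#   from trl import AutoModelForCausalLMWithValueHead
#   from trl.core import LengthSampler
#
#   model = AutoModelForCausalLMWithValueHead.from_pretrained("gpt2")
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   queries_to_scores = lambda texts: [float(len(t)) for t in texts]  # toy scorer
#   sampler = BestOfNSampler(model, tokenizer, queries_to_scores, LengthSampler(16, 32))
#   best_texts = sampler.generate(tokenizer.encode("What is the best of n?"), device="cpu")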
trl/trl/extras/best_of_n_sampler.py/0
{ "file_path": "trl/trl/extras/best_of_n_sampler.py", "repo_id": "trl", "token_count": 2404 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Any, Optional from .sft_config import SFTConfig @dataclass class GKDConfig(SFTConfig): """ Configuration class for [`GKDTrainer`]. Args: temperature (`float`, *optional*, defaults to `0.9`): Temperature for sampling. The higher the temperature, the more random the completions. lmbda (`float`, *optional*, defaults to `0.5`): Lambda parameter that controls the student data fraction (i.e., the proportion of on-policy student-generated outputs). beta (`float`, *optional*, defaults to `0.5`): Interpolation coefficient between `0.0` and `1.0` of the Generalized Jensen-Shannon Divergence loss. When beta is `0.0`, the loss is the KL divergence. When beta is `1.0`, the loss is the Inverse KL Divergence. max_new_tokens (`int`, *optional*, defaults to `128`): Maximum number of tokens to generate per completion. teacher_model_name_or_path (`str` or `None`, *optional*, defaults to `None`): Model name or path of the teacher model. If `None`, the teacher model will be the same as the model being trained. teacher_model_init_kwargs (`dict[str, Any]]` or `None`, *optional*, defaults to `None`): Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the teacher model from a string. disable_dropout (`bool`, *optional*, defaults to `True`): Whether to disable dropout in the model. seq_kd (`bool`, *optional*, defaults to `False`): Seq_kd parameter that controls whether to perform Sequence-Level KD (can be viewed as supervised FT on teacher-generated output). """ temperature: float = field( default=0.9, metadata={"help": "Temperature for sampling. The higher the temperature, the more random the completions."}, ) lmbda: float = field( default=0.5, metadata={ "help": "Lambda parameter that controls the student data fraction (i.e., the proportion of on-policy " "student-generated outputs)." }, ) beta: float = field( default=0.5, metadata={ "help": "Interpolation coefficient between `0.0` and `1.0` of the Generalized Jensen-Shannon Divergence " "loss. When beta is `0.0`, the loss is the KL divergence. When beta is `1.0`, the loss is the Inverse KL " "Divergence." }, ) max_new_tokens: int = field( default=128, metadata={"help": "Maximum number of tokens to generate per completion."}, ) teacher_model_name_or_path: Optional[str] = field( default=None, metadata={ "help": "Model name or path of the teacher model. If `None`, the teacher model will be the same as the " "model being trained." }, ) teacher_model_init_kwargs: Optional[dict[str, Any]] = field( default=None, metadata={ "help": "Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the " "teacher model from a string." 
}, ) disable_dropout: bool = field( default=True, metadata={"help": "Whether to disable dropouts in `model`."}, ) seq_kd: bool = field( default=False, metadata={ "help": "Seq_kd parameter that controls whether to perform Sequence-Level KD (can be viewed as supervised " "FT on teacher-generated output)." }, ) def __post_init__(self): super().__post_init__() # check lmbda and beta are in the range [0, 1] if self.lmbda < 0.0 or self.lmbda > 1.0: raise ValueError("lmbda must be in the range [0.0, 1.0].") if self.beta < 0.0 or self.beta > 1.0: raise ValueError("beta must be in the range [0.0, 1.0].")
trl/trl/trainer/gkd_config.py/0
{ "file_path": "trl/trl/trainer/gkd_config.py", "repo_id": "trl", "token_count": 1765 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import math import os import textwrap import time from collections import defaultdict from contextlib import contextmanager, nullcontext from typing import Optional, Union import numpy as np import pandas as pd import torch import torch.nn as nn from accelerate import Accelerator from accelerate.utils import broadcast, gather_object from datasets import Dataset from torch.utils.data import DataLoader from transformers import ( BaseImageProcessor, DataCollatorWithPadding, FeatureExtractionMixin, GenerationConfig, PreTrainedTokenizerBase, ProcessorMixin, Trainer, TrainerCallback, TrainerControl, is_wandb_available, ) from transformers.integrations import get_reporting_integration_callbacks from transformers.trainer import DEFAULT_CALLBACKS, DEFAULT_PROGRESS_CALLBACK from transformers.trainer_callback import CallbackHandler, ExportableState, PrinterCallback from transformers.utils import is_peft_available from ..core import masked_mean, masked_whiten from ..models import create_reference_model from ..models.utils import unwrap_model_for_generation from .ppo_config import PPOConfig from .utils import ( OnlineTrainerState, batch_generation, disable_dropout_in_model, exact_div, first_true_indices, forward, generate_model_card, get_comet_experiment_url, get_reward, log_table_to_comet_experiment, peft_module_casting_to_bf16, prepare_deepspeed, print_rich_table, selective_log_softmax, truncate_response, ) if is_peft_available(): from peft import PeftConfig, PeftModel, get_peft_model if is_wandb_available(): import wandb INVALID_LOGPROB = 1.0 # taken from https://github.com/OpenLMLab/MOSS-RLHF/blob/40b91eb2f2b71b16919addede0341d2bef70825d/ppo/ppo_trainer.py#L29 # we did this we can do a single `model = accelerator.prepare(model)` class PolicyAndValueWrapper(nn.Module): def __init__(self, policy, value_model) -> None: super().__init__() self.policy = policy self.value_model = value_model self.critic_backbone = getattr(value_model, value_model.base_model_prefix) def forward(self, **kwargs): output = self.critic_backbone(**kwargs) logits = self.value_model.score(output.hidden_states[-1]) return self.policy(**kwargs), logits class PPOTrainer(Trainer): _tag_names = ["trl", "ppo"] def __init__( self, args: PPOConfig, processing_class: Optional[ Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin] ], model: nn.Module, ref_model: Optional[nn.Module], reward_model: nn.Module, train_dataset: Dataset, value_model: Optional[nn.Module] = None, data_collator: Optional[DataCollatorWithPadding] = None, eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None, # less commonly used optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), callbacks: Optional[list[TrainerCallback]] = None, peft_config: Optional["PeftConfig"] = None, ) -> None: if ref_model is model: raise ValueError( "`model` and `ref_model` cannot be the same object. 
If you want `ref_model` to be the " "same as `model`, you must make a copy of it, or `None` if you use peft." ) self.args = args self.processing_class = processing_class self.policy_model = model # Define the collator if not provided if data_collator is None: data_collator = DataCollatorWithPadding(self.processing_class) # Handle stop token settings: update policy model's generation_config to use provided stop token if args.stop_token and args.stop_token_id: raise ValueError("You cannot set both `stop_token` and `stop_token_id`.") elif args.stop_token: if args.stop_token == "eos": self.policy_model.generation_config.eos_token_id = self.stop_token_id = processing_class.eos_token_id else: raise ValueError( f"Unknown `stop_token` {args.stop_token}. Allowed values are: `'eos'` and `None` (no stop token)." ) else: self.policy_model.generation_config.eos_token_id = self.stop_token_id = args.stop_token_id # None or int # peft support if not is_peft_available() and peft_config is not None: raise ImportError( "PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models" ) elif is_peft_available() and peft_config is not None: # if model is a peft model and we have a peft_confg, we merge and unload it first if isinstance(self.policy_model, PeftModel): self.policy_model = self.policy_model.merge_and_unload() # get peft model with the given config self.policy_model = get_peft_model(self.policy_model, peft_config) if args.bf16 and getattr(self.policy_model, "is_loaded_in_4bit", False): peft_module_casting_to_bf16(self.policy_model) self.is_peft_model = is_peft_available() and isinstance(self.policy_model, PeftModel) self.model_adapter_name = args.model_adapter_name self.ref_adapter_name = args.ref_adapter_name if ref_model: self.ref_model = ref_model elif self.is_peft_model: self.ref_model = None else: self.ref_model = create_reference_model(self.policy_model) self.reward_model = reward_model self.train_dataset = train_dataset self.train_dataset_len = len(train_dataset) self.value_model = value_model self.data_collator = data_collator self.eval_dataset = eval_dataset self.optimizer, self.lr_scheduler = optimizers self.optimizer_cls_and_kwargs = None # needed for transformers >= 4.47 ######### # calculate various batch sizes ######### if args.total_episodes is None: # allow the users to define episodes in terms of epochs. 
args.total_episodes = int(args.num_train_epochs * self.train_dataset_len) accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps) self.accelerator = accelerator args.world_size = accelerator.num_processes args.local_batch_size = ( args.per_device_train_batch_size * args.gradient_accumulation_steps * args.num_mini_batches ) args.micro_batch_size = int(args.per_device_train_batch_size * args.world_size) args.batch_size = int(args.local_batch_size * args.world_size) args.mini_batch_size = exact_div( args.batch_size, args.num_mini_batches, "`batch_size` must be a multiple of `num_mini_batches`" ) args.local_mini_batch_size = exact_div( args.local_batch_size, args.num_mini_batches, "`local_batch_size` must be a multiple of `num_mini_batches`" ) if args.whiten_rewards: assert ( args.local_mini_batch_size >= 8 ), f"Per-rank minibatch size {args.local_mini_batch_size} is insufficient for whitening" # `per_rank_rollout_batch_size` is our `args.local_batch_size` # `per_rank_minibatch_size` is our `args.local_mini_batch_size` args.num_total_batches = math.ceil( args.total_episodes / args.batch_size ) # we may train for more than `total_episodes` time_tensor = torch.tensor(int(time.time()), device=accelerator.device) time_int = broadcast(time_tensor, 0).item() # avoid different timestamps across processes args.run_name = f"{args.exp_name}__{args.seed}__{time_int}" self.local_seed = args.seed + accelerator.process_index * 100003 # Prime if args.num_sample_generations > 0: self.sample_generations_freq = max(1, args.num_total_batches // args.num_sample_generations) self.local_dataloader_batch_size = args.local_batch_size ######### # setup model, optimizer, and others ######### for module in [self.policy_model, self.ref_model, self.value_model, self.reward_model]: if module is not None: disable_dropout_in_model(module) self.model = PolicyAndValueWrapper(self.policy_model, self.value_model) self.model.config = self.policy_model.config # needed for pushing to hub self.create_optimizer_and_scheduler( num_training_steps=args.num_total_batches ) # note that we are calling `self.lr_scheduler.step()` manually only at the batch level ######### ### trainer specifics ######### default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to) self.callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks self.callback_handler = CallbackHandler( self.callbacks, self.model, self.processing_class, self.optimizer, self.lr_scheduler ) self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK) self.control = TrainerControl() self.state = OnlineTrainerState( is_local_process_zero=self.is_local_process_zero(), is_world_process_zero=self.is_world_process_zero(), stateful_callbacks=[ cb for cb in self.callback_handler.callbacks + [self.control] if isinstance(cb, ExportableState) ], ) self.current_flos = 0 self.hp_search_backend = None self.is_deepspeed_enabled = getattr(self.accelerator.state, "deepspeed_plugin", None) is not None self.is_fsdp_enabled = getattr(self.accelerator.state, "fsdp_plugin", None) is not None # Create distant repo and output directory if needed self.hub_model_id = None if self.args.push_to_hub: self.init_hf_repo() if self.args.should_save: os.makedirs(self.args.output_dir, exist_ok=True) # Add tags for models that have been loaded with the correct transformers version if hasattr(self.model, "add_model_tags"): self.model.add_model_tags(self._tag_names) ######### ### setup 
dataloader ######### self.dataloader = DataLoader( self.train_dataset, batch_size=self.local_dataloader_batch_size, shuffle=True, collate_fn=self.data_collator, drop_last=True, # needed; otherwise the last batch will be of ragged shape ) # sync random states for DataLoader(shuffle=True) before `accelerator.prepare` # see https://gist.github.com/vwxyzjn/2581bff1e48e185e0b85b6dfe1def79c torch.manual_seed(args.seed) self.model, self.optimizer, self.dataloader = accelerator.prepare(self.model, self.optimizer, self.dataloader) torch.manual_seed(self.local_seed) # reset the local seed again self.eval_dataloader = DataLoader( self.eval_dataset, batch_size=args.per_device_eval_batch_size, collate_fn=self.data_collator, drop_last=True, ) # no need to shuffle eval dataset self.eval_dataloader = accelerator.prepare(self.eval_dataloader) if self.is_deepspeed_enabled: self.reward_model = prepare_deepspeed( self.reward_model, args.per_device_train_batch_size, args.fp16, args.bf16 ) if self.ref_model is None: if not self.is_peft_model: raise ValueError("No reference model and model is not a Peft model.") else: self.ref_model = prepare_deepspeed( self.ref_model, args.per_device_train_batch_size, args.fp16, args.bf16 ) else: if self.ref_model is None: if not self.is_peft_model: raise ValueError("No reference model and model is not a Peft model.") else: self.ref_model = self.ref_model.to(self.accelerator.device) self.reward_model = self.reward_model.to(self.accelerator.device) def get_train_dataloader(self) -> DataLoader: return self.dataloader def get_eval_dataloader(self) -> DataLoader: return self.eval_dataloader @contextmanager def null_ref_context(self): """Context manager for handling null reference model (that is, peft adapter manipulation).""" with ( self.accelerator.unwrap_model(self.model.policy).disable_adapter() if self.is_peft_model and not self.ref_adapter_name else nullcontext() ): if self.ref_adapter_name: self.model.policy.set_adapter(self.ref_adapter_name) yield if self.ref_adapter_name: self.model.policy.set_adapter(self.model_adapter_name or "default") def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False): backup_model = self.model self.model = self.model.policy # save only the policy if self.is_deepspeed_enabled: backup_deepspeed = self.deepspeed self.deepspeed = self.model super().save_model(output_dir, _internal_call) self.model = backup_model if self.is_deepspeed_enabled: self.deepspeed = backup_deepspeed def train(self): args = self.args accelerator = self.accelerator optimizer = self.optimizer model = self.model ref_policy = self.ref_model reward_model = self.reward_model processing_class = self.processing_class dataloader = self.dataloader device = accelerator.device def repeat_generator(): while True: yield from dataloader iter_dataloader = iter(repeat_generator()) generation_config = GenerationConfig( max_new_tokens=args.response_length, temperature=(args.temperature + 1e-7), top_k=0.0, top_p=1.0, do_sample=True, ) accelerator.print("===training policy===") start_time = time.time() stats_shape = (args.num_ppo_epochs, args.num_mini_batches, args.gradient_accumulation_steps) approxkl_stats = torch.zeros(stats_shape, device=device) pg_clipfrac_stats = torch.zeros(stats_shape, device=device) pg_loss_stats = torch.zeros(stats_shape, device=device) vf_loss_stats = torch.zeros(stats_shape, device=device) vf_clipfrac_stats = torch.zeros(stats_shape, device=device) entropy_stats = torch.zeros(stats_shape, device=device) ratio_stats = 
torch.zeros(stats_shape, device=device) model.train() # trainer state initialization self.state.global_step = 0 self.state.episode = 0 self.state.max_steps = args.num_total_batches * args.num_mini_batches self.state.num_train_epochs = args.total_episodes / self.train_dataset_len # Compute absolute values for logging, eval, and save if given as ratio if args.logging_steps is not None: if args.logging_steps < 1: self.state.logging_steps = math.ceil(self.state.max_steps * args.logging_steps) else: self.state.logging_steps = args.logging_steps if args.eval_steps is not None: if args.eval_steps < 1: self.state.eval_steps = math.ceil(self.state.max_steps * args.eval_steps) else: self.state.eval_steps = args.eval_steps if args.save_steps is not None: if args.save_steps < 1: self.state.save_steps = math.ceil(self.state.max_steps * args.save_steps) else: self.state.save_steps = args.save_steps self.control = self.callback_handler.on_train_begin(args, self.state, self.control) # backward compatibility if self.is_deepspeed_enabled: self.deepspeed = self.model self.model_wrapped = self.model for update in range(1, args.num_total_batches + 1): self.state.episode += 1 * args.batch_size data = next(iter_dataloader) with torch.no_grad(): queries = data["input_ids"].to(device) context_length = queries.shape[1] responses = [] postprocessed_responses = [] logprobs = [] ref_logprobs = [] scores = [] sequence_lengths = [] values = [] with unwrap_model_for_generation( self.model, self.accelerator, gather_deepspeed3_params=self.args.ds3_gather_for_generation ) as unwrapped_model: query_responses, logitss = batch_generation( unwrapped_model.policy, queries, args.local_rollout_forward_batch_size, processing_class.pad_token_id, generation_config, ) for i in range(0, queries.shape[0], args.local_rollout_forward_batch_size): query = queries[i : i + args.local_rollout_forward_batch_size] query_response = query_responses[i : i + args.local_rollout_forward_batch_size] response = query_response[:, context_length:] logits = logitss[i : i + args.local_rollout_forward_batch_size] logprob = selective_log_softmax(logits, response) del logits torch.cuda.empty_cache() if ref_policy is None: with self.null_ref_context(): ref_output = forward(model.policy, query_response, processing_class.pad_token_id) else: ref_output = forward(ref_policy, query_response, processing_class.pad_token_id) ref_logits = ref_output.logits[:, context_length - 1 : -1] ref_logits /= args.temperature + 1e-7 ref_logprob = selective_log_softmax(ref_logits, response) del ref_output, ref_logits torch.cuda.empty_cache() # Response Processing 1. truncate response after the first occurrence of `stop_token_id` postprocessed_response = response if self.stop_token_id is not None: # handle the edge case when stop_token_id exists but is 0 postprocessed_response = truncate_response( self.stop_token_id, processing_class.pad_token_id, response ) # Response Processing 2. 
run reward model on the truncated responses postprocessed_query_response = torch.cat((query, postprocessed_response), 1) sequence_length = first_true_indices(postprocessed_response == processing_class.pad_token_id) - 1 unwrapped_value_model = accelerator.unwrap_model(model).value_model full_value, _, _ = get_reward( unwrapped_value_model, query_response, processing_class.pad_token_id, context_length ) value = full_value[:, context_length - 1 : -1].squeeze(-1) _, score, _ = get_reward( reward_model, postprocessed_query_response, processing_class.pad_token_id, context_length ) responses.append(response) postprocessed_responses.append(postprocessed_response) logprobs.append(logprob) ref_logprobs.append(ref_logprob) sequence_lengths.append(sequence_length) scores.append(score) values.append(value) responses = torch.cat(responses, 0) postprocessed_responses = torch.cat(postprocessed_responses, 0) logprobs = torch.cat(logprobs, 0) ref_logprobs = torch.cat(ref_logprobs, 0) sequence_lengths = torch.cat(sequence_lengths, 0) scores = torch.cat(scores, 0) values = torch.cat(values, 0) del (logprob, ref_logprob, full_value, value, score, unwrapped_model) torch.cuda.empty_cache() gc.collect() # Response Processing 3. Filter completion. Ensure that the sample contains stop_token_id # Completions not passing that filter will receive a lower score. contain_eos_token = torch.any(postprocessed_responses == self.processing_class.eos_token_id, dim=-1) if self.args.missing_eos_penalty is not None: scores[~contain_eos_token] -= self.args.missing_eos_penalty # accelerator.print(f"{scores=}, {(contain_eos_token.sum() / len(contain_eos_token))=}") # be very careful with `padding_mask_p1`; see https://excalidraw.com/#json=LWnzG4w2k5DjF_EOL_xPt,e2w3a-hFJ_gX5vOfeyXGTw response_idxs = torch.arange(responses.shape[1], device=responses.device).repeat(responses.shape[0], 1) padding_mask = response_idxs > sequence_lengths.unsqueeze(1) logprobs = torch.masked_fill(logprobs, padding_mask, INVALID_LOGPROB) ref_logprobs = torch.masked_fill(ref_logprobs, padding_mask, INVALID_LOGPROB) sequence_lengths_p1 = sequence_lengths + 1 padding_mask_p1 = response_idxs > (sequence_lengths_p1.unsqueeze(1)) values = torch.masked_fill(values, padding_mask_p1, 0) # 4. compute rewards kl = logprobs - ref_logprobs non_score_reward = -args.kl_coef * kl rewards = non_score_reward.clone() actual_start = torch.arange(rewards.size(0), device=rewards.device) actual_end = torch.where(sequence_lengths_p1 < rewards.size(1), sequence_lengths_p1, sequence_lengths) rewards[[actual_start, actual_end]] += scores # 5. whiten rewards if args.whiten_rewards: rewards = masked_whiten(rewards, mask=~padding_mask_p1, shift_mean=False) rewards = torch.masked_fill(rewards, padding_mask_p1, 0) # 6. 
compute advantages and returns lastgaelam = 0 advantages_reversed = [] gen_length = responses.shape[1] for t in reversed(range(gen_length)): nextvalues = values[:, t + 1] if t < gen_length - 1 else 0.0 delta = rewards[:, t] + args.gamma * nextvalues - values[:, t] lastgaelam = delta + args.gamma * args.lam * lastgaelam advantages_reversed.append(lastgaelam) advantages = torch.stack(advantages_reversed[::-1], axis=1) returns = advantages + values advantages = masked_whiten(advantages, ~padding_mask) advantages = torch.masked_fill(advantages, padding_mask, 0) torch.cuda.empty_cache() # Do multiple epochs of PPO training, with a fresh random shuffle in each epoch for ppo_epoch_idx in range(args.num_ppo_epochs): b_inds = np.random.permutation(args.local_batch_size) minibatch_idx = 0 for mini_batch_start in range(0, args.local_batch_size, args.local_mini_batch_size): mini_batch_end = mini_batch_start + args.local_mini_batch_size mini_batch_inds = b_inds[mini_batch_start:mini_batch_end] gradient_accumulation_idx = 0 for micro_batch_start in range(0, args.local_mini_batch_size, args.per_device_train_batch_size): with accelerator.accumulate(model): micro_batch_end = micro_batch_start + args.per_device_train_batch_size micro_batch_inds = mini_batch_inds[micro_batch_start:micro_batch_end] mb_advantage = advantages[micro_batch_inds] mb_responses = responses[micro_batch_inds] mb_query_responses = query_responses[micro_batch_inds] mb_logprobs = logprobs[micro_batch_inds] mb_return = returns[micro_batch_inds] mb_values = values[micro_batch_inds] output, vpred_temp = forward(model, mb_query_responses, processing_class.pad_token_id) logits = output.logits[:, context_length - 1 : -1] logits /= args.temperature + 1e-7 new_logprobs = selective_log_softmax(logits, mb_responses) new_logprobs = torch.masked_fill( new_logprobs, padding_mask[micro_batch_inds], INVALID_LOGPROB ) vpred = vpred_temp[:, context_length - 1 : -1].squeeze(-1) vpred = torch.masked_fill(vpred, padding_mask_p1[micro_batch_inds], 0) vpredclipped = torch.clamp( vpred, mb_values - args.cliprange_value, mb_values + args.cliprange_value, ) vf_losses1 = torch.square(vpred - mb_return) vf_losses2 = torch.square(vpredclipped - mb_return) vf_loss_max = torch.max(vf_losses1, vf_losses2) vf_loss = 0.5 * masked_mean(vf_loss_max, ~padding_mask_p1[micro_batch_inds]) vf_clipfrac = masked_mean( (vf_losses2 > vf_losses1).float(), ~padding_mask_p1[micro_batch_inds] ) logprobs_diff = new_logprobs - mb_logprobs ratio = torch.exp(logprobs_diff) pg_losses = -mb_advantage * ratio pg_losses2 = -mb_advantage * torch.clamp(ratio, 1.0 - args.cliprange, 1.0 + args.cliprange) pg_loss_max = torch.max(pg_losses, pg_losses2) pg_loss = masked_mean(pg_loss_max, ~padding_mask[micro_batch_inds]) loss = pg_loss + args.vf_coef * vf_loss accelerator.backward(loss) optimizer.step() optimizer.zero_grad() with torch.no_grad(): pg_clipfrac = masked_mean( (pg_losses2 > pg_losses).float(), ~padding_mask[micro_batch_inds] ) prob_dist = torch.nn.functional.softmax(logits, dim=-1) entropy = torch.logsumexp(logits, dim=-1) - torch.sum(prob_dist * logits, dim=-1) approxkl = 0.5 * (logprobs_diff**2).mean() approxkl_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = approxkl pg_clipfrac_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = ( pg_clipfrac ) pg_loss_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = pg_loss vf_loss_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = vf_loss vf_clipfrac_stats[ppo_epoch_idx, minibatch_idx, 
gradient_accumulation_idx] = ( vf_clipfrac ) entropy_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = entropy.mean() ratio_stats[ppo_epoch_idx, minibatch_idx, gradient_accumulation_idx] = ratio.mean() gradient_accumulation_idx += 1 minibatch_idx += 1 # del everything and empty cache # fmt: off del ( output, vpred_temp, logits, new_logprobs, vpred, vpredclipped, vf_losses1, vf_losses2, vf_loss, vf_clipfrac, logprobs_diff, ratio, pg_losses, pg_losses2, pg_loss_max, pg_loss, loss, pg_clipfrac, prob_dist, entropy, approxkl, mb_return, mb_advantage, mb_values, mb_responses, mb_query_responses, mb_logprobs, ) # fmt: on torch.cuda.empty_cache() with torch.no_grad(): mean_kl = kl.sum(1).mean() mean_entropy = (-logprobs).sum(1).mean() mean_non_score_reward = non_score_reward.sum(1).mean() rlhf_reward = mean_non_score_reward + scores.mean() eps = int(self.state.episode / (time.time() - start_time)) metrics = {} metrics["eps"] = eps metrics["objective/kl"] = self.accelerator.gather_for_metrics(mean_kl).mean().item() metrics["objective/entropy"] = self.accelerator.gather_for_metrics(mean_entropy).mean().item() metrics["objective/non_score_reward"] = ( self.accelerator.gather_for_metrics(mean_non_score_reward).mean().item() ) metrics["objective/rlhf_reward"] = self.accelerator.gather_for_metrics(rlhf_reward).mean().item() metrics["objective/scores"] = self.accelerator.gather_for_metrics(scores.mean()).mean().item() metrics["policy/approxkl_avg"] = self.accelerator.gather_for_metrics(approxkl_stats).mean().item() metrics["policy/clipfrac_avg"] = self.accelerator.gather_for_metrics(pg_clipfrac_stats).mean().item() metrics["loss/policy_avg"] = self.accelerator.gather_for_metrics(pg_loss_stats).mean().item() metrics["loss/value_avg"] = self.accelerator.gather_for_metrics(vf_loss_stats).mean().item() metrics["val/clipfrac_avg"] = self.accelerator.gather_for_metrics(vf_clipfrac_stats).mean().item() metrics["policy/entropy_avg"] = self.accelerator.gather_for_metrics(entropy_stats).mean().item() metrics["val/ratio"] = self.accelerator.gather_for_metrics(ratio_stats).mean().item() metrics["val/ratio_var"] = self.accelerator.gather_for_metrics(ratio_stats).var().item() metrics["val/num_eos_tokens"] = (responses == processing_class.eos_token_id).sum().item() metrics["lr"] = self.lr_scheduler.get_last_lr()[0] metrics["episode"] = self.state.episode self.state.epoch = self.state.episode / self.train_dataset_len # used by self.log self.state.global_step += 1 self.log(metrics) self.lr_scheduler.step() self.control = self.callback_handler.on_step_end(args, self.state, self.control) if self.control.should_save: self._save_checkpoint(model, trial=None) self.control = self.callback_handler.on_save(self.args, self.state, self.control) del kl, mean_kl, mean_entropy, mean_non_score_reward, scores, metrics, non_score_reward torch.cuda.empty_cache() gc.collect() if args.num_sample_generations > 0 and (update - 1) % self.sample_generations_freq == 0: self.generate_completions(sampling=True) torch.cuda.empty_cache() del ( query_responses, responses, postprocessed_responses, logprobs, ref_logprobs, values, sequence_lengths, contain_eos_token, sequence_lengths_p1, response_idxs, padding_mask, padding_mask_p1, rewards, actual_start, actual_end, advantages, returns, ) torch.cuda.empty_cache() # HF trainer specifics self.control = self.callback_handler.on_train_end(args, self.state, self.control) if self.control.should_save: self._save_checkpoint(model, trial=None, metrics=None) self.control = 
self.callback_handler.on_save(self.args, self.state, self.control) def generate_completions(self, sampling: bool = False): args = self.args processing_class = self.processing_class generation_config = GenerationConfig( max_new_tokens=self.args.response_length, temperature=(0.01 + 1e-7), top_k=0.0, top_p=1.0, do_sample=True, ) table = defaultdict(list) with unwrap_model_for_generation( self.model, self.accelerator, gather_deepspeed3_params=self.args.ds3_gather_for_generation ) as unwrapped_model: for batch in self.eval_dataloader: query = batch["input_ids"] with torch.no_grad(): context_length = query.shape[1] query_response, _ = batch_generation( unwrapped_model.policy, query, query.shape[0], processing_class.pad_token_id, generation_config, ) response = query_response[:, context_length:] postprocessed_response = response if self.stop_token_id is not None: # handle the edge case when stop_token_id exists but is 0 postprocessed_response = truncate_response( self.stop_token_id, processing_class.pad_token_id, response ) table["query"].extend( gather_object(processing_class.batch_decode(query, skip_special_tokens=True)) ) table["model response"].extend( gather_object(processing_class.batch_decode(postprocessed_response)) ) postprocessed_query_response = torch.cat((query, postprocessed_response), 1) _, score, _ = get_reward( self.reward_model, postprocessed_query_response, processing_class.pad_token_id, context_length ) table["score"].extend(self.accelerator.gather_for_metrics(score).float().cpu().numpy()) if sampling: break df = pd.DataFrame(table) if self.accelerator.is_main_process: print_rich_table(df.iloc[0 : 0 + 5]) if "wandb" in args.report_to: import wandb if wandb.run is not None: wandb.log({"completions": wandb.Table(dataframe=df)}) if "comet_ml" in args.report_to: log_table_to_comet_experiment( name="completions.csv", table=df, ) def create_model_card( self, model_name: Optional[str] = None, dataset_name: Optional[str] = None, tags: Union[str, list[str], None] = None, ): """ Creates a draft of a model card using the information available to the `Trainer`. Args: model_name (`str` or `None`, *optional*, defaults to `None`): Name of the model. dataset_name (`str` or `None`, *optional*, defaults to `None`): Name of the dataset used for training. tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`): Tags to be associated with the model card. """ if not self.is_world_process_zero(): return if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path): base_model = self.model.config._name_or_path else: base_model = None tags = tags or [] if isinstance(tags, str): tags = [tags] if hasattr(self.model.config, "unsloth_version"): tags.append("unsloth") citation = textwrap.dedent("""\ @article{mziegler2019fine-tuning, title = {{Fine-Tuning Language Models from Human Preferences}}, author = {Daniel M. Ziegler and Nisan Stiennon and Jeffrey Wu and Tom B. Brown and Alec Radford and Dario Amodei and Paul F. 
Christiano and Geoffrey Irving}, year = 2019, eprint = {arXiv:1909.08593} }""") model_card = generate_model_card( base_model=base_model, model_name=model_name, hub_model_id=self.hub_model_id, dataset_name=dataset_name, tags=tags, wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None, comet_url=get_comet_experiment_url(), trainer_name="PPO", trainer_citation=citation, paper_title="Fine-Tuning Language Models from Human Preferences", paper_id="1909.08593", ) model_card.save(os.path.join(self.args.output_dir, "README.md"))
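The advantage computation inside the training loop above follows standard Generalized Advantage Estimation (GAE). As a rough standalone illustration (not part of the trainer itself; `rewards` and `values` are hypothetical per-token tensors of shape `(batch, gen_length)`, and `gamma`/`lam` mirror the trainer arguments):

```python
import torch


def compute_gae(rewards: torch.Tensor, values: torch.Tensor, gamma: float = 1.0, lam: float = 0.95):
    """Compute GAE advantages and returns for per-token rewards/values of shape (batch, T)."""
    lastgaelam = torch.zeros(rewards.size(0), device=rewards.device)
    advantages_reversed = []
    gen_length = rewards.size(1)
    for t in reversed(range(gen_length)):
        # Bootstrap with the next value, except at the last generated token.
        nextvalues = values[:, t + 1] if t < gen_length - 1 else 0.0
        delta = rewards[:, t] + gamma * nextvalues - values[:, t]
        lastgaelam = delta + gamma * lam * lastgaelam
        advantages_reversed.append(lastgaelam)
    advantages = torch.stack(advantages_reversed[::-1], dim=1)
    returns = advantages + values
    return advantages, returns
```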
trl/trl/trainer/ppo_trainer.py/0
{ "file_path": "trl/trl/trainer/ppo_trainer.py", "repo_id": "trl", "token_count": 19227 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# Execution process

When working with distributed training systems, it is important to manage how and when processes are executed across GPUs. Some processes finish faster than others, and some processes shouldn't begin if others haven't finished yet. Accelerate provides tools for orchestrating when processes are executed to ensure everything remains synchronized across all devices.

This tutorial will teach you how to execute a process on only one machine and how to delay execution until all processes have reached a certain point.

## Execute on one process

Certain code only needs to be run once on a given machine, such as printing a log statement or only displaying one progress bar on the local main process.

<hfoptions id="local-execution">
<hfoption id="statements">

You should use `accelerator.is_local_main_process` to indicate code that should only be executed once.

```py
from tqdm.auto import tqdm

progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
```

You could also wrap a statement with `accelerator.is_local_main_process`.

> [!TIP]
> For standalone `print` statements that aren't wrapped in `accelerator.is_local_main_process`, replace `print` with Accelerate's [`~Accelerator.print`] method to only print once per process.

```py
if accelerator.is_local_main_process:
    print("Accelerate is the best")
```

</hfoption>
<hfoption id="function">

For a function that should only be executed once, use [`~Accelerator.on_local_main_process`].

```py
@accelerator.on_local_main_process
def do_my_thing():
    "Something done once per server"
    do_thing_once_per_server()
```

</hfoption>
</hfoptions>

You could also direct Accelerate to execute code once across *all processes* regardless of the number of machines. This is useful if you're uploading a final model to the Hub.

<hfoptions id="main-execution">
<hfoption id="statement">

You should use `accelerator.is_main_process` to indicate code that should only be executed once across all processes.

```py
if accelerator.is_main_process:
    repo.push_to_hub()
```

</hfoption>
<hfoption id="function">

For a function that should only be executed once across all processes, use [`~Accelerator.on_main_process`].

```py
@accelerator.on_main_process
def do_my_thing():
    "Something done once per server"
    do_thing_once()
```

</hfoption>
</hfoptions>

## Execute on a specific process

Accelerate can also help you execute functions that should only be executed on a specific process or a local process index.

<hfoptions id="specific-execution">
<hfoption id="specific process">

Use the [`~Accelerator.on_process`] method and specify the process index to execute a function on.

```py
@accelerator.on_process(process_index=0)
def do_my_thing():
    "Something done on process index 0"
    do_thing_on_index_zero()
```

</hfoption>
<hfoption id="local process">

Use the [`~Accelerator.on_local_process`] method and specify the local process index to execute a function on.

```py
@accelerator.on_local_process(local_process_idx=0)
def do_my_thing():
    "Something done on process index 0 on each server"
    do_thing_on_index_zero_on_each_server()
```

</hfoption>
</hfoptions>

## Defer execution

When you run your script on several GPUs at the same time, some code may be executed faster than others. You might need to wait for all processes to reach a certain point before executing the next set of instructions. For instance, you shouldn't save a model before making sure every process is done with training.

To do this, add [`~Accelerator.wait_for_everyone`] in your code. This blocks all processes that have finished first from continuing until all remaining processes have reached the same point (this has no effect if you're running on a single GPU or CPU).

```py
accelerator.wait_for_everyone()
```
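For example, a common pattern is to wait for every process to finish training and then save only once, from the main process. A minimal sketch of this pattern is shown below (it assumes `model` was previously passed through [`~Accelerator.prepare`] and that `accelerator` is your [`Accelerator`] instance):

```py
accelerator.wait_for_everyone()

# Unwrap the model from its distributed wrapper before saving.
unwrapped_model = accelerator.unwrap_model(model)

if accelerator.is_main_process:
    # Save a single copy of the weights from the main process only.
    accelerator.save(unwrapped_model.state_dict(), "model.pt")
```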
accelerate/docs/source/basic_tutorials/execution.md/0
{ "file_path": "accelerate/docs/source/basic_tutorials/execution.md", "repo_id": "accelerate", "token_count": 1307 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# Megatron-LM

[Megatron-LM](https://github.com/NVIDIA/Megatron-LM) enables training large transformer language models at scale. It provides efficient tensor, pipeline and sequence based model parallelism for pre-training transformer based Language Models such as [GPT](https://arxiv.org/abs/2005.14165) (Decoder Only), [BERT](https://arxiv.org/pdf/1810.04805.pdf) (Encoder Only) and [T5](https://arxiv.org/abs/1910.10683) (Encoder-Decoder). For detailed information and how things work behind the scenes, please refer to the GitHub [repo](https://github.com/NVIDIA/Megatron-LM).

## What is integrated?

Accelerate integrates the following features of Megatron-LM to enable large scale pre-training/finetuning of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):

a. **Tensor Parallelism (TP)**: Reduces the memory footprint without much additional communication on intra-node ranks. Each tensor is split into multiple chunks, with each shard residing on a separate GPU. At each step, the same mini-batch of data is processed independently and in parallel by each shard, followed by syncing across all GPUs (`all-reduce` operation). In a simple transformer layer, this leads to 2 `all-reduces` in the forward path and 2 in the backward path. For more details, please refer to the research paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/pdf/1909.08053.pdf) and this section of the blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#tensor-parallelism).

b. **Pipeline Parallelism (PP)**: Reduces the memory footprint and enables large scale training via inter-node parallelization. Reduces the bubble of naive PP via the PipeDream-Flush schedule/1F1B schedule and the Interleaved 1F1B schedule. Layers are distributed uniformly across PP stages. For example, if a model has `24` layers and we have `4` GPUs for pipeline parallelism, each GPU will have `6` layers (24/4). For more details on schedules to reduce the idle time of PP, please refer to the research paper [Efficient Large-Scale Language Model Training on GPU Clusters Using Megatron-LM](https://arxiv.org/pdf/2104.04473.pdf) and this section of the blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#pipeline-parallelism).

c. **Sequence Parallelism (SP)**: Reduces the memory footprint without any additional communication. Only applicable when using TP. It reduces the activation memory required because it prevents duplicate copies from being kept on the tensor parallel ranks after the `all-reduce`: the `all-reduce` is replaced with a `reduce-scatter`, and the `no-op` operation is replaced by an `all-gather`. As `all-reduce = reduce-scatter + all-gather`, this saves a ton of activation memory at no added communication cost.
To put it simply, it shards the outputs of each transformer layer along sequence dimension, e.g., if the sequence length is `1024` and the TP size is `4`, each GPU will have `256` tokens (1024/4) for each sample. This increases the batch size that can be supported for training. For more details, please refer to the research paper [Reducing Activation Recomputation in Large Transformer Models](https://arxiv.org/pdf/2205.05198.pdf). d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces the memory footprint by sharding optimizer states and gradients across DP ranks (versus the traditional method of replicating the optimizer state across data parallel ranks). For example, when using Adam optimizer with mixed-precision training, each parameter accounts for 12 bytes of memory. This gets distributed equally across the GPUs, i.e., each parameter would account for 3 bytes (12/4) if we have 4 GPUs. For more details, please refer the research paper [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/pdf/1910.02054.pdf) and following section of blog [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#zero-data-parallelism). e. **Selective Activation Recomputation**: Reduces the memory footprint of activations significantly via smart activation checkpointing. It doesn't store activations occupying large memory while being fast to recompute thereby achieving great tradeoff between memory and recomputation. For example, for GPT-3, this leads to 70% reduction in required memory for activations at the expense of only 2.7% FLOPs overhead for recomputation of activations. For more details, please refer to the research paper [Reducing Activation Recomputation in Large Transformer Models](https://arxiv.org/pdf/2205.05198.pdf). f. **Fused Kernels**: Fused Softmax, Mixed Precision Fused Layer Norm and Fused gradient accumulation to weight gradient computation of linear layer. PyTorch JIT compiled Fused GeLU and Fused Bias+Dropout+Residual addition. g. **Support for Indexed datasets**: Efficient binary format of datasets for large scale training. Support for the `mmap`, `cached` index file and the `lazy` loader format. h. **Checkpoint reshaping and interoperability**: Utility for reshaping Megatron-LM checkpoints of variable tensor and pipeline parallel sizes to the beloved Transformers sharded checkpoints as it has great support with plethora of tools such as Accelerate Big Model Inference, Megatron-DeepSpeed Inference etc. Support is also available for converting Transformers sharded checkpoints to Megatron-LM checkpoint of variable tensor and pipeline parallel sizes for large scale training. ## Pre-Requisites You will need to install the latest pytorch, cuda, nccl, and NVIDIA [APEX](https://github.com/NVIDIA/apex#quick-start) releases and the nltk library. See [documentation](https://github.com/NVIDIA/Megatron-LM#setup) for more details. Another way to setup the environment is to pull an NVIDIA PyTorch Container that comes with all the required installations from NGC. Below is a step-by-step method to set up the conda environment: 1. Create a virtual environment ``` conda create --name ml ``` 2. Assuming that the machine has CUDA 11.3 installed, installing the corresponding PyTorch GPU Version ``` conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch ``` 3. 
Install Nvidia APEX ``` git clone https://github.com/NVIDIA/apex cd apex pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ cd .. ``` 4. Installing Megatron-LM ``` git clone https://github.com/NVIDIA/Megatron-LM.git cd Megatron-LM git checkout core_r0.5.0 pip install --no-use-pep517 -e . ``` ## Accelerate Megatron-LM Plugin Important features are directly supported via the `accelerate config` command. An example of the corresponding questions for using Megatron-LM features is shown below: ```bash :~$ accelerate config --config_file "megatron_gpt_config.yaml" In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 0 Which type of machine are you using? ([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU): 2 How many different machines will you use (use more than 1 for multi-node training)? [1]: Do you want to use DeepSpeed? [yes/NO]: Do you want to use FullyShardedDataParallel? [yes/NO]: Do you want to use Megatron-LM ? [yes/NO]: yes What is the Tensor Parallelism degree/size? [1]:2 Do you want to enable Sequence Parallelism? [YES/no]: What is the Pipeline Parallelism degree/size? [1]:2 What is the number of micro-batches? [1]:2 Do you want to enable selective activation recomputation? [YES/no]: Do you want to use distributed optimizer which shards optimizer state and gradients across data parallel ranks? [YES/no]: What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: How many GPU(s) should be used for distributed training? [1]:4 Do you wish to use FP16 or BF16 (mixed precision)? [NO/fp16/bf16]: bf16 ``` The resulting config is shown below: ``` ~$ cat megatron_gpt_config.yaml compute_environment: LOCAL_MACHINE deepspeed_config: {} distributed_type: MEGATRON_LM downcast_bf16: 'no' fsdp_config: {} machine_rank: 0 main_process_ip: null main_process_port: null main_training_function: main megatron_lm_config: megatron_lm_gradient_clipping: 1.0 megatron_lm_num_micro_batches: 2 megatron_lm_pp_degree: 2 megatron_lm_recompute_activations: true megatron_lm_sequence_parallelism: true megatron_lm_tp_degree: 2 megatron_lm_use_distributed_optimizer: true mixed_precision: bf16 num_machines: 1 num_processes: 4 rdzv_backend: static same_network: true use_cpu: false ``` We will take the example of GPT pre-training. The minimal changes required to the official `run_clm_no_trainer.py` to use Megatron-LM are as follows: 1. As Megatron-LM uses its own implementation of Optimizer, the corresponding scheduler compatible with it needs to be used. As such, support for only the Megatron-LM's scheduler is present. User will need to create `accelerate.utils.MegatronLMDummyScheduler`. Example is given below: ```python from accelerate.utils import MegatronLMDummyScheduler if accelerator.distributed_type == DistributedType.MEGATRON_LM: lr_scheduler = MegatronLMDummyScheduler( optimizer=optimizer, total_num_steps=args.max_train_steps, warmup_num_steps=args.num_warmup_steps, ) else: lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) ``` 2. Getting the details of the total batch size now needs to be cognization of tensor and pipeline parallel sizes. 
Example of getting the effective total batch size is shown below: ```python if accelerator.distributed_type == DistributedType.MEGATRON_LM: total_batch_size = accelerator.state.megatron_lm_plugin.global_batch_size else: total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps ``` 3. When using Megatron-LM, the losses are already averaged across the data parallel group ```python if accelerator.distributed_type == DistributedType.MEGATRON_LM: losses.append(loss) else: losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size))) if accelerator.distributed_type == DistributedType.MEGATRON_LM: losses = torch.tensor(losses) else: losses = torch.cat(losses) ``` 4. For Megatron-LM, we need to save the model using `accelerator.save_state` ```python if accelerator.distributed_type == DistributedType.MEGATRON_LM: accelerator.save_state(args.output_dir) else: unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) ``` That's it! We are good to go 🚀. Please find the example script in the examples folder at the path `accelerate/examples/by_feature/megatron_lm_gpt_pretraining.py`. Let's run it for `gpt-large` model architecture using 4 A100-80GB GPUs. ```bash accelerate launch --config_file megatron_gpt_config.yaml \ examples/by_feature/megatron_lm_gpt_pretraining.py \ --config_name "gpt2-large" \ --tokenizer_name "gpt2-large" \ --dataset_name wikitext \ --dataset_config_name wikitext-2-raw-v1 \ --block_size 1024 \ --learning_rate 5e-5 \ --per_device_train_batch_size 24 \ --per_device_eval_batch_size 24 \ --num_train_epochs 5 \ --with_tracking \ --report_to "wandb" \ --output_dir "awesome_model" ``` Below are some important excerpts from the output logs: ```bash Loading extension module fused_dense_cuda... >>> done with compiling and loading fused kernels. Compilation time: 3.569 seconds > padded vocab (size: 50257) with 175 dummy tokens (new size: 50432) Building gpt model in the pre-training mode. The Megatron LM model weights are initialized at random in `accelerator.prepare`. Please use `accelerator.load_checkpoint` to load a pre-trained checkpoint matching the distributed setup. Preparing dataloader Preparing dataloader Preparing model > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 210753280 > number of parameters on (tensor, pipeline) model parallel rank (1, 1): 209445120 > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 210753280 > number of parameters on (tensor, pipeline) model parallel rank (0, 1): 209445120 Preparing optimizer Preparing scheduler > learning rate decay style: linear 10/10/2022 22:57:22 - INFO - __main__ - ***** Running training ***** 10/10/2022 22:57:22 - INFO - __main__ - Num examples = 2318 10/10/2022 22:57:22 - INFO - __main__ - Num Epochs = 5 10/10/2022 22:57:22 - INFO - __main__ - Instantaneous batch size per device = 24 10/10/2022 22:57:22 - INFO - __main__ - Total train batch size (w. 
parallel, distributed & accumulation) = 48 10/10/2022 22:57:22 - INFO - __main__ - Gradient Accumulation steps = 1 10/10/2022 22:57:22 - INFO - __main__ - Total optimization steps = 245 20%|████████████▍ | 49/245 [01:04<04:09, 1.27s/it] 10/10/2022 22:58:29 - INFO - __main__ - epoch 0: perplexity: 1222.1594275215962 eval_loss: 7.10837459564209 40%|████████████████████████▊ | 98/245 [02:10<03:07, 1.28s/it] 10/10/2022 22:59:35 - INFO - __main__ - epoch 1: perplexity: 894.5236583794557 eval_loss: 6.796291351318359 60%|████████████████████████████████████▌ | 147/245 [03:16<02:05, 1.28s/it] 10/10/2022 23:00:40 - INFO - __main__ - epoch 2: perplexity: 702.8458788508042 eval_loss: 6.555137634277344 80%|████████████████████████████████████████████████▊ | 196/245 [04:22<01:02, 1.28s/it] 10/10/2022 23:01:46 - INFO - __main__ - epoch 3: perplexity: 600.3220028695281 eval_loss: 6.39746618270874 100%|█████████████████████████████████████████████████████████████| 245/245 [05:27<00:00, 1.28s/it] ``` There are a large number of other options/features that one can set using `accelerate.utils.MegatronLMPlugin`. ## Advanced features to leverage writing custom train step and Megatron-LM Indexed Datasets For leveraging more features, please go through below details. 1. Below is an example of changes required to customize the Train Step while using Megatron-LM. You will implement the `accelerate.utils.AbstractTrainStep` or inherit from their corresponding children `accelerate.utils.GPTTrainStep`, `accelerate.utils.BertTrainStep` or `accelerate.utils.T5TrainStep`. ```python from accelerate.utils import MegatronLMDummyScheduler, GPTTrainStep, avg_losses_across_data_parallel_group # Custom loss function for the Megatron model class GPTTrainStepWithCustomLoss(GPTTrainStep): def __init__(self, megatron_args, **kwargs): super().__init__(megatron_args) self.kwargs = kwargs def get_loss_func(self): def loss_func(inputs, loss_mask, output_tensor): batch_size, seq_length = output_tensor.shape losses = output_tensor.float() loss_mask = loss_mask.view(-1).float() loss = losses.view(-1) * loss_mask # Resize and average loss per sample loss_per_sample = loss.view(batch_size, seq_length).sum(axis=1) loss_mask_per_sample = loss_mask.view(batch_size, seq_length).sum(axis=1) loss_per_sample = loss_per_sample / loss_mask_per_sample # Calculate and scale weighting weights = torch.stack([(inputs == kt).float() for kt in self.kwargs["keytoken_ids"]]).sum(axis=[0, 2]) weights = 1.0 + self.kwargs["alpha"] * weights # Calculate weighted average weighted_loss = (loss_per_sample * weights).mean() # Reduce loss across data parallel groups averaged_loss = avg_losses_across_data_parallel_group([weighted_loss]) return weighted_loss, {"lm loss": averaged_loss[0]} return loss_func def get_forward_step_func(self): def forward_step(data_iterator, model): """Forward step.""" # Get the batch. 
tokens, labels, loss_mask, attention_mask, position_ids = self.get_batch(data_iterator) output_tensor = model(tokens, position_ids, attention_mask, labels=labels) return output_tensor, partial(self.loss_func, tokens, loss_mask) return forward_step def main(): # Custom loss function for the Megatron model keytoken_ids = [] keywords = ["plt", "pd", "sk", "fit", "predict", " plt", " pd", " sk", " fit", " predict"] for keyword in keywords: ids = tokenizer([keyword]).input_ids[0] if len(ids) == 1: keytoken_ids.append(ids[0]) accelerator.print(f"Keytoken ids: {keytoken_ids}") accelerator.state.megatron_lm_plugin.custom_train_step_class = GPTTrainStepWithCustomLoss accelerator.state.megatron_lm_plugin.custom_train_step_kwargs = { "keytoken_ids": keytoken_ids, "alpha": 0.25, } ``` 2. For using the Megatron-LM datasets, a few more changes are required. Dataloaders for these datasets are available only on rank 0 of each tensor parallel group. As such, there are rank where dataloader won't be available and this requires tweaks to the training loop. Being able to do all this shows how flexible and extensible Accelerate is. The changes required are as follows. a. For Megatron-LM indexed datasets, we need to use `MegatronLMDummyDataLoader` and pass the required dataset args to it such as `data_path`, `seq_length` etc. See [here](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/arguments.py#L804) for the list of available args. ```python from accelerate.utils import MegatronLMDummyDataLoader megatron_dataloader_config = { "data_path": args.data_path, "splits_string": args.splits_string, "seq_length": args.block_size, "micro_batch_size": args.per_device_train_batch_size, } megatron_dataloader = MegatronLMDummyDataLoader(**megatron_dataloader_config) accelerator.state.megatron_lm_plugin.megatron_dataset_flag = True ``` b. `megatron_dataloader` is repeated 3 times to get training, validation and test dataloaders as per the `args.splits_string` proportions ```python model, optimizer, lr_scheduler, train_dataloader, eval_dataloader, _ = accelerator.prepare( model, optimizer, lr_scheduler, megatron_dataloader, megatron_dataloader, megatron_dataloader ) ``` c. Changes to training and evaluation loops as dataloader is only available on tensor parallel ranks 0 So, we need to iterate only if the dataloader isn't `None` else provide empty dict As such, we loop using `while` loop and break when `completed_steps` is equal to `args.max_train_steps` This is similar to the Megatron-LM setup wherein user has to provide `max_train_steps` when using Megaton-LM indexed datasets. This displays how flexible and extensible Accelerate is. ```python while completed_steps < args.max_train_steps: model.train() batch = next(train_dataloader) if train_dataloader is not None else {} outputs = model(**batch) loss = outputs.loss ... if completed_steps % eval_interval == 0: eval_completed_steps = 0 losses = [] while eval_completed_steps < eval_iters: model.eval() with torch.no_grad(): batch = next(eval_dataloader) if eval_dataloader is not None else {} outputs = model(**batch) ``` ## Utility for Checkpoint reshaping and interoperability 1. The scripts for these are present in Transformers library under respective models. Currently, it is available for GPT model [checkpoint_reshaping_and_interoperability.py](https://github.com/huggingface/transformers/blob/main/src/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py) 2. 
Below is an example of converting a Megatron-LM checkpoint to a universal Transformers sharded checkpoint.

```bash
python checkpoint_reshaping_and_interoperability.py \
--convert_checkpoint_from_megatron_to_transformers \
--load_path "gpt/iter_0005000" \
--save_path "gpt/trfs_checkpoint" \
--max_shard_size "200MB" \
--tokenizer_name "gpt2" \
--print-checkpoint-structure
```

3. Conversion of a checkpoint from Transformers to Megatron-LM with `tp_size=2`, `pp_size=2` and `dp_size=2`.

```bash
python checkpoint_utils/megatgron_gpt2/checkpoint_reshaping_and_interoperability.py \
--load_path "gpt/trfs_checkpoint" \
--save_path "gpt/megatron_lm_checkpoint" \
--target_tensor_model_parallel_size 2 \
--target_pipeline_model_parallel_size 2 \
--target_data_parallel_size 2 \
--target_params_dtype "bf16" \
--make_vocab_size_divisible_by 128 \
--use_distributed_optimizer \
--print-checkpoint-structure
```

## Megatron-LM GPT models support returning logits and `megatron_generate` function for text generation

1. Returning logits requires setting `return_logits=True` in `MegatronLMPlugin` as shown below. The logits are available in the last stage of the pipeline.

```python
megatron_lm_plugin = MegatronLMPlugin(return_logits=True)
```

2. `megatron_generate` method for Megatron-LM GPT model: This will use Tensor and Pipeline Parallelism to complete generations for a batch of inputs when using greedy with/without top_k/top_p sampling, and for individual prompt inputs when using beam search decoding. Only a subset of the features of `transformers` generate is supported. This helps in using large models via tensor and pipeline parallelism for generation (it already does key-value caching and uses fused kernels by default). This requires the data parallel size to be 1, and sequence parallelism and activation checkpointing to be disabled. It also requires specifying the paths to the tokenizer's vocab file and merges file. The example below shows how to configure and use the `megatron_generate` method for a Megatron-LM GPT model.
```python # specifying tokenizer's vocab and merges file vocab_file = os.path.join(args.resume_from_checkpoint, "vocab.json") merge_file = os.path.join(args.resume_from_checkpoint, "merges.txt") other_megatron_args = {"vocab_file": vocab_file, "merge_file": merge_file} megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args) # inference using `megatron_generate` functionality tokenizer.pad_token = tokenizer.eos_token max_new_tokens = 64 batch_texts = [ "Are you human?", "The purpose of life is", "The arsenal was constructed at the request of", "How are you doing these days?", ] batch_encodings = tokenizer(batch_texts, return_tensors="pt", padding=True) # top-p sampling generated_tokens = model.megatron_generate( batch_encodings["input_ids"], batch_encodings["attention_mask"], max_new_tokens=max_new_tokens, top_p=0.8, top_p_decay=0.5, temperature=0.9, ) decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy()) accelerator.print(decoded_preds) # top-k sampling generated_tokens = model.megatron_generate( batch_encodings["input_ids"], batch_encodings["attention_mask"], max_new_tokens=max_new_tokens, top_k=50, temperature=0.9, ) decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy()) accelerator.print(decoded_preds) # adding `bos` token at the start generated_tokens = model.megatron_generate( batch_encodings["input_ids"], batch_encodings["attention_mask"], max_new_tokens=max_new_tokens, add_BOS=True ) decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy()) accelerator.print(decoded_preds) # beam search => only takes single prompt batch_texts = ["The purpose of life is"] batch_encodings = tokenizer(batch_texts, return_tensors="pt", padding=True) generated_tokens = model.megatron_generate( batch_encodings["input_ids"], batch_encodings["attention_mask"], max_new_tokens=max_new_tokens, num_beams=20, length_penalty=1.5, ) decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy()) accelerator.print(decoded_preds) ``` 3. An end-to-end example of using `megatron_generate` method for Megatron-LM GPT model is available at [megatron_gpt2_generation.py](https://github.com/pacman100/accelerate-megatron-test/blob/main/src/inference/megatron_gpt2_generation.py) with config file [megatron_lm_gpt_generate_config.yaml](https://github.com/pacman100/accelerate-megatron-test/blob/main/src/Configs/megatron_lm_gpt_generate_config.yaml). The bash script with accelerate launch command is available at [megatron_lm_gpt_generate.sh](https://github.com/pacman100/accelerate-megatron-test/blob/main/megatron_lm_gpt_generate.sh). The output logs of the script are available at [megatron_lm_gpt_generate.log](https://github.com/pacman100/accelerate-megatron-test/blob/main/output_logs/megatron_lm_gpt_generate.log). ## Support for ROPE and ALiBi Positional embeddings and Multi-Query Attention 1. For ROPE/ALiBi attention, pass `position_embedding_type` with `("absolute" | "rotary" | "alibi")` to `MegatronLMPlugin` as shown below. ```python other_megatron_args = {"position_embedding_type": "alibi"} megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args) ``` 2. For Multi-Query Attention, pass `attention_head_type` with `("multihead" | "multiquery")` to `MegatronLMPlugin` as shown below. ```python other_megatron_args = {"attention_head_type": "multiquery"} megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args) ``` ## Caveats 1. Supports Transformers GPT2, Megatron-BERT and T5 models. 
This covers Decoder only, Encoder only and Encoder-Decoder model classes.

2. Only the loss is returned from the model forward pass, as there is a quite complex interplay of pipeline, tensor and data parallelism behind the scenes. The `model(**batch_data)` call returns the loss(es) averaged across the data parallel ranks. This is fine for most cases wherein pre-training jobs are run using Megatron-LM features and you can easily compute the `perplexity` using the loss. For the GPT model, returning logits in addition to loss(es) is supported. These logits aren't gathered across data parallel ranks. Use `accelerate.utils.gather_across_data_parallel_groups` to gather logits across data parallel ranks. These logits, along with the labels, can be used for computing various performance metrics (see the sketch after this list).

3. The main process is the last rank, as the losses/logits are available in the last stage of the pipeline. `accelerator.is_main_process` and `accelerator.is_local_main_process` return `True` for the last rank when using the Megatron-LM integration.

4. In the `accelerator.prepare` call, a Megatron-LM model corresponding to a given Transformers model is created with random weights. Please use `accelerator.load_state` to load the Megatron-LM checkpoint with matching TP, PP and DP partitions.

5. Currently, checkpoint reshaping and interoperability support is only available for GPT. Soon it will be extended to BERT and T5.

6. `gradient_accumulation_steps` needs to be 1. When using Megatron-LM, micro batches in the pipeline parallelism setting are synonymous with gradient accumulation.

7. When using Megatron-LM, use `accelerator.save_state` and `accelerator.load_state` for saving and loading checkpoints.

8. Below is the mapping from Megatron-LM model architectures to the equivalent transformers model architectures. Only these transformers model architectures are supported.

a. Megatron-LM [BertModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/bert_model.py) : transformers models with `megatron-bert` in the config's model type, e.g., [MegatronBERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)

b. Megatron-LM [GPTModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py) : transformers models with `gpt2` in the config's model type, e.g., [OpenAI GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2)

c. Megatron-LM [T5Model](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/t5_model.py) : transformers models with `t5` in the config's model type, e.g., [T5](https://huggingface.co/docs/transformers/model_doc/t5) and [MT5](https://huggingface.co/docs/transformers/model_doc/mt5)
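As referenced in caveat 2 above, a minimal sketch of turning gathered GPT logits into a metric could look as follows. This is a hypothetical helper, assuming `gather_across_data_parallel_groups` is importable from `accelerate.utils` (as the caveat suggests) and that `labels` are the unshifted token ids:

```python
import torch

from accelerate.utils import gather_across_data_parallel_groups


def next_token_accuracy(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    """Gather logits/labels across data parallel ranks and compute next-token accuracy."""
    logits = gather_across_data_parallel_groups(logits)
    labels = gather_across_data_parallel_groups(labels)
    # Tokens < n predict token n, so compare shifted predictions against shifted labels.
    predictions = logits[..., :-1, :].argmax(dim=-1)
    targets = labels[..., 1:]
    return (predictions == targets).float().mean()
```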
accelerate/docs/source/usage_guides/megatron_lm.md/0
{ "file_path": "accelerate/docs/source/usage_guides/megatron_lm.md", "repo_id": "accelerate", "token_count": 9538 }
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import gc import os import threading import evaluate import psutil import torch from datasets import load_dataset from torch.distributed.fsdp.fully_sharded_data_parallel import FullOptimStateDictConfig, FullStateDictConfig from torch.utils.data import DataLoader from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed, ) from accelerate import Accelerator, DistributedType, FullyShardedDataParallelPlugin from accelerate.utils import is_npu_available, is_xpu_available ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # - FSDP # # This example also demonstrates the checkpointing and sharding capabilities # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 # New Code # # Converting Bytes to Megabytes def b2mb(x): return int(x / 2**20) # New Code # # This context manager is used to track the peak memory usage of the process class TorchTracemalloc: def __enter__(self): gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.cuda.memory_allocated() elif is_xpu_available(): torch.xpu.empty_cache() torch.xpu.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.xpu.memory_allocated() elif is_npu_available(): torch.npu.empty_cache() torch.npu.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.npu.memory_allocated() self.process = psutil.Process() self.cpu_begin = self.cpu_mem_used() self.peak_monitoring = True peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) peak_monitor_thread.daemon = True peak_monitor_thread.start() return self def cpu_mem_used(self): """get resident set size memory for the current process""" return self.process.memory_info().rss def peak_monitor_func(self): self.cpu_peak = -1 while True: self.cpu_peak = max(self.cpu_mem_used(), self.cpu_peak) # can't sleep or will not catch the peak right (this comment is here on purpose) # time.sleep(0.001) # 1msec if not self.peak_monitoring: break def __exit__(self, *exc): self.peak_monitoring = False gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() self.end = torch.cuda.memory_allocated() self.peak = torch.cuda.max_memory_allocated() elif is_xpu_available(): torch.xpu.empty_cache() self.end = 
torch.xpu.memory_allocated() self.peak = torch.xpu.max_memory_allocated() elif is_npu_available(): torch.npu.empty_cache() self.end = torch.npu.memory_allocated() self.peak = torch.npu.max_memory_allocated() self.used = b2mb(self.end - self.begin) self.peaked = b2mb(self.peak - self.begin) self.cpu_end = self.cpu_mem_used() self.cpu_used = b2mb(self.cpu_end - self.cpu_begin) self.cpu_peaked = b2mb(self.cpu_peak - self.cpu_begin) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders get_dataloaders = mocked_dataloaders # noqa: F811 def training_function(config, args): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": config["num_epochs"] = 2 # New Code # # Pass the advanced FSDP settings not part of the accelerate config by creating fsdp_plugin fsdp_plugin = FullyShardedDataParallelPlugin( state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False), optim_state_dict_config=FullOptimStateDictConfig(offload_to_cpu=False, rank0_only=False), ) # Initialize accelerator if args.with_tracking: accelerator = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="wandb", project_dir=args.logging_dir, fsdp_plugin=fsdp_plugin, ) else: accelerator = Accelerator(fsdp_plugin=fsdp_plugin) accelerator.print(accelerator.distributed_type) if hasattr(args.checkpointing_steps, "isdigit"): if args.checkpointing_steps == "epoch": checkpointing_steps = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): checkpointing_steps = int(args.checkpointing_steps) else: raise ValueError( f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." ) else: checkpointing_steps = None # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: experiment_config = vars(args) accelerator.init_trackers("fsdp_glue_no_trainer", experiment_config) tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) datasets = load_dataset("glue", "mrpc") metric = evaluate.load("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") # If the batch size is too big we use gradient accumulation gradient_accumulation_steps = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA: gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE batch_size = MAX_GPU_BATCH_SIZE def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. 
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) set_seed(seed) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained( args.model_name_or_path, return_dict=True, low_cpu_mem_usage=True ) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": 0.003, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = torch.optim.AdamW(params=optimizer_grouped_parameters, lr=lr, weight_decay=2e-4) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=10, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, ) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) overall_step = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: num_epochs -= int(training_difference.replace("epoch_", "")) resume_step = None else: resume_step = int(training_difference.replace("step_", "")) num_epochs -= resume_step // len(train_dataloader) # If resuming by step, we also need to know exactly how far into the DataLoader we went resume_step = (num_epochs * len(train_dataloader)) - resume_step # Now we train the model for epoch in range(num_epochs): # New Code # # context manager to track the peak memory usage during the training epoch with TorchTracemalloc() as tracemalloc: model.train() if args.with_tracking: total_loss = 0 for step, batch in enumerate(train_dataloader): # We need to skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == 0: if resume_step is not None and step < resume_step: pass # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device) outputs = model(**batch) loss = outputs.loss # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(loss) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() # accelerator.print(lr_scheduler.get_lr()) overall_step += 1 if isinstance(checkpointing_steps, int): output_dir = f"step_{overall_step}" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) # New Code # # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print(f"Memory before entering the train : {b2mb(tracemalloc.begin)}") accelerator.print(f"Memory consumed at the end of the train (end-begin): {tracemalloc.used}") accelerator.print(f"Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}") accelerator.print( f"Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}" ) # Logging the peak memory usage of the GPU to the tracker if args.with_tracking: accelerator.log( { "train_total_peak_memory": tracemalloc.peaked + b2mb(tracemalloc.begin), }, step=epoch, ) # New Code # # context manager to track the peak memory usage during the evaluation with TorchTracemalloc() as tracemalloc: model.eval() for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric) if args.with_tracking: accelerator.log( { "accuracy": eval_metric["accuracy"], "f1": eval_metric["f1"], "train_loss": total_loss.item() / len(train_dataloader), }, step=epoch, ) if checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) # New Code # # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print(f"Memory before entering the eval : {b2mb(tracemalloc.begin)}") accelerator.print(f"Memory consumed at the end of the eval (end-begin): {tracemalloc.used}") accelerator.print(f"Peak Memory consumed during the eval (max-begin): {tracemalloc.peaked}") accelerator.print( f"Total Peak Memory consumed during the eval (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}" ) # Logging the peak memory usage of the GPU to the tracker if args.with_tracking: accelerator.log( { "eval_total_peak_memory": tracemalloc.peaked + b2mb(tracemalloc.begin), }, step=epoch, ) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." 
"and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", ) parser.add_argument( "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help="Location on where to store experiment tracking logs`", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main()
accelerate/examples/by_feature/fsdp_with_peak_mem_tracking.py/0
{ "file_path": "accelerate/examples/by_feature/fsdp_with_peak_mem_tracking.py", "repo_id": "accelerate", "token_count": 7835 }
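The FSDP example above wraps each training and evaluation epoch in a `TorchTracemalloc` context manager (the tail of its `__exit__` appears at the start of the file excerpt) and converts byte counts to megabytes with `b2mb`. Below is a rough, CUDA-only sketch of that pattern; the helper in the actual example additionally handles XPU/NPU backends and samples CPU peak usage in a background thread, and the names here simply mirror the ones it uses.

```python
import gc

import psutil
import torch


def b2mb(x):
    """Convert a byte count to (integer) megabytes."""
    return int(x / 2**20)


class TorchTracemalloc:
    """Context manager reporting GPU/CPU memory deltas for a block of code (CUDA-only sketch)."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()  # zero out the peak gauge
        self.begin = torch.cuda.memory_allocated()
        self.cpu_begin = psutil.Process().memory_info().rss
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)      # memory still held after the block
        self.peaked = b2mb(self.peak - self.begin)   # extra memory needed at the peak
        self.cpu_used = b2mb(psutil.Process().memory_info().rss - self.cpu_begin)
```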
# Distributed inference examples with PiPPy

This repo contains a variety of tutorials for using the [PiPPy](https://github.com/PyTorch/PiPPy) pipeline parallelism library with accelerate. You will find examples covering:

1. How to trace the model using `accelerate.prepare_pippy`
2. How to specify inputs based on what the model expects (when to use `kwargs`, `args`, and such)
3. How to gather the results at the end

## Installation

This requires `accelerate` from the `main` branch (or at least version 0.27.0), `pippy` (the `torchpippy` package) version 0.2.0 or greater, and Python 3.9 or later.

Please install using `pip install .` to pull from the `setup.py` in this repo, or run manually:

```bash
pip install 'accelerate>=0.27.0' 'torchpippy>=0.2.0'
```

## Running code

You can run each script either with `torchrun` or, preferably, with `accelerate launch` (no prior `accelerate config` is needed):

```bash
accelerate launch bert.py
```

Or:

```bash
accelerate launch --num_processes {NUM_GPUS} bert.py
```

Or:

```bash
torchrun --nproc-per-node {NUM_GPUS} bert.py
```

## General speedups

PiPPy can be expected to outperform native model parallelism by a multiplicative factor, since every GPU is processing inputs at all times rather than each input passing through one GPU at a time while the others wait.

Below are some benchmarks we have found when using the accelerate-pippy integration for a few models when running on 2x RTX 4090s:

### Bert

|   | Accelerate/Sequential | PiPPy + Accelerate |
|---|---|---|
| First batch | 0.2137s | 0.3119s |
| Average of 5 batches | 0.0099s | **0.0062s** |

### GPT2

|   | Accelerate/Sequential | PiPPy + Accelerate |
|---|---|---|
| First batch | 0.1959s | 0.4189s |
| Average of 5 batches | 0.0205s | **0.0126s** |

### T5

|   | Accelerate/Sequential | PiPPy + Accelerate |
|---|---|---|
| First batch | 0.2789s | 0.3809s |
| Average of 5 batches | 0.0198s | **0.0166s** |
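## Minimal usage sketch

The scripts in this folder follow roughly the pattern below. This is an illustrative sketch rather than a copy of any one script: the model name, input shapes, and the `split_points`/`example_args` values are placeholders.

```python
import torch
from transformers import AutoModelForSequenceClassification

from accelerate import prepare_pippy

# Build the model on every rank; `prepare_pippy` traces it and assigns one
# pipeline stage per process.
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased")
model.eval()

# Example inputs are needed so the model can be traced into stages.
example_inputs = torch.randint(low=0, high=model.config.vocab_size, size=(2, 128))

model = prepare_pippy(model, split_points="auto", example_args=(example_inputs,))

# Depending on your setup, the inputs may need to be moved to the first device.
with torch.no_grad():
    output = model(example_inputs)

# By default only the last rank holds the pipeline output.
if output is not None:
    print(output)
```

Launch the script with `accelerate launch --num_processes {NUM_GPUS} your_script.py` as shown above, using at least two processes.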
accelerate/examples/inference/pippy/README.md/0
{ "file_path": "accelerate/examples/inference/pippy/README.md", "repo_id": "accelerate", "token_count": 646 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from manim import * class Stage3(Scene): def construct(self): mem = Rectangle(height=0.5,width=0.5) meta_mem = Rectangle(height=0.25,width=0.25) fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0) cpu_left_col_base = [mem.copy() for i in range(6)] cpu_right_col_base = [mem.copy() for i in range(6)] cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0) cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0) cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0) cpu_text = Text("CPU", font_size=24) cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) cpu.move_to([-2.5,-.5,0]) self.add(cpu) gpu_base = [mem.copy() for i in range(4)] gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0) gpu_text = Text("GPU", font_size=24) gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) gpu.move_to([-1,-1,0]) self.add(gpu) model_base = [mem.copy() for i in range(6)] model_rect = VGroup(*model_base).arrange(RIGHT,buff=0) model_text = Text("Model", font_size=24) model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) model.move_to([3, -1., 0]) self.add(model) model_arr = [] model_cpu_arr = [] model_meta_arr = [] for i,rect in enumerate(model_base): rect.set_stroke(YELLOW) cpu_target = Rectangle(height=0.46/4,width=0.46/3).set_stroke(width=0.).set_fill(YELLOW, opacity=0.7) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN+LEFT), buff=0.02, direction=UP) cpu_target.set_x(cpu_target.get_x()+0.1) elif i == 3: cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.) else: cpu_target.next_to(model_cpu_arr[i-1], direction=RIGHT, buff=0.) 
self.add(cpu_target) model_cpu_arr.append(cpu_target) self.add(*model_arr, *model_cpu_arr, *model_meta_arr) checkpoint_base = [mem.copy() for i in range(6)] checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT,buff=0) checkpoint_text = Text("Loaded Checkpoint", font_size=24) checkpoint = Group(checkpoint_rect,checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) checkpoint.move_to([3, .5, 0]) self.add(checkpoint) ckpt_arr = [] ckpt_cpu_arr = [] for i,rect in enumerate(checkpoint_base): target = fill.copy().set_fill(BLUE, opacity=0.7) target.move_to(rect) ckpt_arr.append(target) cpu_target = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i+1]) else: cpu_target.move_to(cpu_right_col_base[i-5]) ckpt_cpu_arr.append(cpu_target) self.add(*ckpt_arr, *ckpt_cpu_arr) key = Square(side_length=2.2) key.move_to([-5, 2, 0]) key_text = MarkupText( f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18, ) key_text.move_to([-5, 2.4, 0]) self.add(key_text, key) blue_text = MarkupText( f"<span fgcolor='{BLUE}'>●</span> Checkpoint", font_size=18, ) blue_text.next_to(key_text, DOWN*2.4, aligned_edge=key_text.get_left()) self.add(blue_text) step_3 = MarkupText( f'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.', font_size=24 ) step_3.move_to([2, 2, 0]) disk_left_col_base = [meta_mem.copy() for i in range(6)] disk_right_col_base = [meta_mem.copy() for i in range(6)] disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0) disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0) disk_rects = VGroup(disk_left_col,disk_right_col).arrange(RIGHT, buff=0) disk_text = Text("Disk", font_size=24) disk = Group(disk_rects,disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) disk.move_to([-4.,-1.25,0]) self.play( Write(step_3, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1) ) animations = [] for i,rect in enumerate(ckpt_cpu_arr): target = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i]).scale(0.5) animations.append(MoveToTarget(target, run_time=1.5)) self.play(*animations) self.play(FadeOut(step_3)) step_4 = MarkupText( f'Then, the checkpoint is removed from memory\nthrough garbage collection.', font_size=24 ) step_4.move_to([2, 2, 0]) self.play( Write(step_4, run_time=3) ) self.play( FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr), ) self.wait()
accelerate/manim_animations/big_model_inference/stage_3.py/0
{ "file_path": "accelerate/manim_animations/big_model_inference/stage_3.py", "repo_id": "accelerate", "token_count": 2891 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random from pathlib import Path from typing import List import numpy as np import torch from safetensors.torch import load_model from torch.cuda.amp import GradScaler from .utils import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_MODEL_NAME, SAFE_WEIGHTS_NAME, SAMPLER_NAME, SCALER_NAME, SCHEDULER_NAME, WEIGHTS_NAME, get_pretty_name, is_mlu_available, is_musa_available, is_torch_xla_available, is_xpu_available, load, save, ) if is_torch_xla_available(): import torch_xla.core.xla_model as xm from .logging import get_logger from .state import PartialState logger = get_logger(__name__) def save_accelerator_state( output_dir: str, model_states: List[dict], optimizers: list, schedulers: list, dataloaders: list, process_index: int, step: int, scaler: GradScaler = None, save_on_each_node: bool = False, safe_serialization: bool = True, ): """ Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory. <Tip> If `safe_serialization` is `True`, models will be saved with `safetensors` while the rest are saved using native `pickle`. </Tip> Args: output_dir (`str` or `os.PathLike`): The name of the folder to save all relevant weights and states. model_states (`List[torch.nn.Module]`): A list of model states optimizers (`List[torch.optim.Optimizer]`): A list of optimizer instances schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`): A list of learning rate schedulers dataloaders (`List[torch.utils.data.DataLoader]`): A list of dataloader instances to save their sampler states process_index (`int`): The current process index in the Accelerator state step (`int`): The current step in the internal step tracker scaler (`torch.amp.GradScaler`, *optional*): An optional gradient scaler instance to save; save_on_each_node (`bool`, *optional*): Whether to save on every node, or only the main node. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). 
""" output_dir = Path(output_dir) # Model states for i, state in enumerate(model_states): weights_name = WEIGHTS_NAME if not safe_serialization else SAFE_WEIGHTS_NAME if i > 0: weights_name = weights_name.replace(".", f"_{i}.") output_model_file = output_dir.joinpath(weights_name) save(state, output_model_file, save_on_each_node=save_on_each_node, safe_serialization=safe_serialization) logger.info(f"Model weights saved in {output_model_file}") # Optimizer states for i, opt in enumerate(optimizers): state = opt.state_dict() optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin" output_optimizer_file = output_dir.joinpath(optimizer_name) save(state, output_optimizer_file, save_on_each_node=save_on_each_node, safe_serialization=False) logger.info(f"Optimizer state saved in {output_optimizer_file}") # Scheduler states for i, scheduler in enumerate(schedulers): state = scheduler.state_dict() scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin" output_scheduler_file = output_dir.joinpath(scheduler_name) save(state, output_scheduler_file, save_on_each_node=save_on_each_node, safe_serialization=False) logger.info(f"Scheduler state saved in {output_scheduler_file}") # DataLoader states for i, dataloader in enumerate(dataloaders): sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin" output_sampler_file = output_dir.joinpath(sampler_name) # Only save if we have our custom sampler from .data_loader import IterableDatasetShard, SeedableRandomSampler if isinstance(dataloader.dataset, IterableDatasetShard): sampler = dataloader.get_sampler() if isinstance(sampler, SeedableRandomSampler): save(sampler, output_sampler_file, save_on_each_node=save_on_each_node, safe_serialization=False) if getattr(dataloader, "use_stateful_dataloader", False): dataloader_state_dict_name = "dl_state_dict.bin" if i == 0 else f"dl_state_dict_{i}.bin" output_dataloader_state_dict_file = output_dir.joinpath(dataloader_state_dict_name) state_dict = dataloader.state_dict() torch.save(state_dict, output_dataloader_state_dict_file) logger.info(f"Sampler state for dataloader {i} saved in {output_sampler_file}") # GradScaler state if scaler is not None: state = scaler.state_dict() output_scaler_file = output_dir.joinpath(SCALER_NAME) torch.save(state, output_scaler_file) logger.info(f"Gradient scaler state saved in {output_scaler_file}") # Random number generator states states = {} states_name = f"{RNG_STATE_NAME}_{process_index}.pkl" states["step"] = step states["random_state"] = random.getstate() states["numpy_random_seed"] = np.random.get_state() states["torch_manual_seed"] = torch.get_rng_state() if is_xpu_available(): states["torch_xpu_manual_seed"] = torch.xpu.get_rng_state_all() if is_mlu_available(): states["torch_mlu_manual_seed"] = torch.mlu.get_rng_state_all() if is_musa_available(): states["torch_musa_manual_seed"] = torch.musa.get_rng_state_all() else: states["torch_cuda_manual_seed"] = torch.cuda.get_rng_state_all() if is_torch_xla_available(): states["xm_seed"] = xm.get_rng_state() output_states_file = output_dir.joinpath(states_name) torch.save(states, output_states_file) logger.info(f"Random states saved in {output_states_file}") return output_dir def load_accelerator_state( input_dir, models, optimizers, schedulers, dataloaders, process_index, scaler=None, map_location=None, **load_model_func_kwargs, ): """ Loads states of the models, optimizers, scaler, and RNG generators from a given directory. 
Args: input_dir (`str` or `os.PathLike`): The name of the folder to load all relevant weights and states. models (`List[torch.nn.Module]`): A list of model instances optimizers (`List[torch.optim.Optimizer]`): A list of optimizer instances schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`): A list of learning rate schedulers process_index (`int`): The current process index in the Accelerator state scaler (`torch.amp.GradScaler`, *optional*): An optional *GradScaler* instance to load map_location (`str`, *optional*): What device to load the optimizer state onto. Should be one of either "cpu" or "on_device". load_model_func_kwargs (`dict`, *optional*): Additional arguments that can be passed to the model's `load_state_dict` method. Returns: `dict`: Contains the `Accelerator` attributes to override while loading the state. """ # stores the `Accelerator` attributes to override override_attributes = dict() if map_location not in [None, "cpu", "on_device"]: raise TypeError( "Unsupported optimizer map location passed, please choose one of `None`, `'cpu'`, or `'on_device'`" ) if map_location is None: map_location = "cpu" elif map_location == "on_device": map_location = PartialState().device input_dir = Path(input_dir) # Model states for i, model in enumerate(models): ending = f"_{i}" if i > 0 else "" input_model_file = input_dir.joinpath(f"{SAFE_MODEL_NAME}{ending}.safetensors") if input_model_file.exists(): load_model(model, input_model_file, device=str(map_location), **load_model_func_kwargs) else: # Load with torch input_model_file = input_dir.joinpath(f"{MODEL_NAME}{ending}.bin") state_dict = load(input_model_file, map_location=map_location) model.load_state_dict(state_dict, **load_model_func_kwargs) logger.info("All model weights loaded successfully") # Optimizer states for i, opt in enumerate(optimizers): optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin" input_optimizer_file = input_dir.joinpath(optimizer_name) optimizer_state = load(input_optimizer_file, map_location=map_location) optimizers[i].load_state_dict(optimizer_state) logger.info("All optimizer states loaded successfully") # Scheduler states for i, scheduler in enumerate(schedulers): scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin" input_scheduler_file = input_dir.joinpath(scheduler_name) scheduler_state = load(input_scheduler_file) scheduler.load_state_dict(scheduler_state) logger.info("All scheduler states loaded successfully") for i, dataloader in enumerate(dataloaders): sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin" input_sampler_file = input_dir.joinpath(sampler_name) # Only load if we have our custom sampler from .data_loader import IterableDatasetShard, SeedableRandomSampler if isinstance(dataloader.dataset, IterableDatasetShard): sampler = dataloader.get_sampler() if isinstance(sampler, SeedableRandomSampler): sampler = dataloader.set_sampler(load(input_sampler_file)) if getattr(dataloader, "use_stateful_dataloader", False): dataloader_state_dict_name = "dl_state_dict.bin" if i == 0 else f"dl_state_dict_{i}.bin" input_dataloader_state_dict_file = input_dir.joinpath(dataloader_state_dict_name) if input_dataloader_state_dict_file.exists(): state_dict = load(input_dataloader_state_dict_file) dataloader.load_state_dict(state_dict) logger.info("All dataloader sampler states loaded successfully") # GradScaler state if scaler is not None: input_scaler_file = input_dir.joinpath(SCALER_NAME) scaler_state = 
load(input_scaler_file) scaler.load_state_dict(scaler_state) logger.info("GradScaler state loaded successfully") # Random states try: states = load(input_dir.joinpath(f"{RNG_STATE_NAME}_{process_index}.pkl")) if "step" in states: override_attributes["step"] = states["step"] random.setstate(states["random_state"]) np.random.set_state(states["numpy_random_seed"]) torch.set_rng_state(states["torch_manual_seed"]) if is_xpu_available(): torch.xpu.set_rng_state_all(states["torch_xpu_manual_seed"]) if is_mlu_available(): torch.mlu.set_rng_state_all(states["torch_mlu_manual_seed"]) if is_musa_available(): torch.musa.set_rng_state_all(states["torch_musa_manual_seed"]) else: torch.cuda.set_rng_state_all(states["torch_cuda_manual_seed"]) if is_torch_xla_available(): xm.set_rng_state(states["xm_seed"]) logger.info("All random states loaded successfully") except Exception: logger.info("Could not load random states") return override_attributes def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False): """ Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl` """ # Should this be the right way to get a qual_name type value from `obj`? save_location = Path(path) / f"custom_checkpoint_{index}.pkl" logger.info(f"Saving the state of {get_pretty_name(obj)} to {save_location}") save(obj.state_dict(), save_location, save_on_each_node=save_on_each_node) def load_custom_state(obj, path, index: int = 0): """ Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl`. Will always set `weights_only=False` when loading the state. """ load_location = f"{path}/custom_checkpoint_{index}.pkl" logger.info(f"Loading the state of {get_pretty_name(obj)} from {load_location}") obj.load_state_dict(load(load_location, map_location="cpu", weights_only=False))
accelerate/src/accelerate/checkpointing.py/0
{ "file_path": "accelerate/src/accelerate/checkpointing.py", "repo_id": "accelerate", "token_count": 5491 }
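The `save_accelerator_state` / `load_accelerator_state` helpers above are normally driven through the public `Accelerator.save_state` / `Accelerator.load_state` API rather than called directly. A minimal sketch of that flow (the checkpoint directory and the tiny model are illustrative):

```python
import torch

from accelerate import Accelerator

accelerator = Accelerator()

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

# Writes model weights, optimizer state, and RNG states into the folder,
# using the checkpointing helpers above under the hood.
accelerator.save_state("checkpoints/step_100")

# Restores everything in place on every process.
accelerator.load_state("checkpoints/step_100")
```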
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler") class AcceleratedScheduler: """ A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. Useful to avoid making a scheduler step too fast when gradients went overflow and there was no training step (in mixed precision training) When performing gradient accumulation scheduler lengths should not be changed accordingly, Accelerate will always step the scheduler to account for it. Args: scheduler (`torch.optim.lr_scheduler._LRScheduler`): The scheduler to wrap. optimizers (one or a list of `torch.optim.Optimizer`): The optimizers used. step_with_optimizer (`bool`, *optional*, defaults to `True`): Whether or not the scheduler should be stepped at each optimizer step. split_batches (`bool`, *optional*, defaults to `False`): Whether or not the dataloaders split one batch across the different processes (so batch size is the same regardless of the number of processes) or create batches on each process (so batch size is the original batch size multiplied by the number of processes). """ def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False): self.scheduler = scheduler self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers] self.split_batches = split_batches self.step_with_optimizer = step_with_optimizer self.gradient_state = GradientState() def step(self, *args, **kwargs): if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*args, **kwargs) return # Otherwise, first make sure the optimizer was stepped. 
if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*args, **kwargs) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step num_processes = AcceleratorState().num_processes for _ in range(num_processes): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler, "total_steps"): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*args, **kwargs) else: self.scheduler.step(*args, **kwargs) # Passthroughs def get_last_lr(self): return self.scheduler.get_last_lr() def state_dict(self): return self.scheduler.state_dict() def load_state_dict(self, state_dict): self.scheduler.load_state_dict(state_dict) def get_lr(self): return self.scheduler.get_lr() def print_lr(self, *args, **kwargs): return self.scheduler.print_lr(*args, **kwargs)
accelerate/src/accelerate/scheduler.py/0
{ "file_path": "accelerate/src/accelerate/scheduler.py", "repo_id": "accelerate", "token_count": 1577 }
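`AcceleratedScheduler` is the wrapper that `Accelerator.prepare` places around a PyTorch learning rate scheduler; it is rarely instantiated by hand. A short sketch of the intended usage (the model, data, and scheduler choice are placeholders):

```python
import torch

from accelerate import Accelerator

accelerator = Accelerator()

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)

# After `prepare`, `scheduler` is an AcceleratedScheduler: it only advances when
# the wrapped optimizer actually performed a step (e.g. it skips fp16 overflow steps).
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)

for _ in range(20):
    optimizer.zero_grad()
    loss = model(torch.randn(4, 8)).sum()
    accelerator.backward(loss)
    optimizer.step()
    scheduler.step()
```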
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import logging import shutil from pathlib import Path import torch from safetensors.torch import load_file from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy, StateDictType from torch.utils.data import DataLoader from accelerate import Accelerator, FullyShardedDataParallelPlugin from accelerate.commands.merge import merge_command, merge_command_parser from accelerate.state import AcceleratorState from accelerate.test_utils.training import RegressionDataset from accelerate.utils import merge_fsdp_weights, patch_environment, save_fsdp_model logging.basicConfig(level=logging.INFO) parser = merge_command_parser() class TinyModel(torch.nn.Module): def __init__(self): super().__init__() self.linear1 = torch.nn.Linear(16, 16) self.activation = torch.nn.ReLU() self.linear2 = torch.nn.Linear(16, 16) self.softmax = torch.nn.Softmax() def forward(self, x): return self.linear2(self.activation(self.linear1(x))) def setup(): if AcceleratorState._shared_state != {}: AcceleratorState()._reset_state() plugin = FullyShardedDataParallelPlugin( sharding_strategy=ShardingStrategy.FULL_SHARD, state_dict_type=StateDictType.SHARDED_STATE_DICT ) model = TinyModel() with patch_environment(fsdp_auto_wrap_policy="SIZE_BASED_WRAP"): plugin.set_auto_wrap_policy(model) accelerator = Accelerator(fsdp_plugin=plugin) model = accelerator.prepare(model) return model, plugin, accelerator def mock_training(accelerator, model): train_set = RegressionDataset(length=128, seed=42) train_dl = DataLoader(train_set, batch_size=16, shuffle=False) optimizer = torch.optim.SGD(model.parameters(), lr=0.1) train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) for _ in range(3): for batch in train_dl: model.zero_grad() output = model(batch["x"]) loss = torch.nn.functional.mse_loss(output, batch["y"]) accelerator.backward(loss) optimizer.step() return model def check_weights(operation, state_1, state_2): for weight_1, weight_2 in zip(state_1.values(), state_2.values()): if str(weight_1.device) != "cuda": weight_1 = weight_1.to("cuda") if str(weight_2.device) != "cuda": weight_2 = weight_2.to("cuda") if operation == "same": assert torch.allclose(weight_1, weight_2) else: assert not torch.allclose(weight_1, weight_2) def check_safetensors_weights(path, model): safe_state_dict = load_file(path / "model.safetensors") safe_loaded_model = TinyModel() check_weights("diff", model.state_dict(), safe_loaded_model.state_dict()) safe_loaded_model.load_state_dict(safe_state_dict) check_weights("same", model.state_dict(), safe_loaded_model.state_dict()) def check_pytorch_weights(path, model): nonsafe_state_dict = torch.load(path / "pytorch_model.bin") nonsafe_loaded_model = TinyModel() check_weights("diff", model.state_dict(), nonsafe_loaded_model.state_dict()) nonsafe_loaded_model.load_state_dict(nonsafe_state_dict) check_weights("same", model.state_dict(), nonsafe_loaded_model.state_dict()) def 
test_merge_weights_safetensors(model, path): # Should now be saved at `path/merged.safetensors` merge_fsdp_weights(path / "pytorch_model_fsdp_0", path, safe_serialization=True) check_safetensors_weights(path, model) def test_merge_weights_command_safetensors(model, path): args = parser.parse_args([str(path / "pytorch_model_fsdp_0"), str(path)]) merge_command(args) check_safetensors_weights(path, model) def test_merge_weights_pytorch(model, path): # Should now be saved at `path/merged.bin` merge_fsdp_weights(path / "pytorch_model_fsdp_0", path, safe_serialization=False) check_pytorch_weights(path, model) def test_merge_weights_command_pytorch(model, path): args = parser.parse_args([str(path / "pytorch_model_fsdp_0"), str(path), "--unsafe_serialization"]) merge_command(args) check_pytorch_weights(path, model) if __name__ == "__main__": # Note this test requires at least two accelerators! model, plugin, accelerator = setup() if accelerator.num_processes > 1: try: # Initial setup for things out_path = Path("test_merge_weights_fsdp_weights") if not out_path.exists(): out_path.mkdir(parents=True, exist_ok=True) # Train briefly once weights aren't the baseline model = mock_training(accelerator, model) accelerator.wait_for_everyone() gc.collect() # Needed for some lingering refs after training save_fsdp_model(plugin, accelerator, model, out_path) accelerator.wait_for_everyone() # Finally we can test test_merge_weights_safetensors(model, out_path) test_merge_weights_command_safetensors(model, out_path) test_merge_weights_pytorch(model, out_path) test_merge_weights_command_pytorch(model, out_path) except Exception: raise finally: # Cleanup in case of any failures if accelerator.is_main_process: shutil.rmtree(out_path) accelerator.wait_for_everyone() accelerator.end_training()
accelerate/src/accelerate/test_utils/scripts/test_merge_weights.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/test_merge_weights.py", "repo_id": "accelerate", "token_count": 2351 }
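The test above exercises `merge_fsdp_weights` and its command line wrapper (`merge_command`). Outside of tests, merging a sharded FSDP checkpoint into a single file looks roughly like the sketch below; the paths are illustrative:

```python
from accelerate.utils import merge_fsdp_weights

# Merge the shards produced with StateDictType.SHARDED_STATE_DICT into a single
# safetensors file written to the output directory.
merge_fsdp_weights(
    "outputs/pytorch_model_fsdp_0",  # directory containing the sharded weights
    "outputs/merged",                # where the merged weights are written
    safe_serialization=True,
)
```

The same operation is exposed on the command line through the parser built by `merge_command_parser`, as the `test_merge_weights_command_*` functions above show.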
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess import sys from ast import literal_eval from shutil import which from typing import Any, Dict, List, Tuple import torch from ..commands.config.config_args import SageMakerConfig from ..utils import ( DynamoBackend, PrecisionType, is_fp8_available, is_ipex_available, is_mlu_available, is_musa_available, is_npu_available, is_torch_xla_available, is_xpu_available, ) from ..utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS from ..utils.other import is_port_in_use, merge_dicts from .dataclasses import DistributedType, SageMakerDistributedType def _filter_args(args, parser, default_args=[]): """ Filters out all `accelerate` specific args """ new_args, _ = parser.parse_known_args(default_args) for key, value in vars(args).items(): if key in vars(new_args).keys(): setattr(new_args, key, value) return new_args def _get_mpirun_args(): """ Determines the executable and argument names for mpirun, based on the type of install. The supported MPI programs are: OpenMPI, Intel MPI, or MVAPICH. Returns: Program name and arg names for hostfile, num processes, and processes per node """ # Find the MPI program name mpi_apps = [x for x in ["mpirun", "mpiexec"] if which(x)] if len(mpi_apps) == 0: raise OSError("mpirun or mpiexec were not found. Ensure that Intel MPI, Open MPI, or MVAPICH are installed.") # Call the app with the --version flag to determine which MPI app is installed mpi_app = mpi_apps[0] mpirun_version = subprocess.check_output([mpi_app, "--version"]) if b"Open MPI" in mpirun_version: return mpi_app, "--hostfile", "-n", "--npernode", "--bind-to" else: # Intel MPI and MVAPICH both use the same arg names return mpi_app, "-f", "-n", "-ppn", "" def setup_fp8_env(args: argparse.Namespace, current_env: Dict[str, str]): """ Setup the FP8 environment variables. """ prefix = "ACCELERATE_" for arg in vars(args): if arg.startswith("fp8_"): value = getattr(args, arg) if value is not None: current_env[f"{prefix}{arg.upper()}"] = str(getattr(args, arg)) return current_env def prepare_simple_launcher_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict[str, str]]: """ Prepares and returns the command list and an environment with the correct simple launcher environment variables. 
""" cmd = [] if args.no_python and args.module: raise ValueError("--module and --no_python cannot be used together") if args.mpirun_hostfile is not None: mpi_app_name, hostfile_arg, num_proc_arg, proc_per_node_arg, bind_to_arg = _get_mpirun_args() mpirun_ccl = getattr(args, "mpirun_ccl", None) bind_to = getattr(args, "bind-to", "socket") num_machines = args.num_machines num_processes = getattr(args, "num_processes", None) nproc_per_node = str(num_processes // num_machines) if num_processes and num_machines else "1" cmd += [ mpi_app_name, hostfile_arg, args.mpirun_hostfile, proc_per_node_arg, nproc_per_node, ] if num_processes: cmd += [num_proc_arg, str(num_processes)] if bind_to_arg: cmd += [bind_to_arg, bind_to] if not args.no_python: cmd.append(sys.executable) if args.module: cmd.append("-m") cmd.append(args.training_script) cmd.extend(args.training_script_args) current_env = os.environ.copy() current_env["ACCELERATE_USE_CPU"] = str(args.cpu or args.use_cpu) if args.debug: current_env["ACCELERATE_DEBUG_MODE"] = "true" if args.gpu_ids != "all" and args.gpu_ids is not None: if is_xpu_available(): current_env["ZE_AFFINITY_MASK"] = args.gpu_ids elif is_mlu_available(): current_env["MLU_VISIBLE_DEVICES"] = args.gpu_ids elif is_musa_available(): current_env["MUSA_VISIBLE_DEVICES"] = args.gpu_ids elif is_npu_available(): current_env["ASCEND_RT_VISIBLE_DEVICES"] = args.gpu_ids else: current_env["CUDA_VISIBLE_DEVICES"] = args.gpu_ids if args.num_machines > 1: current_env["MASTER_ADDR"] = args.main_process_ip current_env["MASTER_PORT"] = str(args.main_process_port) if args.mpirun_hostfile is not None: current_env["CCL_WORKER_COUNT"] = str(mpirun_ccl) elif args.num_processes > 1: current_env["MASTER_ADDR"] = args.main_process_ip if args.main_process_ip is not None else "127.0.0.1" current_env["MASTER_PORT"] = str(args.main_process_port) if args.main_process_port is not None else "29500" try: mixed_precision = PrecisionType(args.mixed_precision.lower()) except ValueError: raise ValueError( f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." ) current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) if args.mixed_precision.lower() == "fp8": if not is_fp8_available(): raise RuntimeError( "FP8 is not available on this machine. Please ensure that either Transformer Engine or MSAMP is installed." ) current_env = setup_fp8_env(args, current_env) try: dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) except ValueError: raise ValueError( f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}." ) current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph) current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic) current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process) if is_ipex_available(): current_env["ACCELERATE_USE_IPEX"] = str(args.ipex).lower() current_env["ACCELERATE_USE_XPU"] = str(args.use_xpu).lower() if args.enable_cpu_affinity: current_env["ACCELERATE_CPU_AFFINITY"] = "1" return cmd, current_env def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]: """ Prepares and returns an environment with the correct multi-GPU environment variables. 
""" num_processes = args.num_processes num_machines = args.num_machines main_process_ip = args.main_process_ip main_process_port = args.main_process_port if num_machines > 1: args.nproc_per_node = str(num_processes // num_machines) args.nnodes = str(num_machines) args.node_rank = int(args.machine_rank) if getattr(args, "same_network", False): args.master_addr = str(main_process_ip) args.master_port = str(main_process_port) else: args.rdzv_endpoint = f"{main_process_ip}:{main_process_port}" else: args.nproc_per_node = str(num_processes) if main_process_port is not None: args.master_port = str(main_process_port) if main_process_port is None: main_process_port = 29500 # only need to check port availability in main process, in case we have to start multiple launchers on the same machine # for some reasons like splitting log files. need_port_check = num_machines <= 1 or int(args.machine_rank) == 0 if need_port_check and is_port_in_use(main_process_port): raise ConnectionError( f"Tried to launch distributed communication on port `{main_process_port}`, but another process is utilizing it. " "Please specify a different port (such as using the `--main_process_port` flag or specifying a different `main_process_port` in your config file)" " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`." ) if args.module and args.no_python: raise ValueError("--module and --no_python cannot be used together") elif args.module: args.module = True elif args.no_python: args.no_python = True current_env = os.environ.copy() if args.debug: current_env["ACCELERATE_DEBUG_MODE"] = "true" gpu_ids = getattr(args, "gpu_ids", "all") if gpu_ids != "all" and args.gpu_ids is not None: if is_xpu_available(): current_env["ZE_AFFINITY_MASK"] = gpu_ids elif is_mlu_available(): current_env["MLU_VISIBLE_DEVICES"] = gpu_ids elif is_musa_available(): current_env["MUSA_VISIBLE_DEVICES"] = gpu_ids elif is_npu_available(): current_env["ASCEND_RT_VISIBLE_DEVICES"] = gpu_ids else: current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids mixed_precision = args.mixed_precision.lower() try: mixed_precision = PrecisionType(mixed_precision) except ValueError: raise ValueError(f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.") current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) if args.mixed_precision.lower() == "fp8": if not is_fp8_available(): raise RuntimeError( "FP8 is not available on this machine. Please ensure that either Transformer Engine or MSAMP is installed." ) current_env = setup_fp8_env(args, current_env) try: dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) except ValueError: raise ValueError( f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}." 
) current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph) current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic) if args.use_fsdp: current_env["ACCELERATE_USE_FSDP"] = "true" if args.fsdp_cpu_ram_efficient_loading and not args.fsdp_sync_module_states: raise ValueError("When using `--fsdp_cpu_ram_efficient_loading` set `--fsdp_sync_module_states` to `True`") current_env["FSDP_SHARDING_STRATEGY"] = str(args.fsdp_sharding_strategy) current_env["FSDP_OFFLOAD_PARAMS"] = str(args.fsdp_offload_params).lower() current_env["FSDP_MIN_NUM_PARAMS"] = str(args.fsdp_min_num_params) if args.fsdp_auto_wrap_policy is not None: current_env["FSDP_AUTO_WRAP_POLICY"] = str(args.fsdp_auto_wrap_policy) if args.fsdp_transformer_layer_cls_to_wrap is not None: current_env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = str(args.fsdp_transformer_layer_cls_to_wrap) if args.fsdp_backward_prefetch is not None: current_env["FSDP_BACKWARD_PREFETCH"] = str(args.fsdp_backward_prefetch) if args.fsdp_state_dict_type is not None: current_env["FSDP_STATE_DICT_TYPE"] = str(args.fsdp_state_dict_type) current_env["FSDP_FORWARD_PREFETCH"] = str(args.fsdp_forward_prefetch).lower() current_env["FSDP_USE_ORIG_PARAMS"] = str(args.fsdp_use_orig_params).lower() current_env["FSDP_CPU_RAM_EFFICIENT_LOADING"] = str(args.fsdp_cpu_ram_efficient_loading).lower() current_env["FSDP_SYNC_MODULE_STATES"] = str(args.fsdp_sync_module_states).lower() current_env["FSDP_ACTIVATION_CHECKPOINTING"] = str(args.fsdp_activation_checkpointing).lower() if args.use_tp: current_env["ACCELERATE_USE_TP"] = "true" current_env["TP_SIZE"] = str(args.tp_size) if args.use_megatron_lm: prefix = "MEGATRON_LM_" current_env["ACCELERATE_USE_MEGATRON_LM"] = "true" current_env[prefix + "TP_DEGREE"] = str(args.megatron_lm_tp_degree) current_env[prefix + "PP_DEGREE"] = str(args.megatron_lm_pp_degree) current_env[prefix + "GRADIENT_CLIPPING"] = str(args.megatron_lm_gradient_clipping) if args.megatron_lm_num_micro_batches is not None: current_env[prefix + "NUM_MICRO_BATCHES"] = str(args.megatron_lm_num_micro_batches) if args.megatron_lm_sequence_parallelism is not None: current_env[prefix + "SEQUENCE_PARALLELISM"] = str(args.megatron_lm_sequence_parallelism) if args.megatron_lm_recompute_activations is not None: current_env[prefix + "RECOMPUTE_ACTIVATIONS"] = str(args.megatron_lm_recompute_activations) if args.megatron_lm_use_distributed_optimizer is not None: current_env[prefix + "USE_DISTRIBUTED_OPTIMIZER"] = str(args.megatron_lm_use_distributed_optimizer) current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process) if args.enable_cpu_affinity: current_env["ACCELERATE_CPU_AFFINITY"] = "1" return current_env def prepare_deepspeed_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict[str, str]]: """ Prepares and returns the command list and an environment with the correct DeepSpeed environment variables. 
""" num_processes = args.num_processes num_machines = args.num_machines main_process_ip = args.main_process_ip main_process_port = args.main_process_port cmd = None # make sure launcher is not None if args.deepspeed_multinode_launcher is None: # set to default pdsh args.deepspeed_multinode_launcher = DEEPSPEED_MULTINODE_LAUNCHERS[0] if num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]: cmd = ["deepspeed", "--no_local_rank"] cmd.extend(["--hostfile", str(args.deepspeed_hostfile), "--launcher", str(args.deepspeed_multinode_launcher)]) if args.deepspeed_exclusion_filter is not None: cmd.extend( [ "--exclude", str(args.deepspeed_exclusion_filter), ] ) elif args.deepspeed_inclusion_filter is not None: cmd.extend( [ "--include", str(args.deepspeed_inclusion_filter), ] ) else: cmd.extend(["--num_gpus", str(args.num_processes // args.num_machines)]) if main_process_ip: cmd.extend(["--master_addr", str(main_process_ip)]) cmd.extend(["--master_port", str(main_process_port)]) if args.module and args.no_python: raise ValueError("--module and --no_python cannot be used together") elif args.module: cmd.append("--module") elif args.no_python: cmd.append("--no_python") cmd.append(args.training_script) cmd.extend(args.training_script_args) elif num_machines > 1 and args.deepspeed_multinode_launcher == DEEPSPEED_MULTINODE_LAUNCHERS[1]: args.nproc_per_node = str(num_processes // num_machines) args.nnodes = str(num_machines) args.node_rank = int(args.machine_rank) if getattr(args, "same_network", False): args.master_addr = str(main_process_ip) args.master_port = str(main_process_port) else: args.rdzv_endpoint = f"{main_process_ip}:{main_process_port}" else: args.nproc_per_node = str(num_processes) if main_process_port is not None: args.master_port = str(main_process_port) if main_process_port is None: main_process_port = 29500 # only need to check port availability in main process, in case we have to start multiple launchers on the same machine # for some reasons like splitting log files. need_port_check = num_machines <= 1 or int(args.machine_rank) == 0 if need_port_check and is_port_in_use(main_process_port): raise ConnectionError( f"Tried to launch distributed communication on port `{main_process_port}`, but another process is utilizing it. " "Please specify a different port (such as using the `--main_process_port` flag or specifying a different `main_process_port` in your config file)" " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`." ) if args.module and args.no_python: raise ValueError("--module and --no_python cannot be used together") elif args.module: args.module = True elif args.no_python: args.no_python = True current_env = os.environ.copy() if args.debug: current_env["ACCELERATE_DEBUG_MODE"] = "true" gpu_ids = getattr(args, "gpu_ids", "all") if gpu_ids != "all" and args.gpu_ids is not None: if is_xpu_available(): current_env["ZE_AFFINITY_MASK"] = gpu_ids elif is_mlu_available(): current_env["MLU_VISIBLE_DEVICES"] = gpu_ids elif is_musa_available(): current_env["MUSA_VISIBLE_DEVICES"] = gpu_ids elif is_npu_available(): current_env["ASCEND_RT_VISIBLE_DEVICES"] = gpu_ids else: current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids try: mixed_precision = PrecisionType(args.mixed_precision.lower()) except ValueError: raise ValueError( f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." 
) current_env["PYTHONPATH"] = env_var_path_add("PYTHONPATH", os.path.abspath(".")) current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) if args.mixed_precision.lower() == "fp8": if not is_fp8_available(): raise RuntimeError( "FP8 is not available on this machine. Please ensure that either Transformer Engine or MSAMP is installed." ) current_env = setup_fp8_env(args, current_env) current_env["ACCELERATE_CONFIG_DS_FIELDS"] = str(args.deepspeed_fields_from_accelerate_config).lower() current_env["ACCELERATE_USE_DEEPSPEED"] = "true" if args.zero_stage is not None: current_env["ACCELERATE_DEEPSPEED_ZERO_STAGE"] = str(args.zero_stage) if args.gradient_accumulation_steps is not None: current_env["ACCELERATE_GRADIENT_ACCUMULATION_STEPS"] = str(args.gradient_accumulation_steps) if args.gradient_clipping is not None: current_env["ACCELERATE_GRADIENT_CLIPPING"] = str(args.gradient_clipping).lower() if args.offload_optimizer_device is not None: current_env["ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE"] = str(args.offload_optimizer_device).lower() if args.offload_param_device is not None: current_env["ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE"] = str(args.offload_param_device).lower() if args.zero3_init_flag is not None: current_env["ACCELERATE_DEEPSPEED_ZERO3_INIT"] = str(args.zero3_init_flag).lower() if args.zero3_save_16bit_model is not None: current_env["ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL"] = str(args.zero3_save_16bit_model).lower() if args.deepspeed_config_file is not None: current_env["ACCELERATE_DEEPSPEED_CONFIG_FILE"] = str(args.deepspeed_config_file) if args.enable_cpu_affinity: current_env["ACCELERATE_CPU_AFFINITY"] = "1" if args.deepspeed_moe_layer_cls_names is not None: current_env["ACCELERATE_DEEPSPEED_MOE_LAYER_CLS_NAMES"] = str(args.deepspeed_moe_layer_cls_names) return cmd, current_env def prepare_tpu( args: argparse.Namespace, current_env: Dict[str, str], pod: bool = False ) -> Tuple[argparse.Namespace, Dict[str, str]]: """ Prepares and returns an environment with the correct TPU environment variables. """ if args.mixed_precision == "bf16" and is_torch_xla_available(check_is_tpu=True): if args.downcast_bf16: current_env["XLA_DOWNCAST_BF16"] = "1" else: current_env["XLA_USE_BF16"] = "1" if args.debug: current_env["ACCELERATE_DEBUG_MODE"] = "true" if pod: # Take explicit args and set them up for XLA args.vm = args.tpu_vm args.tpu = args.tpu_name return args, current_env def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]: if len(nargs) < 0: return {} # helper function to infer type for argsparser def _infer_type(s): try: s = float(s) if s // 1 == s: return int(s) return s except ValueError: return s parser = argparse.ArgumentParser() _, unknown = parser.parse_known_args(nargs) for index, argument in enumerate(unknown): if argument.startswith(("-", "--")): action = None if index + 1 < len(unknown): # checks if next index would be in list if unknown[index + 1].startswith(("-", "--")): # checks if next element is an key # raise an error if element is store_true or store_false raise ValueError( "SageMaker doesn’t support argparse actions for `store_true` or `store_false`. Please define explicit types" ) else: # raise an error if last element is store_true or store_false raise ValueError( "SageMaker doesn’t support argparse actions for `store_true` or `store_false`. 
Please define explicit types" ) # adds argument to parser based on action_store true if action is None: parser.add_argument(argument, type=_infer_type) else: parser.add_argument(argument, action=action) return { key: (literal_eval(value) if value in ("True", "False") else value) for key, value in parser.parse_args(nargs).__dict__.items() } def prepare_sagemager_args_inputs( sagemaker_config: SageMakerConfig, args: argparse.Namespace ) -> Tuple[argparse.Namespace, Dict[str, Any]]: # configure environment print("Configuring Amazon SageMaker environment") os.environ["AWS_DEFAULT_REGION"] = sagemaker_config.region # configure credentials if sagemaker_config.profile is not None: os.environ["AWS_PROFILE"] = sagemaker_config.profile elif args.aws_access_key_id is not None and args.aws_secret_access_key is not None: os.environ["AWS_ACCESS_KEY_ID"] = args.aws_access_key_id os.environ["AWS_SECRET_ACCESS_KEY"] = args.aws_secret_access_key else: raise OSError("You need to provide an aws_access_key_id and aws_secret_access_key when not using aws_profile") # extract needed arguments source_dir = os.path.dirname(args.training_script) if not source_dir: # checks if string is empty source_dir = "." entry_point = os.path.basename(args.training_script) if not entry_point.endswith(".py"): raise ValueError(f'Your training script should be a python script and not "{entry_point}"') print("Converting Arguments to Hyperparameters") hyperparameters = _convert_nargs_to_dict(args.training_script_args) try: mixed_precision = PrecisionType(args.mixed_precision.lower()) except ValueError: raise ValueError( f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." ) try: dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) except ValueError: raise ValueError( f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}." ) # Environment variables to be set for use during training job environment = { "ACCELERATE_USE_SAGEMAKER": "true", "ACCELERATE_MIXED_PRECISION": str(mixed_precision), "ACCELERATE_DYNAMO_BACKEND": dynamo_backend.value, "ACCELERATE_DYNAMO_MODE": args.dynamo_mode, "ACCELERATE_DYNAMO_USE_FULLGRAPH": str(args.dynamo_use_fullgraph), "ACCELERATE_DYNAMO_USE_DYNAMIC": str(args.dynamo_use_dynamic), "ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE": sagemaker_config.distributed_type.value, } if args.mixed_precision.lower() == "fp8": if not is_fp8_available(): raise RuntimeError( "FP8 is not available on this machine. Please ensure that either Transformer Engine or MSAMP is installed." 
) environment = setup_fp8_env(args, environment) # configure distribution set up distribution = None if sagemaker_config.distributed_type == SageMakerDistributedType.DATA_PARALLEL: distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} # configure sagemaker inputs sagemaker_inputs = None if sagemaker_config.sagemaker_inputs_file is not None: print(f"Loading SageMaker Inputs from {sagemaker_config.sagemaker_inputs_file} file") sagemaker_inputs = {} with open(sagemaker_config.sagemaker_inputs_file) as file: for i, line in enumerate(file): if i == 0: continue l = line.split("\t") sagemaker_inputs[l[0]] = l[1].strip() print(f"Loaded SageMaker Inputs: {sagemaker_inputs}") # configure sagemaker metrics sagemaker_metrics = None if sagemaker_config.sagemaker_metrics_file is not None: print(f"Loading SageMaker Metrics from {sagemaker_config.sagemaker_metrics_file} file") sagemaker_metrics = [] with open(sagemaker_config.sagemaker_metrics_file) as file: for i, line in enumerate(file): if i == 0: continue l = line.split("\t") metric_dict = { "Name": l[0], "Regex": l[1].strip(), } sagemaker_metrics.append(metric_dict) print(f"Loaded SageMaker Metrics: {sagemaker_metrics}") # configure session print("Creating Estimator") args = { "image_uri": sagemaker_config.image_uri, "entry_point": entry_point, "source_dir": source_dir, "role": sagemaker_config.iam_role_name, "transformers_version": sagemaker_config.transformers_version, "pytorch_version": sagemaker_config.pytorch_version, "py_version": sagemaker_config.py_version, "base_job_name": sagemaker_config.base_job_name, "instance_count": sagemaker_config.num_machines, "instance_type": sagemaker_config.ec2_instance_type, "debugger_hook_config": False, "distribution": distribution, "hyperparameters": hyperparameters, "environment": environment, "metric_definitions": sagemaker_metrics, } if sagemaker_config.additional_args is not None: args = merge_dicts(sagemaker_config.additional_args, args) return args, sagemaker_inputs def env_var_path_add(env_var_name, path_to_add): """ Extends a path-based environment variable's value with a new path and returns the updated value. It's up to the caller to set it in os.environ. """ paths = [p for p in os.environ.get(env_var_name, "").split(":") if len(p) > 0] paths.append(str(path_to_add)) return ":".join(paths) class PrepareForLaunch: """ Prepare a function that will launched in a distributed setup. Args: launcher (`Callable`): The function to launch. distributed_type ([`~state.DistributedType`]): The distributed type to prepare for. debug (`bool`, *optional*, defaults to `False`): Whether or not this is a debug launch. 
""" def __init__(self, launcher, distributed_type="NO", debug=False): self.launcher = launcher self.distributed_type = DistributedType(distributed_type) self.debug = debug def __call__(self, index, *args): if self.debug: world_size = int(os.environ.get("WORLD_SIZE")) rdv_file = os.environ.get("ACCELERATE_DEBUG_RDV_FILE") torch.distributed.init_process_group( "gloo", rank=index, store=torch.distributed.FileStore(rdv_file, world_size), world_size=world_size, ) elif self.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.MULTI_CPU, ): # Prepare the environment for torch.distributed os.environ["LOCAL_RANK"] = str(index) nproc = int(os.environ.get("NPROC", 1)) node_rank = int(os.environ.get("NODE_RANK", 0)) os.environ["RANK"] = str(nproc * node_rank + index) os.environ["FORK_LAUNCHED"] = str(1) self.launcher(*args)
accelerate/src/accelerate/utils/launch.py/0
{ "file_path": "accelerate/src/accelerate/utils/launch.py", "repo_id": "accelerate", "token_count": 12913 }
{ "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "bf16": { "enabled": "auto" }, "zero_optimization": { "stage": 3, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "sub_group_size": 1e9, "reduce_bucket_size": 1e9, "stage3_prefetch_bucket_size": 1e9, "stage3_param_persistence_threshold": 1e9, "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": true }, "train_micro_batch_size_per_gpu": 1 }
accelerate/tests/deepspeed/ds_config_zero3_model_only.json/0
{ "file_path": "accelerate/tests/deepspeed/ds_config_zero3_model_only.json", "repo_id": "accelerate", "token_count": 425 }
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import pytest import torch from parameterized import parameterized from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator, PartialState from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, DataLoaderStateMixin, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, prepare_data_loader, skip_first_batches, ) from accelerate.state import GradientState from accelerate.test_utils.testing import require_torchdata_stateful_dataloader from accelerate.utils import is_torchdata_stateful_dataloader_available if is_torchdata_stateful_dataloader_available(): from torchdata.stateful_dataloader import ( StatefulDataLoader, ) def parameterized_custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = f"num_workers_{param.args[0]}" return f"{func.__name__}_{param_based_name}" class RandomIterableDataset(IterableDataset): # For testing, an iterable dataset of random length def __init__(self, p_stop=0.01, max_length=1000): self.p_stop = p_stop self.max_length = max_length def __iter__(self): count = 0 stop = False while not stop and count < self.max_length: yield count count += 1 stop = random.random() < self.p_stop class SimpleIterableDataset(IterableDataset): def __init__(self, num_samples=1000): self.num_samples = num_samples def __iter__(self): for _ in range(self.num_samples): yield torch.rand(1) def __len__(self): return self.num_samples def set_epoch(self, epoch): self.epoch = epoch class SimpleBatchSampler(BatchSampler): def __init__(self, sampler, batch_size, drop_last, generator, seed): super().__init__(sampler, batch_size, drop_last) self.generator = generator self.seed = seed self.epoch = 0 def __iter__(self): self.generator.manual_seed(self.seed + self.epoch) return super().__iter__() def set_epoch(self, epoch): self.epoch = epoch class DataLoaderTester(unittest.TestCase): def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True): batch_sampler_shards = [ BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches) for i in range(2) ] batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards] if not split_batches: assert [len(shard) for shard in batch_sampler_shards] == [len(e) for e in expected] assert batch_sampler_lists == expected def test_batch_sampler_shards_with_no_splits(self): # Check the shards when the dataset is a round multiple of total batch size. 
batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(batch_sampler, expected) batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True) # Expected shouldn't change self.check_batch_sampler_shards(batch_sampler, expected) # Check the shards when the dataset is a round multiple of batch size but not total batch size. batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(batch_sampler, expected) batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(batch_sampler, expected) batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(batch_sampler, expected) batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected) # Check the shards when the dataset is very small. batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False) expected = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(batch_sampler, expected) batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True) expected = [[], []] self.check_batch_sampler_shards(batch_sampler, expected) def test_batch_sampler_shards_with_splits(self): # Check the shards when the dataset is a round multiple of batch size. batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True) # Expected shouldn't change self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) # Check the shards when the dataset is not a round multiple of batch size. 
batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) # Check the shards when the dataset is not a round multiple of batch size or num_processes. batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) # Check the shards when the dataset is very small. batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False) expected = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True) expected = [[], []] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) def test_batch_sampler_shards_with_no_splits_no_even(self): # Check the shards when the dataset is a round multiple of total batch size. batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True) # Expected shouldn't change self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) # Check the shards when the dataset is a round multiple of batch size but not total batch size. batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) # Check the shards when the dataset is very small. batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False) expected = [[[0, 1]], []] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True) expected = [[], []] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) def test_batch_sampler_shards_with_splits_no_even(self): # Check the shards when the dataset is a round multiple of batch size. batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True) # Expected shouldn't change self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) # Check the shards when the dataset is not a round multiple of batch size. batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) # Check the shards when the dataset is not a round multiple of batch size or num_processes. batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) # Check the shards when the dataset is very small. 
batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False) expected = [[[0, 1]], []] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True) expected = [[], []] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) def test_batch_sampler_with_varying_batch_size(self): batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)] assert len(batch_sampler_shards[0]) == 3 assert len(batch_sampler_shards[1]) == 2 assert list(batch_sampler_shards[0]) == [[0, 1, 2], [5, 6, 7, 8], [12, 13]] assert list(batch_sampler_shards[1]) == [[3, 4], [9, 10, 11]] def check_iterable_dataset_shards( self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False ): random.seed(seed) reference = list(dataset) iterable_dataset_shards = [ IterableDatasetShard( dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i, split_batches=split_batches, ) for i in range(num_processes) ] iterable_dataset_lists = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(seed) iterable_dataset_lists.append(list(iterable_dataset_shard)) shard_batch_size = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size first_list = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: assert len(l) == len(first_list) assert (len(l) % shard_batch_size) == 0 observed = [] for idx in range(0, len(first_list), shard_batch_size): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(reference) < len(observed): reference += reference assert observed == reference[: len(observed)] def test_iterable_dataset_shard(self): seed = 42 dataset = RandomIterableDataset() self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True) # Edge case with a very small dataset dataset = RandomIterableDataset(max_length=2) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True) def test_iterable_dataset_using_none_batch_size(self): dataset = SimpleIterableDataset(100) dataloader = DataLoader(dataset, batch_size=None) dataloader = prepare_data_loader(dataloader) for d in dataloader: assert isinstance(d, torch.Tensor) def test_skip_batch_sampler(self): batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False) new_batch_sampler = SkipBatchSampler(batch_sampler, 2) assert list(new_batch_sampler) == [[8, 9, 10, 11], [12, 13, 14, 15]] def 
test_dataloader_inheritance(self): """ `DataLoaderAdapter`'s parent classes are dynamically constructed, assert that subclasses of DataLoaderAdapter are instances of DataLoader and DataLoaderStateMixin. """ skip_dl = SkipDataLoader(range(16), batch_size=4, skip_batches=2) dl_shard = DataLoaderShard(range(16), batch_size=4) dl_dispatcher = DataLoaderDispatcher(range(16), batch_size=4) # Test dataloaders are instances of instantiated classes # These asserts look redundant, but it's worth checking since we are doing magic tricks such as dynamically overriding __class__ assert isinstance(skip_dl, SkipDataLoader) assert isinstance(dl_shard, DataLoaderShard) assert isinstance(dl_dispatcher, DataLoaderDispatcher) # Test dataloaders are instances of base classes assert isinstance(skip_dl, DataLoader) assert isinstance(dl_shard, DataLoader) assert isinstance(dl_dispatcher, DataLoader) assert isinstance(dl_shard, DataLoaderStateMixin) assert isinstance(dl_dispatcher, DataLoaderStateMixin) assert isinstance(skip_dl.base_dataloader, DataLoader) assert isinstance(dl_shard.base_dataloader, DataLoader) assert isinstance(dl_dispatcher.base_dataloader, DataLoader) with pytest.raises(AttributeError): _ = DataLoaderShard.base_dataloader def test_skip_data_loader(self): dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2) assert [t.tolist() for t in dataloader] == [[8, 9, 10, 11], [12, 13, 14, 15]] def test_skip_first_batches(self): dataloader = DataLoader(list(range(16)), batch_size=4) new_dataloader = skip_first_batches(dataloader, num_batches=2) assert [t.tolist() for t in new_dataloader] == [[8, 9, 10, 11], [12, 13, 14, 15]] def test_end_of_dataloader(self): dataloader = DataLoaderShard(list(range(16)), batch_size=4) for idx, _ in enumerate(dataloader): assert dataloader.end_of_dataloader == (idx == 3) # Test it also works on the second iteration for idx, _ in enumerate(dataloader): assert dataloader.end_of_dataloader == (idx == 3) def test_end_of_dataloader_dispatcher(self): dataloader = DataLoaderDispatcher(range(16), batch_size=4) for idx, _ in enumerate(dataloader): assert dataloader.end_of_dataloader == (idx == 3) # Test it also works on the second iteration for idx, _ in enumerate(dataloader): assert dataloader.end_of_dataloader == (idx == 3) def test_set_epoch_in_batch_sampler(self): # Ensure that set_epoch gets propagated to custom batch samplers that accept it dataset = list(range(16)) generator = torch.Generator() batch_sampler = SimpleBatchSampler(dataset, batch_size=4, drop_last=False, generator=generator, seed=12) dataloader = DataLoader(dataset, batch_sampler=batch_sampler) accelerator = Accelerator() dataloader = accelerator.prepare_data_loader(dataloader) assert batch_sampler.epoch == 0 dataloader.set_epoch(1) assert batch_sampler.epoch == 1 class StatefulDataLoaderTester(unittest.TestCase): @require_torchdata_stateful_dataloader def test_skip_data_loader(self): dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2, use_stateful_dataloader=True) assert isinstance(dataloader, StatefulDataLoader) assert [t.tolist() for t in dataloader] == [[8, 9, 10, 11], [12, 13, 14, 15]] @require_torchdata_stateful_dataloader def test_end_of_dataloader(self): dataloader = DataLoaderShard(list(range(16)), batch_size=4, use_stateful_dataloader=True) assert dataloader.use_stateful_dataloader assert isinstance(dataloader, StatefulDataLoader) for idx, _ in enumerate(dataloader): assert dataloader.end_of_dataloader == (idx == 3) # Test it also works on the second 
iteration for idx, _ in enumerate(dataloader): assert dataloader.end_of_dataloader == (idx == 3) @require_torchdata_stateful_dataloader def test_end_of_dataloader_dispatcher(self): dataloader = DataLoaderDispatcher(range(16), batch_size=4, use_stateful_dataloader=True) assert isinstance(dataloader, StatefulDataLoader) for idx, _ in enumerate(dataloader): assert dataloader.end_of_dataloader == (idx == 3) # Test it also works on the second iteration for idx, _ in enumerate(dataloader): assert dataloader.end_of_dataloader == (idx == 3) @parameterized.expand([0, 2], name_func=parameterized_custom_name_func) @require_torchdata_stateful_dataloader def test_dataloader_state_dict(self, num_workers): """ Test that saving a stateful dataloader's state, then loading it back, gives the same results. """ dataset = list(range(16)) dataloader = DataLoaderShard(dataset, batch_size=4, use_stateful_dataloader=True, num_workers=num_workers) assert dataloader.use_stateful_dataloader assert isinstance(dataloader, StatefulDataLoader) vals = [] for idx, val in enumerate(dataloader): vals.append(val) if idx == 1: sd = dataloader.state_dict() assert len(vals) == 4 dataloader2 = DataLoaderShard(dataset, batch_size=4, use_stateful_dataloader=True, num_workers=num_workers) dataloader2.load_state_dict(sd) data1 = vals[2:] data2 = list(dataloader2) assert len(data1) == len(data2) for d1, d2 in zip(data1, data2): assert torch.allclose(d1, d2) @parameterized.expand([0, 2], name_func=parameterized_custom_name_func) @require_torchdata_stateful_dataloader def test_dataloader_dispatcher_state_dict(self, num_workers): """ Test that saving a stateful dataloader's state, then loading it back, gives the same results. """ dataset = list(range(16)) dataloader = DataLoaderDispatcher(dataset, batch_size=4, use_stateful_dataloader=True, num_workers=num_workers) assert dataloader.use_stateful_dataloader assert isinstance(dataloader, StatefulDataLoader) vals = [] for idx, val in enumerate(dataloader): vals.append(val) if idx == 1: sd = dataloader.state_dict() assert len(vals) == 4 dataloader2 = DataLoaderDispatcher( dataset, batch_size=4, use_stateful_dataloader=True, num_workers=num_workers ) dataloader2.load_state_dict(sd) data1 = vals[2:] data2 = list(dataloader2) assert len(data1) == len(data2) for d1, d2 in zip(data1, data2): assert torch.allclose(d1, d2) @require_torchdata_stateful_dataloader def test_dataloader_inheritance(self): """ `DataLoaderAdapter`'s parent classes are dynamically constructed, assert that if use_stateful_dataloader=True, subclasses of DataLoaderAdapter are instances of StatefulDataLoader and DataLoaderStateMixin. 
""" skip_dl = SkipDataLoader(range(16), batch_size=4, skip_batches=2, use_stateful_dataloader=True) dl_shard = DataLoaderShard(range(16), batch_size=4, use_stateful_dataloader=True) dl_dispatcher = DataLoaderDispatcher(range(16), batch_size=4, use_stateful_dataloader=True) # Test dataloaders are instances of instantiated classes # These asserts look redundant, but it's worth checking since we are doing magic tricks such as dynamically overriding __class__ assert isinstance(skip_dl, SkipDataLoader) assert isinstance(dl_shard, DataLoaderShard) assert isinstance(dl_dispatcher, DataLoaderDispatcher) assert isinstance(skip_dl, StatefulDataLoader) assert isinstance(dl_shard, StatefulDataLoader) assert isinstance(dl_dispatcher, StatefulDataLoader) assert isinstance(dl_shard, DataLoaderStateMixin) assert isinstance(dl_dispatcher, DataLoaderStateMixin) assert isinstance(skip_dl.base_dataloader, StatefulDataLoader) assert isinstance(dl_shard.base_dataloader, StatefulDataLoader) assert isinstance(dl_dispatcher.base_dataloader, StatefulDataLoader) @parameterized.expand([0, 2], name_func=parameterized_custom_name_func) @require_torchdata_stateful_dataloader def test_stateful_dataloader_adapter_equivalent_to_torchdata_stateful_dataloader(self, num_workers): """ Assert that `state_dict()` and `load_state_dict()` for derived subclasses of `DataLoaderAdapter` produce the same behavior as `state_dict()` and `load_state_dict()` for `StatefulDataLoader`. """ dataset = list(range(64)) # Set the seed for reproducibility def g(): return torch.Generator().manual_seed(42) accelerator = Accelerator() stateful_dl = StatefulDataLoader(dataset, batch_size=4, num_workers=num_workers, generator=g()) skip_dl = SkipDataLoader( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) dl_shard = DataLoaderShard( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) dl_dispatcher = DataLoaderDispatcher( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) dataloaders_under_test = [skip_dl, dl_shard, dl_dispatcher] num_batches_to_skip = 8 def get_first_n_batches(dl, n, device): """ Iterate over the first `n` batches of a dataloader then break, returning the batches in a list. 
""" batches = [] for idx, batch in enumerate(dl): if idx == n - 1: if hasattr(dl, "end"): dl.end() break batches.append(batch.to(device)) return batches # Iterate over all of the dataloaders identically, expect the same values expected_batches = get_first_n_batches(stateful_dl, num_batches_to_skip, accelerator.device) batches_from_dataloaders = [ get_first_n_batches(dl, num_batches_to_skip, accelerator.device) for dl in dataloaders_under_test ] for dl_batches in batches_from_dataloaders: for expected, actual in zip(expected_batches, dl_batches): assert torch.allclose(expected, actual) # The adapters should all produce the same state_dict as the reference stateful dataloader expected_state_dict = stateful_dl.state_dict() skip_dl_state_dict = skip_dl.state_dict() dl_shard_state_dict = dl_shard.state_dict() dl_dispatcher_state_dict = dl_dispatcher.state_dict() assert expected_state_dict == skip_dl_state_dict assert expected_state_dict == dl_shard_state_dict assert expected_state_dict == dl_dispatcher_state_dict # Load the state dict into new dataloaders manual_skip_dl = SkipDataLoader( dataset, batch_size=4, num_workers=num_workers, generator=g(), skip_batches=num_batches_to_skip, use_stateful_dataloader=True, ) loaded_stateful_dl = StatefulDataLoader(dataset, batch_size=4, num_workers=num_workers, generator=g()) loaded_stateful_dl.load_state_dict(expected_state_dict) loaded_skip_dl = SkipDataLoader( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) loaded_skip_dl.load_state_dict(expected_state_dict) loaded_dl_shard = DataLoaderShard( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) loaded_dl_shard.load_state_dict(expected_state_dict) loaded_dl_dispatcher = DataLoaderDispatcher( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) loaded_dl_dispatcher.load_state_dict(expected_state_dict) # Continue the iteration, expecting identical behavior across the board def get_all_batches(dl, device): """ Iterate over all batches of a dataloader, returning (batches, num_batches_yielded) """ batches = [] num_batches_yielded = 0 for batch in dl: batches.append(batch.to(device)) num_batches_yielded += 1 return (batches, num_batches_yielded) expected_batch_results = get_all_batches(loaded_stateful_dl, accelerator.device) dataloader_batch_results = [ get_all_batches(dl, accelerator.device) for dl in [manual_skip_dl, loaded_skip_dl, loaded_dl_shard, loaded_dl_dispatcher] ] for dl_results in dataloader_batch_results: for expected, actual in zip(expected_batches, dl_batches): assert torch.allclose(expected[0], actual[0]) assert expected_batch_results[1] == dl_results[1] assert accelerator.gradient_state.active_dataloader is None @parameterized.expand([0, 2], name_func=parameterized_custom_name_func) @require_torchdata_stateful_dataloader def test_decoupled_stateful_dataloader_adapter_equivalent_to_torchdata_stateful_dataloader(self, num_workers): """ Assert that `state_dict()` and `load_state_dict()` for derived subclasses of `DataLoaderAdapter` produce the same behavior as `state_dict()` and `load_state_dict()` for `StatefulDataLoader` when *not* using Accelerator (and instead using the decoupled `PartialState` workflow). 
""" dataset = list(range(64)) # Set the seed for reproducibility def g(): return torch.Generator().manual_seed(42) state = PartialState() stateful_dl = StatefulDataLoader(dataset, batch_size=4, num_workers=num_workers, generator=g()) skip_dl = SkipDataLoader( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) dl_shard = DataLoaderShard( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) dl_dispatcher = DataLoaderDispatcher( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) dataloaders_under_test = [skip_dl, dl_shard, dl_dispatcher] num_batches_to_skip = 8 def get_first_n_batches(dl, n, device): """ Iterate over the first `n` batches of a dataloader then break, returning the batches in a list. """ batches = [] for idx, batch in enumerate(dl): if idx == n - 1: if hasattr(dl, "end"): dl.end() break batches.append(batch.to(device)) return batches # Iterate over all of the dataloaders identically, expect the same values expected_batches = get_first_n_batches(stateful_dl, num_batches_to_skip, state.device) batches_from_dataloaders = [ get_first_n_batches(dl, num_batches_to_skip, state.device) for dl in dataloaders_under_test ] for dl_batches in batches_from_dataloaders: for expected, actual in zip(expected_batches, dl_batches): assert torch.allclose(expected, actual) # The adapters should all produce the same state_dict as the reference stateful dataloader expected_state_dict = stateful_dl.state_dict() skip_dl_state_dict = skip_dl.state_dict() dl_shard_state_dict = dl_shard.state_dict() dl_dispatcher_state_dict = dl_dispatcher.state_dict() assert expected_state_dict == skip_dl_state_dict assert expected_state_dict == dl_shard_state_dict assert expected_state_dict == dl_dispatcher_state_dict # Load the state dict into new dataloaders manual_skip_dl = SkipDataLoader( dataset, batch_size=4, num_workers=num_workers, generator=g(), skip_batches=num_batches_to_skip, use_stateful_dataloader=True, ) loaded_stateful_dl = StatefulDataLoader(dataset, batch_size=4, num_workers=num_workers, generator=g()) loaded_stateful_dl.load_state_dict(expected_state_dict) loaded_skip_dl = SkipDataLoader( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) loaded_skip_dl.load_state_dict(expected_state_dict) loaded_dl_shard = DataLoaderShard( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) loaded_dl_shard.load_state_dict(expected_state_dict) loaded_dl_dispatcher = DataLoaderDispatcher( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) loaded_dl_dispatcher.load_state_dict(expected_state_dict) # Continue the iteration, expecting identical behavior across the board def get_all_batches(dl, device): """ Iterate over all batches of a dataloader, returning (batches, num_batches_yielded) """ batches = [] num_batches_yielded = 0 for batch in dl: batches.append(batch.to(device)) num_batches_yielded += 1 return (batches, num_batches_yielded) expected_batch_results = get_all_batches(loaded_stateful_dl, state.device) dataloader_batch_results = [ get_all_batches(dl, state.device) for dl in [manual_skip_dl, loaded_skip_dl, loaded_dl_shard, loaded_dl_dispatcher] ] for dl_results in dataloader_batch_results: for expected, actual in zip(expected_batches, dl_batches): assert torch.allclose(expected[0], actual[0]) assert expected_batch_results[1] == dl_results[1] # Using the decoupled 
(`PartialState`) workflow, GradientState should be automatically initialized (with # default parameters) by `DataLoaderDispatcher` assert GradientState._shared_state != {}, "GradientState should already be initialized!" gradient_state = GradientState() assert gradient_state.active_dataloader is None
accelerate/tests/test_data_loader.py/0
{ "file_path": "accelerate/tests/test_data_loader.py", "repo_id": "accelerate", "token_count": 16967 }
#[cfg(test)] pub mod simplified; #[cfg(test)] mod tests { use anyhow::Result; use candle::{DType, Device, Tensor}; use parquet::file::reader::SerializedFileReader; // NOTE: Waiting on https://github.com/rust-lang/mdBook/pull/1856 #[rustfmt::skip] #[tokio::test] async fn book_hub_1() { // ANCHOR: book_hub_1 use candle::Device; use hf_hub::api::tokio::Api; let api = Api::new().unwrap(); let repo = api.model("bert-base-uncased".to_string()); let weights_filename = repo.get("model.safetensors").await.unwrap(); let weights = candle::safetensors::load(weights_filename, &Device::Cpu).unwrap(); // ANCHOR_END: book_hub_1 assert_eq!(weights.len(), 206); } #[rustfmt::skip] #[test] fn book_hub_2() { { // ANCHOR: book_hub_2 use candle::Device; use hf_hub::api::sync::Api; use memmap2::Mmap; use std::fs; let api = Api::new().unwrap(); let repo = api.model("bert-base-uncased".to_string()); let weights_filename = repo.get("model.safetensors").unwrap(); let file = fs::File::open(weights_filename).unwrap(); let mmap = unsafe { Mmap::map(&file).unwrap() }; let weights = candle::safetensors::load_buffer(&mmap[..], &Device::Cpu).unwrap(); // ANCHOR_END: book_hub_2 assert_eq!(weights.len(), 206); } // #[rustfmt::skip] // #[test] // fn book_hub_3() { { // ANCHOR: book_hub_3 use candle::{DType, Device, Tensor}; use hf_hub::api::sync::Api; use memmap2::Mmap; use safetensors::slice::IndexOp; use safetensors::SafeTensors; use std::fs; let api = Api::new().unwrap(); let repo = api.model("bert-base-uncased".to_string()); let weights_filename = repo.get("model.safetensors").unwrap(); let file = fs::File::open(weights_filename).unwrap(); let mmap = unsafe { Mmap::map(&file).unwrap() }; // Use safetensors directly let tensors = SafeTensors::deserialize(&mmap[..]).unwrap(); let view = tensors .tensor("bert.encoder.layer.0.attention.self.query.weight") .unwrap(); // We're going to load shard with rank 1, within a world_size of 4 // We're going to split along dimension 0 doing VIEW[start..stop, :] let rank = 1; let world_size = 4; let dim = 0; let dtype = view.dtype(); let mut tp_shape = view.shape().to_vec(); let size = tp_shape[0]; if size % world_size != 0 { panic!("The dimension is not divisible by `world_size`"); } let block_size = size / world_size; let start = rank * block_size; let stop = (rank + 1) * block_size; // Everything is expressed in tensor dimension // bytes offsets is handled automatically for safetensors. let iterator = view.slice(start..stop).unwrap(); tp_shape[dim] = block_size; // Convert safetensors Dtype to candle DType let dtype: DType = dtype.try_into().unwrap(); // TODO: Implement from_buffer_iterator so we can skip the extra CPU alloc. 
let raw: Vec<u8> = iterator.into_iter().flatten().cloned().collect(); let tp_tensor = Tensor::from_raw_buffer(&raw, dtype, &tp_shape, &Device::Cpu).unwrap(); // ANCHOR_END: book_hub_3 assert_eq!(view.shape(), &[768, 768]); assert_eq!(tp_tensor.dims(), &[192, 768]); } } #[allow(unused)] #[rustfmt::skip] fn book_training_1() -> Result<()>{ // ANCHOR: book_training_1 use hf_hub::{api::sync::Api, Repo, RepoType}; let dataset_id = "mnist".to_string(); let api = Api::new()?; let repo = Repo::with_revision( dataset_id, RepoType::Dataset, "refs/convert/parquet".to_string(), ); let repo = api.repo(repo); let test_parquet_filename = repo.get("mnist/test/0000.parquet")?; let train_parquet_filename = repo.get("mnist/train/0000.parquet")?; let test_parquet = SerializedFileReader::new(std::fs::File::open(test_parquet_filename)?)?; let train_parquet = SerializedFileReader::new(std::fs::File::open(train_parquet_filename)?)?; // ANCHOR_END: book_training_1 // Ignore unused let _train = train_parquet; // ANCHOR: book_training_2 for row in test_parquet { for (idx, (name, field)) in row?.get_column_iter().enumerate() { println!("Column id {idx}, name {name}, value {field}"); } } // ANCHOR_END: book_training_2 let test_parquet_filename = repo.get("mnist/test/0000.parquet")?; let train_parquet_filename = repo.get("mnist/train/0000.parquet")?; let test_parquet = SerializedFileReader::new(std::fs::File::open(test_parquet_filename)?)?; let train_parquet = SerializedFileReader::new(std::fs::File::open(train_parquet_filename)?)?; // ANCHOR: book_training_3 let test_samples = 10_000; let mut test_buffer_images: Vec<u8> = Vec::with_capacity(test_samples * 784); let mut test_buffer_labels: Vec<u8> = Vec::with_capacity(test_samples); for row in test_parquet{ for (_name, field) in row?.get_column_iter() { if let parquet::record::Field::Group(subrow) = field { for (_name, field) in subrow.get_column_iter() { if let parquet::record::Field::Bytes(value) = field { let image = image::load_from_memory(value.data()).unwrap(); test_buffer_images.extend(image.to_luma8().as_raw()); } } }else if let parquet::record::Field::Long(label) = field { test_buffer_labels.push(*label as u8); } } } let test_images = (Tensor::from_vec(test_buffer_images, (test_samples, 784), &Device::Cpu)?.to_dtype(DType::F32)? / 255.)?; let test_labels = Tensor::from_vec(test_buffer_labels, (test_samples, ), &Device::Cpu)?; let train_samples = 60_000; let mut train_buffer_images: Vec<u8> = Vec::with_capacity(train_samples * 784); let mut train_buffer_labels: Vec<u8> = Vec::with_capacity(train_samples); for row in train_parquet{ for (_name, field) in row?.get_column_iter() { if let parquet::record::Field::Group(subrow) = field { for (_name, field) in subrow.get_column_iter() { if let parquet::record::Field::Bytes(value) = field { let image = image::load_from_memory(value.data()).unwrap(); train_buffer_images.extend(image.to_luma8().as_raw()); } } }else if let parquet::record::Field::Long(label) = field { train_buffer_labels.push(*label as u8); } } } let train_images = (Tensor::from_vec(train_buffer_images, (train_samples, 784), &Device::Cpu)?.to_dtype(DType::F32)? 
/ 255.)?; let train_labels = Tensor::from_vec(train_buffer_labels, (train_samples, ), &Device::Cpu)?; let mnist = candle_datasets::vision::Dataset { train_images, train_labels, test_images, test_labels, labels: 10, }; // ANCHOR_END: book_training_3 assert_eq!(mnist.test_images.dims(), &[10_000, 784]); assert_eq!(mnist.test_labels.dims(), &[10_000]); assert_eq!(mnist.train_images.dims(), &[60_000, 784]); assert_eq!(mnist.train_labels.dims(), &[60_000]); Ok(()) } }
candle/candle-book/src/lib.rs/0
{ "file_path": "candle/candle-book/src/lib.rs", "repo_id": "candle", "token_count": 2808 }
use crate::benchmarks::{BenchDevice, BenchDeviceHandler}; use candle_core::{DType, Device, Tensor}; use criterion::{black_box, criterion_group, Criterion, Throughput}; use std::time::Instant; fn rand_uniform(a: &Tensor) { a.rand_like(-1.0, 123.0).unwrap(); } fn rand_normal(a: &Tensor) { a.randn_like(100.0, 15.0).unwrap(); } fn run_random_bench(c: &mut Criterion, device: &Device) { let b = 1; let rows = 2048; let cols = 2048; let dtype = DType::F32; let tensor = Tensor::zeros((b, rows, cols), dtype, device).unwrap(); let flops = b * rows * cols * dtype.size_in_bytes(); let mut group = c.benchmark_group(device.bench_name("random_uniform")); group.throughput(Throughput::Bytes(flops as u64)); group.bench_function("iter", move |benches| { benches.iter_custom(|iters| { let start = Instant::now(); for _i in 0..iters { rand_uniform(black_box(&tensor)); } device.sync().unwrap(); start.elapsed() }) }); group.finish(); let tensor = Tensor::zeros((b, rows, cols), dtype, device).unwrap(); let mut group = c.benchmark_group(device.bench_name("random_normal")); group.throughput(Throughput::Bytes(flops as u64)); group.bench_function("iter", move |benches| { benches.iter_custom(|iters| { let start = Instant::now(); for _i in 0..iters { rand_normal(black_box(&tensor)); } device.sync().unwrap(); start.elapsed() }) }); group.finish(); } fn criterion_benchmark(c: &mut Criterion) { let handler = BenchDeviceHandler::new().unwrap(); for device in handler.devices { run_random_bench(c, &device); } } criterion_group!(benches, criterion_benchmark);
candle/candle-core/benches/benchmarks/random.rs/0
{ "file_path": "candle/candle-core/benches/benchmarks/random.rs", "repo_id": "candle", "token_count": 812 }
//! Traits and methods for CPU-backed Tensors pub mod erf; pub mod kernels; #[allow(unused)] trait Cpu<const ARR: usize> { type Unit; type Array; const STEP: usize; const EPR: usize; fn n() -> usize; unsafe fn zero() -> Self::Unit; unsafe fn zero_array() -> Self::Array; unsafe fn load(mem_addr: *const f32) -> Self::Unit; unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit; unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit; unsafe fn vec_reduce(x: Self::Array, y: *mut f32); unsafe fn from_f32(v: f32) -> Self::Unit; unsafe fn vec_store(mem_addr: *mut f32, a: Self::Unit); } #[allow(unused)] trait CpuF16<const ARR: usize> { type Unit; type Array; const STEP: usize; const EPR: usize; fn n() -> usize; unsafe fn zero() -> Self::Unit; unsafe fn zero_array() -> Self::Array; unsafe fn load(mem_addr: *const f16) -> Self::Unit; unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit; unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit; unsafe fn vec_reduce(x: Self::Array, y: *mut f32); unsafe fn from_f32(v: f32) -> Self::Unit; unsafe fn vec_store(mem_addr: *mut f16, a: Self::Unit); } use half::f16; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] #[cfg(target_feature = "avx")] pub mod avx; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] #[cfg(target_feature = "avx")] pub use avx::{CurrentCpu, CurrentCpuF16}; #[cfg(target_arch = "wasm32")] #[cfg(target_feature = "simd128")] pub mod simd128; #[cfg(target_arch = "wasm32")] #[cfg(target_feature = "simd128")] pub use simd128::CurrentCpu; #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] #[cfg(target_feature = "neon")] pub mod neon; #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] #[cfg(target_feature = "neon")] pub use neon::CurrentCpu; #[cfg(any( target_feature = "neon", target_feature = "avx", target_feature = "simd128" ))] #[inline(always)] pub(crate) unsafe fn vec_dot_f32(a_row: *const f32, b_row: *const f32, c: *mut f32, k: usize) { let np = k & !(CurrentCpu::STEP - 1); let mut sum = CurrentCpu::zero_array(); let mut ax = CurrentCpu::zero_array(); let mut ay = CurrentCpu::zero_array(); for i in (0..np).step_by(CurrentCpu::STEP) { for j in 0..CurrentCpu::n() { ax[j] = CurrentCpu::load(a_row.add(i + j * CurrentCpu::EPR)); ay[j] = CurrentCpu::load(b_row.add(i + j * CurrentCpu::EPR)); sum[j] = CurrentCpu::vec_fma(sum[j], ax[j], ay[j]); } } CurrentCpu::vec_reduce(sum, c); // leftovers for i in np..k { *c += *a_row.add(i) * (*b_row.add(i)); } } #[cfg(not(any( target_feature = "neon", target_feature = "avx", target_feature = "simd128" )))] #[inline(always)] pub(crate) unsafe fn vec_dot_f32(a_row: *const f32, b_row: *const f32, c: *mut f32, k: usize) { // leftovers for i in 0..k { *c += *a_row.add(i) * (*b_row.add(i)); } } #[cfg(any( target_feature = "neon", target_feature = "avx", target_feature = "simd128" ))] #[inline(always)] pub(crate) unsafe fn vec_sum(row: *const f32, b: *mut f32, k: usize) { let np = k & !(CurrentCpu::STEP - 1); let mut sum = CurrentCpu::zero_array(); let mut x = CurrentCpu::zero_array(); for i in (0..np).step_by(CurrentCpu::STEP) { for j in 0..CurrentCpu::n() { x[j] = CurrentCpu::load(row.add(i + j * CurrentCpu::EPR)); sum[j] = CurrentCpu::vec_add(sum[j], x[j]); } } CurrentCpu::vec_reduce(sum, b); // leftovers for i in np..k { *b += *row.add(i) } } #[cfg(not(any( target_feature = "neon", target_feature = "avx", target_feature = "simd128" )))] #[inline(always)] pub(crate) unsafe fn vec_sum(row: *const f32, b: *mut f32, k: usize) { *b 
= 0f32; for i in 0..k { *b += *row.add(i) } } #[cfg(target_feature = "avx")] #[inline(always)] pub(crate) unsafe fn vec_dot_f16(a_row: *const f16, b_row: *const f16, c: *mut f32, k: usize) { let mut sumf = 0.0f32; let np = k & !(CurrentCpuF16::STEP - 1); let mut sum = CurrentCpuF16::zero_array(); let mut ax = CurrentCpuF16::zero_array(); let mut ay = CurrentCpuF16::zero_array(); for i in (0..np).step_by(CurrentCpuF16::STEP) { for j in 0..CurrentCpuF16::n() { ax[j] = CurrentCpuF16::load(a_row.add(i + j * CurrentCpuF16::EPR)); ay[j] = CurrentCpuF16::load(b_row.add(i + j * CurrentCpuF16::EPR)); sum[j] = CurrentCpuF16::vec_fma(sum[j], ax[j], ay[j]); } } CurrentCpuF16::vec_reduce(sum, &mut sumf); // leftovers for i in np..k { sumf += (*a_row.add(i)).to_f32() * (*b_row.add(i)).to_f32(); } *c = sumf; } #[cfg(not(target_feature = "avx"))] #[inline(always)] pub(crate) unsafe fn vec_dot_f16(a_row: *const f16, b_row: *const f16, c: *mut f32, k: usize) { // leftovers let mut sum = 0.0; for i in 0..k { sum += (*a_row.add(i)).to_f32() * (*b_row.add(i)).to_f32(); } *c = sum; }
candle/candle-core/src/cpu/mod.rs/0
{ "file_path": "candle/candle-core/src/cpu/mod.rs", "repo_id": "candle", "token_count": 2446 }
//! Candle-specific Error and Result use crate::{DType, DeviceLocation, Layout, MetalError, Shape}; #[derive(Debug, Clone)] pub struct MatMulUnexpectedStriding { pub lhs_l: Layout, pub rhs_l: Layout, pub bmnk: (usize, usize, usize, usize), pub msg: &'static str, } impl std::fmt::Debug for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{self}") } } /// Main library error type. #[derive(thiserror::Error)] pub enum Error { // === DType Errors === #[error("{msg}, expected: {expected:?}, got: {got:?}")] UnexpectedDType { msg: &'static str, expected: DType, got: DType, }, #[error("dtype mismatch in {op}, lhs: {lhs:?}, rhs: {rhs:?}")] DTypeMismatchBinaryOp { lhs: DType, rhs: DType, op: &'static str, }, #[error("unsupported dtype {0:?} for op {1}")] UnsupportedDTypeForOp(DType, &'static str), // === Dimension Index Errors === #[error("{op}: dimension index {dim} out of range for shape {shape:?}")] DimOutOfRange { shape: Shape, dim: i32, op: &'static str, }, #[error("{op}: duplicate dim index {dims:?} for shape {shape:?}")] DuplicateDimIndex { shape: Shape, dims: Vec<usize>, op: &'static str, }, // === Shape Errors === #[error("unexpected rank, expected: {expected}, got: {got} ({shape:?})")] UnexpectedNumberOfDims { expected: usize, got: usize, shape: Shape, }, #[error("{msg}, expected: {expected:?}, got: {got:?}")] UnexpectedShape { msg: String, expected: Shape, got: Shape, }, #[error( "Shape mismatch, got buffer of size {buffer_size} which is compatible with shape {shape:?}" )] ShapeMismatch { buffer_size: usize, shape: Shape }, #[error("shape mismatch in {op}, lhs: {lhs:?}, rhs: {rhs:?}")] ShapeMismatchBinaryOp { lhs: Shape, rhs: Shape, op: &'static str, }, #[error("shape mismatch in cat for dim {dim}, shape for arg 1: {first_shape:?} shape for arg {n}: {nth_shape:?}")] ShapeMismatchCat { dim: usize, first_shape: Shape, n: usize, nth_shape: Shape, }, #[error("Cannot divide tensor of shape {shape:?} equally along dim {dim} into {n_parts}")] ShapeMismatchSplit { shape: Shape, dim: usize, n_parts: usize, }, #[error("{op} can only be performed on a single dimension")] OnlySingleDimension { op: &'static str, dims: Vec<usize> }, #[error("empty tensor for {op}")] EmptyTensor { op: &'static str }, // === Device Errors === #[error("device mismatch in {op}, lhs: {lhs:?}, rhs: {rhs:?}")] DeviceMismatchBinaryOp { lhs: DeviceLocation, rhs: DeviceLocation, op: &'static str, }, // === Op Specific Errors === #[error("narrow invalid args {msg}: {shape:?}, dim: {dim}, start: {start}, len:{len}")] NarrowInvalidArgs { shape: Shape, dim: usize, start: usize, len: usize, msg: &'static str, }, #[error("conv1d invalid args {msg}: inp: {inp_shape:?}, k: {k_shape:?}, pad: {padding}, stride: {stride}")] Conv1dInvalidArgs { inp_shape: Shape, k_shape: Shape, padding: usize, stride: usize, msg: &'static str, }, #[error("{op} invalid index {index} with dim size {size}")] InvalidIndex { op: &'static str, index: usize, size: usize, }, #[error("cannot broadcast {src_shape:?} to {dst_shape:?}")] BroadcastIncompatibleShapes { src_shape: Shape, dst_shape: Shape }, #[error("cannot set variable {msg}")] CannotSetVar { msg: &'static str }, // Box indirection to avoid large variant. 
#[error("{0:?}")] MatMulUnexpectedStriding(Box<MatMulUnexpectedStriding>), #[error("{op} only supports contiguous tensors")] RequiresContiguous { op: &'static str }, #[error("{op} expects at least one tensor")] OpRequiresAtLeastOneTensor { op: &'static str }, #[error("{op} expects at least two tensors")] OpRequiresAtLeastTwoTensors { op: &'static str }, #[error("backward is not supported for {op}")] BackwardNotSupported { op: &'static str }, // === Other Errors === #[error("the candle crate has not been built with cuda support")] NotCompiledWithCudaSupport, #[error("the candle crate has not been built with metal support")] NotCompiledWithMetalSupport, #[error("cannot find tensor {path}")] CannotFindTensor { path: String }, // === Wrapped Errors === #[error(transparent)] Cuda(Box<dyn std::error::Error + Send + Sync>), #[error("Metal error {0}")] Metal(#[from] MetalError), #[cfg(not(target_arch = "wasm32"))] #[error(transparent)] Ug(#[from] ug::Error), #[error(transparent)] TryFromIntError(#[from] core::num::TryFromIntError), #[error("npy/npz error {0}")] Npy(String), /// Zip file format error. #[error(transparent)] Zip(#[from] zip::result::ZipError), /// Integer parse error. #[error(transparent)] ParseInt(#[from] std::num::ParseIntError), /// Utf8 parse error. #[error(transparent)] FromUtf8(#[from] std::string::FromUtf8Error), /// I/O error. #[error(transparent)] Io(#[from] std::io::Error), /// SafeTensor error. #[error(transparent)] SafeTensor(#[from] safetensors::SafeTensorError), #[error("unsupported safetensor dtype {0:?}")] UnsupportedSafeTensorDtype(safetensors::Dtype), /// Arbitrary errors wrapping. #[error("{0}")] Wrapped(Box<dyn std::fmt::Display + Send + Sync>), #[error("{context}\n{inner}")] Context { inner: Box<Self>, context: Box<dyn std::fmt::Display + Send + Sync>, }, /// Adding path information to an error. #[error("path: {path:?} {inner}")] WithPath { inner: Box<Self>, path: std::path::PathBuf, }, #[error("{inner}\n{backtrace}")] WithBacktrace { inner: Box<Self>, backtrace: Box<std::backtrace::Backtrace>, }, /// User generated error message, typically created via `bail!`. #[error("{0}")] Msg(String), #[error("unwrap none")] UnwrapNone, } pub type Result<T> = std::result::Result<T, Error>; impl Error { pub fn wrap(err: impl std::fmt::Display + Send + Sync + 'static) -> Self { Self::Wrapped(Box::new(err)).bt() } pub fn msg(err: impl std::fmt::Display) -> Self { Self::Msg(err.to_string()).bt() } pub fn debug(err: impl std::fmt::Debug) -> Self { Self::Msg(format!("{err:?}")).bt() } pub fn bt(self) -> Self { let backtrace = std::backtrace::Backtrace::capture(); match backtrace.status() { std::backtrace::BacktraceStatus::Disabled | std::backtrace::BacktraceStatus::Unsupported => self, _ => Self::WithBacktrace { inner: Box::new(self), backtrace: Box::new(backtrace), }, } } pub fn with_path<P: AsRef<std::path::Path>>(self, p: P) -> Self { Self::WithPath { inner: Box::new(self), path: p.as_ref().to_path_buf(), } } pub fn context(self, c: impl std::fmt::Display + Send + Sync + 'static) -> Self { Self::Context { inner: Box::new(self), context: Box::new(c), } } } #[macro_export] macro_rules! bail { ($msg:literal $(,)?) => { return Err($crate::Error::Msg(format!($msg).into()).bt()) }; ($err:expr $(,)?) 
=> { return Err($crate::Error::Msg(format!($err).into()).bt()) }; ($fmt:expr, $($arg:tt)*) => { return Err($crate::Error::Msg(format!($fmt, $($arg)*).into()).bt()) }; } pub fn zip<T, U>(r1: Result<T>, r2: Result<U>) -> Result<(T, U)> { match (r1, r2) { (Ok(r1), Ok(r2)) => Ok((r1, r2)), (Err(e), _) => Err(e), (_, Err(e)) => Err(e), } } // Taken from anyhow. pub trait Context<T> { /// Wrap the error value with additional context. fn context<C>(self, context: C) -> Result<T> where C: std::fmt::Display + Send + Sync + 'static; /// Wrap the error value with additional context that is evaluated lazily /// only once an error does occur. fn with_context<C, F>(self, f: F) -> Result<T> where C: std::fmt::Display + Send + Sync + 'static, F: FnOnce() -> C; } impl<T> Context<T> for Option<T> { fn context<C>(self, context: C) -> Result<T> where C: std::fmt::Display + Send + Sync + 'static, { match self { Some(v) => Ok(v), None => Err(Error::UnwrapNone.context(context).bt()), } } fn with_context<C, F>(self, f: F) -> Result<T> where C: std::fmt::Display + Send + Sync + 'static, F: FnOnce() -> C, { match self { Some(v) => Ok(v), None => Err(Error::UnwrapNone.context(f()).bt()), } } }
candle/candle-core/src/error.rs/0
{ "file_path": "candle/candle-core/src/error.rs", "repo_id": "candle", "token_count": 4127 }
use super::utils::{ get_scale_min_k4, group_for_dequantization, group_for_quantization, make_q3_quants, make_qkx1_quants, make_qx_quants, nearest_int, }; use super::GgmlDType; use crate::Result; use byteorder::{ByteOrder, LittleEndian}; use half::f16; use rayon::prelude::*; // Default to QK_K 256 rather than 64. pub const QK_K: usize = 256; pub const K_SCALE_SIZE: usize = 12; pub const QK4_0: usize = 32; pub const QK4_1: usize = 32; pub const QK5_0: usize = 32; pub const QK5_1: usize = 32; pub const QK8_0: usize = 32; pub const QK8_1: usize = 32; pub trait GgmlType: Sized + Clone + Send + Sync { const DTYPE: GgmlDType; const BLCK_SIZE: usize; type VecDotType: GgmlType; // This is only safe for types that include immediate values such as float/int/... fn zeros() -> Self { unsafe { std::mem::MaybeUninit::zeroed().assume_init() } } fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()>; fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()>; /// Dot product used as a building block for quantized mat-mul. /// n is the number of elements to be considered. fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32>; /// Generic implementation of the dot product without simd optimizations. fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32>; } #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ4_0 { pub(crate) d: f16, pub(crate) qs: [u8; QK4_0 / 2], } const _: () = assert!(std::mem::size_of::<BlockQ4_0>() == 18); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ4_1 { pub(crate) d: f16, pub(crate) m: f16, pub(crate) qs: [u8; QK4_1 / 2], } const _: () = assert!(std::mem::size_of::<BlockQ4_1>() == 20); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ5_0 { pub(crate) d: f16, pub(crate) qh: [u8; 4], pub(crate) qs: [u8; QK5_0 / 2], } const _: () = assert!(std::mem::size_of::<BlockQ5_0>() == 22); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ5_1 { pub(crate) d: f16, pub(crate) m: f16, pub(crate) qh: [u8; 4], pub(crate) qs: [u8; QK5_1 / 2], } const _: () = assert!(std::mem::size_of::<BlockQ5_1>() == 24); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ8_0 { pub(crate) d: f16, pub(crate) qs: [i8; QK8_0], } const _: () = assert!(std::mem::size_of::<BlockQ8_0>() == 34); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ8_1 { pub(crate) d: f16, pub(crate) s: f16, pub(crate) qs: [i8; QK8_1], } const _: () = assert!(std::mem::size_of::<BlockQ8_1>() == 36); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ2K { pub(crate) scales: [u8; QK_K / 16], pub(crate) qs: [u8; QK_K / 4], pub(crate) d: f16, pub(crate) dmin: f16, } const _: () = assert!(QK_K / 16 + QK_K / 4 + 2 * 2 == std::mem::size_of::<BlockQ2K>()); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ3K { pub(crate) hmask: [u8; QK_K / 8], pub(crate) qs: [u8; QK_K / 4], pub(crate) scales: [u8; 12], pub(crate) d: f16, } const _: () = assert!(QK_K / 8 + QK_K / 4 + 12 + 2 == std::mem::size_of::<BlockQ3K>()); #[derive(Debug, Clone, PartialEq)] // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/k_quants.h#L82 #[repr(C)] pub struct BlockQ4K { pub(crate) d: f16, pub(crate) dmin: f16, pub(crate) scales: [u8; K_SCALE_SIZE], pub(crate) qs: [u8; QK_K / 2], } const _: () = assert!(QK_K / 2 + K_SCALE_SIZE + 2 * 2 == std::mem::size_of::<BlockQ4K>()); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ5K { pub(crate) d: f16, pub(crate) dmin: f16, pub(crate) scales: [u8; 
K_SCALE_SIZE], pub(crate) qh: [u8; QK_K / 8], pub(crate) qs: [u8; QK_K / 2], } const _: () = assert!(QK_K / 8 + QK_K / 2 + 2 * 2 + K_SCALE_SIZE == std::mem::size_of::<BlockQ5K>()); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ6K { pub(crate) ql: [u8; QK_K / 2], pub(crate) qh: [u8; QK_K / 4], pub(crate) scales: [i8; QK_K / 16], pub(crate) d: f16, } const _: () = assert!(3 * QK_K / 4 + QK_K / 16 + 2 == std::mem::size_of::<BlockQ6K>()); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ8K { pub(crate) d: f32, pub(crate) qs: [i8; QK_K], pub(crate) bsums: [i16; QK_K / 16], } const _: () = assert!(4 + QK_K + QK_K / 16 * 2 == std::mem::size_of::<BlockQ8K>()); impl GgmlType for BlockQ4_0 { const DTYPE: GgmlDType = GgmlDType::Q4_0; const BLCK_SIZE: usize = QK4_0; type VecDotType = BlockQ8_0; // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1525 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { let k = ys.len(); let qk = Self::BLCK_SIZE; if k % qk != 0 { crate::bail!("dequantize_row_q4_0: {k} is not divisible by {qk}") } let nb = k / qk; for i in 0..nb { let d = xs[i].d.to_f32(); for j in 0..(qk / 2) { let x0 = (xs[i].qs[j] & 0x0F) as i16 - 8; let x1 = (xs[i].qs[j] >> 4) as i16 - 8; ys[i * qk + j] = (x0 as f32) * d; ys[i * qk + j + qk / 2] = (x1 as f32) * d; } } Ok(()) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { // quantize_row_q4_0 let qk = Self::BLCK_SIZE; let k = xs.len(); if k % qk != 0 { crate::bail!("{k} is not divisible by {}", qk); }; let nb = k / qk; if ys.len() != nb { crate::bail!("size mismatch {} {} {}", xs.len(), ys.len(), qk,) } for (i, ys) in ys.iter_mut().enumerate() { let mut amax = 0f32; let mut max = 0f32; let xs = &xs[i * qk..(i + 1) * qk]; for &x in xs.iter() { if amax < x.abs() { amax = x.abs(); max = x; } } let d = max / -8.0; let id = if d != 0f32 { 1. / d } else { 0. }; ys.d = f16::from_f32(d); for (j, q) in ys.qs.iter_mut().enumerate() { let x0 = xs[j] * id; let x1 = xs[qk / 2 + j] * id; let xi0 = u8::min(15, (x0 + 8.5) as u8); let xi1 = u8::min(15, (x1 + 8.5) as u8); *q = xi0 | (xi1 << 4) } } Ok(()) } // https://github.com/ggerganov/llama.cpp/blob/b5ffb2849d23afe73647f68eec7b68187af09be6/ggml.c#L2361C10-L2361C122 #[allow(unreachable_code)] fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { #[cfg(target_feature = "avx")] return super::avx::vec_dot_q4_0_q8_0(n, xs, ys); #[cfg(target_feature = "neon")] return super::neon::vec_dot_q4_0_q8_0(n, xs, ys); #[cfg(target_feature = "simd128")] return super::simd128::vec_dot_q4_0_q8_0(n, xs, ys); Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q4_0_q8_0: {n} is not divisible by {qk}") } // Generic implementation. 
let mut sumf = 0f32; for (xs, ys) in xs.iter().zip(ys.iter()) { let mut sum_i = 0; for j in 0..qk / 2 { let v0 = (xs.qs[j] & 0x0F) as i32 - 8; let v1 = (xs.qs[j] >> 4) as i32 - 8; sum_i += v0 * ys.qs[j] as i32 + v1 * ys.qs[j + qk / 2] as i32 } sumf += sum_i as f32 * f16::to_f32(xs.d) * f16::to_f32(ys.d) } Ok(sumf) } } impl GgmlType for BlockQ4_1 { const DTYPE: GgmlDType = GgmlDType::Q4_1; const BLCK_SIZE: usize = QK4_1; type VecDotType = BlockQ8_1; fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { // ggml_vec_dot_q4_1_q8_1 let qk = QK8_1; if n % qk != 0 { crate::bail!("vec_dot_q4_1_q8_1: {n} is not divisible by {qk}") } let nb = n / qk; if nb % 2 != 0 { crate::bail!("vec_dot_q4_1_q8_1: {n}, nb is not divisible by 2") } // Generic implementation. let mut sumf = 0f32; for (xs, ys) in xs.iter().zip(ys.iter()) { let mut sumi = 0i32; for j in 0..qk / 2 { let v0 = xs.qs[j] as i32 & 0x0F; let v1 = xs.qs[j] as i32 >> 4; sumi += (v0 * ys.qs[j] as i32) + (v1 * ys.qs[j + qk / 2] as i32); } sumf += sumi as f32 * f16::to_f32(xs.d) * f16::to_f32(ys.d) + f16::to_f32(xs.m) * f16::to_f32(ys.s) } Ok(sumf) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { // quantize_row_q4_1 let qk = Self::BLCK_SIZE; if ys.len() * qk != xs.len() { crate::bail!("size mismatch {} {} {}", xs.len(), ys.len(), qk,) } for (i, ys) in ys.iter_mut().enumerate() { let xs = &xs[i * qk..(i + 1) * qk]; let mut min = f32::INFINITY; let mut max = f32::NEG_INFINITY; for &x in xs.iter() { min = f32::min(x, min); max = f32::max(x, max); } let d = (max - min) / ((1 << 4) - 1) as f32; let id = if d != 0f32 { 1. / d } else { 0. }; ys.d = f16::from_f32(d); ys.m = f16::from_f32(min); for (j, q) in ys.qs.iter_mut().take(qk / 2).enumerate() { let x0 = (xs[j] - min) * id; let x1 = (xs[qk / 2 + j] - min) * id; let xi0 = u8::min(15, (x0 + 0.5) as u8); let xi1 = u8::min(15, (x1 + 0.5) as u8); *q = xi0 | (xi1 << 4); } } Ok(()) } // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1545 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { let k = ys.len(); if k % QK4_1 != 0 { crate::bail!("dequantize_row_q4_1: {k} is not divisible by {QK4_1}"); } let nb = k / QK4_1; for i in 0..nb { let d = xs[i].d.to_f32(); let m = xs[i].m.to_f32(); for j in 0..(QK4_1 / 2) { let x0 = xs[i].qs[j] & 0x0F; let x1 = xs[i].qs[j] >> 4; ys[i * QK4_1 + j] = (x0 as f32) * d + m; ys[i * QK4_1 + j + QK4_1 / 2] = (x1 as f32) * d + m; } } Ok(()) } } impl GgmlType for BlockQ5_0 { const DTYPE: GgmlDType = GgmlDType::Q5_0; const BLCK_SIZE: usize = QK5_0; type VecDotType = BlockQ8_0; fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { let qk = Self::BLCK_SIZE; if n % Self::BLCK_SIZE != 0 { crate::bail!("vec_dot_q5_0_q8_0: {n} is not divisible by {qk}") } let nb = n / qk; if nb % 2 != 0 { crate::bail!("vec_dot_q5_0_q8_0: {n}, nb is not divisible by 2") } Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(_n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { // Generic implementation. 
let mut sumf = 0f32; for (xs, ys) in xs.iter().zip(ys.iter()) { let qh = LittleEndian::read_u32(&xs.qh); let mut sumi = 0i32; for j in 0..Self::BLCK_SIZE / 2 { let xh_0 = (((qh & (1u32 << j)) >> j) << 4) as u8; let xh_1 = ((qh & (1u32 << (j + 16))) >> (j + 12)) as u8; let x0 = ((xs.qs[j] & 0x0F) as i32 | xh_0 as i32) - 16; let x1 = ((xs.qs[j] >> 4) as i32 | xh_1 as i32) - 16; sumi += (x0 * ys.qs[j] as i32) + (x1 * ys.qs[j + Self::BLCK_SIZE / 2] as i32); } sumf += sumi as f32 * f16::to_f32(xs.d) * f16::to_f32(ys.d) } Ok(sumf) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { // quantize_row_q5_0 let k = xs.len(); if ys.len() * Self::BLCK_SIZE != k { crate::bail!("size mismatch {k} {} {}", ys.len(), Self::BLCK_SIZE) } for (i, ys) in ys.iter_mut().enumerate() { let xs = &xs[i * Self::BLCK_SIZE..(i + 1) * Self::BLCK_SIZE]; let mut amax = 0f32; let mut max = 0f32; for &x in xs.iter() { if amax < x.abs() { amax = x.abs(); max = x; } } let d = max / -16.; let id = if d != 0f32 { 1. / d } else { 0. }; ys.d = f16::from_f32(d); let mut qh = 0u32; for j in 0..Self::BLCK_SIZE / 2 { let x0 = xs[j] * id; let x1 = xs[j + Self::BLCK_SIZE / 2] * id; let xi0 = ((x0 + 16.5) as i8).min(31) as u8; let xi1 = ((x1 + 16.5) as i8).min(31) as u8; ys.qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4); qh |= ((xi0 as u32 & 0x10) >> 4) << j; qh |= ((xi1 as u32 & 0x10) >> 4) << (j + Self::BLCK_SIZE / 2); } LittleEndian::write_u32(&mut ys.qh, qh) } Ok(()) } // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1566 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { let k = ys.len(); if k % QK5_0 != 0 { crate::bail!("dequantize_row_q5_0: {k} is not divisible by {QK5_0}"); } let nb = k / QK5_0; for i in 0..nb { let d = xs[i].d.to_f32(); let qh: u32 = LittleEndian::read_u32(&xs[i].qh); for j in 0..(QK5_0 / 2) { let xh_0 = (((qh >> j) << 4) & 0x10) as u8; let xh_1 = ((qh >> (j + 12)) & 0x10) as u8; let x0 = ((xs[i].qs[j] & 0x0F) | xh_0) as i32 - 16; let x1 = ((xs[i].qs[j] >> 4) | xh_1) as i32 - 16; ys[i * QK5_0 + j] = (x0 as f32) * d; ys[i * QK5_0 + j + QK5_0 / 2] = (x1 as f32) * d; } } Ok(()) } } impl GgmlType for BlockQ5_1 { const DTYPE: GgmlDType = GgmlDType::Q5_1; const BLCK_SIZE: usize = QK5_1; type VecDotType = BlockQ8_1; fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { let qk = Self::BLCK_SIZE; if n % Self::BLCK_SIZE != 0 { crate::bail!("vec_dot_q5_1_q8_1: {n} is not divisible by {qk}") } let nb = n / qk; if nb % 2 != 0 { crate::bail!("vec_dot_q5_1_q8_1: {n}, nb is not divisible by 2") } // Generic implementation. 
let mut sumf = 0f32; for (xs, ys) in xs.iter().zip(ys.iter()) { let qh = LittleEndian::read_u32(&xs.qh); let mut sumi = 0i32; for j in 0..Self::BLCK_SIZE / 2 { let xh_0 = ((qh >> j) << 4) & 0x10; let xh_1 = (qh >> (j + 12)) & 0x10; let x0 = (xs.qs[j] as i32 & 0xF) | xh_0 as i32; let x1 = (xs.qs[j] as i32 >> 4) | xh_1 as i32; sumi += (x0 * ys.qs[j] as i32) + (x1 * ys.qs[j + Self::BLCK_SIZE / 2] as i32); } sumf += sumi as f32 * f16::to_f32(xs.d) * f16::to_f32(ys.d) + f16::to_f32(xs.m) * f16::to_f32(ys.s) } Ok(sumf) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { // quantize_row_q5_1 let qk = Self::BLCK_SIZE; if ys.len() * qk != xs.len() { crate::bail!("size mismatch {} {} {}", xs.len(), ys.len(), qk,) } for (i, ys) in ys.iter_mut().enumerate() { let xs = &xs[i * qk..(i + 1) * qk]; let mut min = f32::INFINITY; let mut max = f32::NEG_INFINITY; for &x in xs.iter() { min = f32::min(x, min); max = f32::max(x, max); } let d = (max - min) / ((1 << 5) - 1) as f32; let id = if d != 0f32 { 1. / d } else { 0. }; ys.d = f16::from_f32(d); ys.m = f16::from_f32(min); let mut qh = 0u32; for (j, q) in ys.qs.iter_mut().take(qk / 2).enumerate() { let x0 = (xs[j] - min) * id; let x1 = (xs[qk / 2 + j] - min) * id; let xi0 = (x0 + 0.5) as u8; let xi1 = (x1 + 0.5) as u8; *q = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4); // get the 5-th bit and store it in qh at the right position qh |= ((xi0 as u32 & 0x10) >> 4) << j; qh |= ((xi1 as u32 & 0x10) >> 4) << (j + qk / 2); } LittleEndian::write_u32(&mut ys.qh, qh); } Ok(()) } // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1592 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { let k = ys.len(); if k % QK5_1 != 0 { crate::bail!("dequantize_row_q5_1: {k} is not divisible by {QK5_1}"); } let nb = k / QK5_1; for i in 0..nb { let d = xs[i].d.to_f32(); let m = xs[i].m.to_f32(); let qh: u32 = LittleEndian::read_u32(&xs[i].qh); for j in 0..(QK5_1 / 2) { let xh_0 = (((qh >> j) << 4) & 0x10) as u8; let xh_1 = ((qh >> (j + 12)) & 0x10) as u8; let x0 = (xs[i].qs[j] & 0x0F) | xh_0; let x1 = (xs[i].qs[j] >> 4) | xh_1; ys[i * QK5_1 + j] = (x0 as f32) * d + m; ys[i * QK5_1 + j + QK5_1 / 2] = (x1 as f32) * d + m; } } Ok(()) } } impl GgmlType for BlockQ8_0 { const DTYPE: GgmlDType = GgmlDType::Q8_0; const BLCK_SIZE: usize = QK8_0; type VecDotType = BlockQ8_0; // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1619 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { let k = ys.len(); if k % QK8_0 != 0 { crate::bail!("dequantize_row_q8_0: {k} is not divisible by {QK8_0}"); } let nb = k / QK8_0; for i in 0..nb { let d = xs[i].d.to_f32(); for j in 0..QK8_0 { ys[i * QK8_0 + j] = xs[i].qs[j] as f32 * d; } } Ok(()) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { // quantize_row_q8_0 let k = xs.len(); if k % Self::BLCK_SIZE != 0 { crate::bail!("{k} is not divisible by {}", Self::BLCK_SIZE); }; let nb = k / Self::BLCK_SIZE; if ys.len() != nb { crate::bail!( "size mismatch {} {} {}", xs.len(), ys.len(), Self::BLCK_SIZE ) } for (i, ys) in ys.iter_mut().enumerate() { let mut amax = 0f32; let xs = &xs[i * Self::BLCK_SIZE..(i + 1) * Self::BLCK_SIZE]; for &x in xs.iter() { amax = amax.max(x.abs()) } let d = amax / ((1 << 7) - 1) as f32; let id = if d != 0f32 { 1. / d } else { 0. 
}; ys.d = f16::from_f32(d); for (y, &x) in ys.qs.iter_mut().zip(xs.iter()) { *y = f32::round(x * id) as i8 } } Ok(()) } #[allow(unreachable_code)] fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { #[cfg(target_feature = "avx")] return super::avx::vec_dot_q8_0_q8_0(n, xs, ys); #[cfg(target_feature = "neon")] return super::neon::vec_dot_q8_0_q8_0(n, xs, ys); #[cfg(target_feature = "simd128")] return super::simd128::vec_dot_q8_0_q8_0(n, xs, ys); Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q8_0_q8_0: {n} is not divisible by {qk}") } // Generic implementation. let mut sumf = 0f32; for (xs, ys) in xs.iter().zip(ys.iter()) { let sum_i = xs .qs .iter() .zip(ys.qs.iter()) .map(|(&x, &y)| x as i32 * y as i32) .sum::<i32>(); sumf += sum_i as f32 * f16::to_f32(xs.d) * f16::to_f32(ys.d) } Ok(sumf) } } impl GgmlType for BlockQ8_1 { const DTYPE: GgmlDType = GgmlDType::Q8_1; const BLCK_SIZE: usize = QK8_1; type VecDotType = BlockQ8_1; fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(_n: usize, _xs: &[Self], _ys: &[Self::VecDotType]) -> Result<f32> { unimplemented!("no support for vec-dot on Q8_1") } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { // quantize_row_q8_1 let k = xs.len(); if ys.len() * Self::BLCK_SIZE != k { crate::bail!("size mismatch {k} {} {}", ys.len(), Self::BLCK_SIZE) } for (i, ys) in ys.iter_mut().enumerate() { let mut amax = 0f32; let xs = &xs[i * Self::BLCK_SIZE..(i + 1) * Self::BLCK_SIZE]; for &x in xs.iter() { amax = amax.max(x.abs()) } let d = amax / ((1 << 7) - 1) as f32; let id = if d != 0f32 { 1. / d } else { 0. 
}; ys.d = f16::from_f32(d); let mut sum = 0i32; for j in 0..Self::BLCK_SIZE / 2 { let v0 = xs[j] * id; let v1 = xs[j + Self::BLCK_SIZE / 2] * id; ys.qs[j] = f32::round(v0) as i8; ys.qs[j + Self::BLCK_SIZE / 2] = f32::round(v1) as i8; sum += ys.qs[j] as i32 + ys.qs[j + Self::BLCK_SIZE / 2] as i32; } ys.s = f16::from_f32(sum as f32) * ys.d; } Ok(()) } fn to_float(_xs: &[Self], _ys: &mut [f32]) -> Result<()> { unimplemented!("no support for vec-dot on Q8_1") } } impl GgmlType for BlockQ2K { const DTYPE: GgmlDType = GgmlDType::Q2K; const BLCK_SIZE: usize = QK_K; type VecDotType = BlockQ8K; #[allow(unreachable_code)] fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { #[cfg(target_feature = "avx")] return super::avx::vec_dot_q2k_q8k(n, xs, ys); #[cfg(target_feature = "neon")] return super::neon::vec_dot_q2k_q8k(n, xs, ys); #[cfg(target_feature = "simd128")] return super::simd128::vec_dot_q2k_q8k(n, xs, ys); Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q2k_q8k: {n} is not divisible by {QK_K}") } let mut sumf = 0.0; for (x, y) in xs.iter().zip(ys.iter()) { let mut q2: &[_] = &x.qs; let mut q8: &[_] = &y.qs; let sc = &x.scales; let mut summs = 0; for (bsum, scale) in y.bsums.iter().zip(sc) { summs += *bsum as i32 * ((scale >> 4) as i32); } let dall = y.d * x.d.to_f32(); let dmin = y.d * x.dmin.to_f32(); let mut isum = 0; let mut is = 0; for _ in 0..(QK_K / 128) { let mut shift = 0; for _ in 0..4 { let d = (sc[is] & 0xF) as i32; is += 1; let mut isuml = 0; for l in 0..16 { isuml += q8[l] as i32 * (((q2[l] >> shift) & 3) as i32); } isum += d * isuml; let d = (sc[is] & 0xF) as i32; is += 1; isuml = 0; for l in 16..32 { isuml += q8[l] as i32 * (((q2[l] >> shift) & 3) as i32); } isum += d * isuml; shift += 2; // adjust the indexing q8 = &q8[32..]; } // adjust the indexing q2 = &q2[32..]; } sumf += dall * isum as f32 - dmin * summs as f32; } Ok(sumf) } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L279 fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { const Q4SCALE: f32 = 15.0; for (block, x) in group_for_quantization(xs, ys)? 
{ //calculate scales and mins let mut mins: [f32; QK_K / 16] = [0.0; QK_K / 16]; let mut scales: [f32; QK_K / 16] = [0.0; QK_K / 16]; for (j, x_scale_slice) in x.chunks(16).enumerate() { (scales[j], mins[j]) = make_qkx1_quants(3, 5, x_scale_slice); } // get max scale and max min and ensure they are >= 0.0 let max_scale = scales.iter().fold(0.0, |max, &val| val.max(max)); let max_min = mins.iter().fold(0.0, |max, &val| val.max(max)); if max_scale > 0.0 { let iscale = Q4SCALE / max_scale; for (j, scale) in scales.iter().enumerate().take(QK_K / 16) { block.scales[j] = nearest_int(iscale * scale) as u8; } block.d = f16::from_f32(max_scale / Q4SCALE); } else { for j in 0..QK_K / 16 { block.scales[j] = 0; } block.d = f16::from_f32(0.0); } if max_min > 0.0 { let iscale = Q4SCALE / max_min; for (j, scale) in block.scales.iter_mut().enumerate() { let l = nearest_int(iscale * mins[j]) as u8; *scale |= l << 4; } block.dmin = f16::from_f32(max_min / Q4SCALE); } else { block.dmin = f16::from_f32(0.0); } let mut big_l: [u8; QK_K] = [0; QK_K]; for j in 0..QK_K / 16 { let d = block.d.to_f32() * (block.scales[j] & 0xF) as f32; if d == 0.0 { continue; } let dm = block.dmin.to_f32() * (block.scales[j] >> 4) as f32; for ii in 0..16 { let ll = nearest_int((x[16 * j + ii] + dm) / d).clamp(0, 3); big_l[16 * j + ii] = ll as u8; } } for j in (0..QK_K).step_by(128) { for ll in 0..32 { block.qs[j / 4 + ll] = big_l[j + ll] | (big_l[j + ll + 32] << 2) | (big_l[j + ll + 64] << 4) | (big_l[j + ll + 96] << 6); } } } Ok(()) } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L354 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { for (block, y) in group_for_dequantization(xs, ys)? { let d = block.d.to_f32(); let min = block.dmin.to_f32(); let mut is = 0; for (y_block, qs) in y.chunks_exact_mut(128).zip(block.qs.chunks_exact(32)) { // Step by 32 over q. let mut shift = 0; let mut y_block_index = 0; for _j in 0..4 { let sc = block.scales[is]; is += 1; let dl = d * (sc & 0xF) as f32; let ml = min * (sc >> 4) as f32; for q in &qs[..16] { let y = dl * ((q >> shift) & 3) as f32 - ml; y_block[y_block_index] = y; y_block_index += 1; } let sc = block.scales[is]; is += 1; let dl = d * (sc & 0xF) as f32; let ml = min * (sc >> 4) as f32; for q in &qs[16..] { let y = dl * ((q >> shift) & 3) as f32 - ml; y_block[y_block_index] = y; y_block_index += 1; } shift += 2; } } } Ok(()) } } impl GgmlType for BlockQ3K { const DTYPE: GgmlDType = GgmlDType::Q3K; const BLCK_SIZE: usize = QK_K; type VecDotType = BlockQ8K; #[allow(unreachable_code)] fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { #[cfg(target_feature = "avx")] return super::avx::vec_dot_q3k_q8k(n, xs, ys); #[cfg(target_feature = "neon")] return super::neon::vec_dot_q3k_q8k(n, xs, ys); Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q3k_q8k: {n} is not divisible by {QK_K}") } const KMASK1: u32 = 0x03030303; const KMASK2: u32 = 0x0f0f0f0f; let mut aux8: [i8; QK_K] = [0; QK_K]; let mut aux16: [i16; 8] = [0; 8]; let mut sums: [f32; 8] = [0.0; 8]; let mut aux32: [i32; 8] = [0; 8]; let mut auxs: [u32; 4] = [0; 4]; for (x, y) in xs.iter().zip(ys.iter()) { let mut q3: &[u8] = &x.qs; let hmask: &[u8] = &x.hmask; let mut q8: &[i8] = &y.qs; aux32.fill(0); let mut a = &mut aux8[..]; let mut m = 1; //Like the GGML original this is written this way to enable the compiler to vectorize it. 
for _ in 0..QK_K / 128 { a.iter_mut() .take(32) .zip(q3) .for_each(|(a_val, q3_val)| *a_val = (q3_val & 3) as i8); a.iter_mut() .take(32) .zip(hmask) .for_each(|(a_val, hmask_val)| { *a_val -= if hmask_val & m != 0 { 0 } else { 4 } }); a = &mut a[32..]; m <<= 1; a.iter_mut() .take(32) .zip(q3) .for_each(|(a_val, q3_val)| *a_val = ((q3_val >> 2) & 3) as i8); a.iter_mut() .take(32) .zip(hmask) .for_each(|(a_val, hmask_val)| { *a_val -= if hmask_val & m != 0 { 0 } else { 4 } }); a = &mut a[32..]; m <<= 1; a.iter_mut() .take(32) .zip(q3) .for_each(|(a_val, q3_val)| *a_val = ((q3_val >> 4) & 3) as i8); a.iter_mut() .take(32) .zip(hmask) .for_each(|(a_val, hmask_val)| { *a_val -= if hmask_val & m != 0 { 0 } else { 4 } }); a = &mut a[32..]; m <<= 1; a.iter_mut() .take(32) .zip(q3) .for_each(|(a_val, q3_val)| *a_val = ((q3_val >> 6) & 3) as i8); a.iter_mut() .take(32) .zip(hmask) .for_each(|(a_val, hmask_val)| { *a_val -= if hmask_val & m != 0 { 0 } else { 4 } }); a = &mut a[32..]; m <<= 1; q3 = &q3[32..]; } a = &mut aux8[..]; LittleEndian::read_u32_into(&x.scales, &mut auxs[0..3]); let tmp = auxs[2]; auxs[2] = ((auxs[0] >> 4) & KMASK2) | (((tmp >> 4) & KMASK1) << 4); auxs[3] = ((auxs[1] >> 4) & KMASK2) | (((tmp >> 6) & KMASK1) << 4); auxs[0] = (auxs[0] & KMASK2) | (((tmp) & KMASK1) << 4); auxs[1] = (auxs[1] & KMASK2) | (((tmp >> 2) & KMASK1) << 4); for aux in auxs { for scale in aux.to_le_bytes() { let scale = i8::from_be_bytes([scale]); for l in 0..8 { aux16[l] = q8[l] as i16 * a[l] as i16; } for l in 0..8 { aux32[l] += (scale as i32 - 32) * aux16[l] as i32; } q8 = &q8[8..]; a = &mut a[8..]; for l in 0..8 { aux16[l] = q8[l] as i16 * a[l] as i16; } for l in 0..8 { aux32[l] += (scale as i32 - 32) * aux16[l] as i32; } q8 = &q8[8..]; a = &mut a[8..]; } } let d = x.d.to_f32() * y.d; for l in 0..8 { sums[l] += d * aux32[l] as f32; } } Ok(sums.iter().sum()) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { for (block, x) in group_for_quantization(xs, ys)? { let mut scales: [f32; QK_K / 16] = [0.0; QK_K / 16]; for (j, x_scale_slice) in x.chunks_exact(16).enumerate() { scales[j] = make_q3_quants(x_scale_slice, 4, true); } // Get max scale by absolute value. 
let mut max_scale: f32 = 0.0; for &scale in scales.iter() { if scale.abs() > max_scale.abs() { max_scale = scale; } } block.scales.fill(0); if max_scale != 0.0 { let iscale = -32.0 / max_scale; for (j, scale) in scales.iter().enumerate() { let l_val = nearest_int(iscale * scale); let l_val = l_val.clamp(-32, 31) + 32; if j < 8 { block.scales[j] = (l_val & 0xF) as u8; } else { block.scales[j - 8] |= ((l_val & 0xF) << 4) as u8; } let l_val = l_val >> 4; block.scales[j % 4 + 8] |= (l_val << (2 * (j / 4))) as u8; } block.d = f16::from_f32(1.0 / iscale); } else { block.d = f16::from_f32(0.0); } let mut l: [i8; QK_K] = [0; QK_K]; for j in 0..QK_K / 16 { let sc = if j < 8 { block.scales[j] & 0xF } else { block.scales[j - 8] >> 4 }; let sc = (sc | (((block.scales[8 + j % 4] >> (2 * (j / 4))) & 3) << 4)) as i8 - 32; let d = block.d.to_f32() * sc as f32; if d != 0.0 { for ii in 0..16 { let l_val = nearest_int(x[16 * j + ii] / d); l[16 * j + ii] = (l_val.clamp(-4, 3) + 4) as i8; } } } block.hmask.fill(0); let mut m = 0; let mut hm = 1; for ll in l.iter_mut() { if *ll > 3 { block.hmask[m] |= hm; *ll -= 4; } m += 1; if m == QK_K / 8 { m = 0; hm <<= 1; } } for j in (0..QK_K).step_by(128) { for l_val in 0..32 { block.qs[j / 4 + l_val] = (l[j + l_val] | (l[j + l_val + 32] << 2) | (l[j + l_val + 64] << 4) | (l[j + l_val + 96] << 6)) as u8; } } } Ok(()) } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L533 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { const KMASK1: u32 = 0x03030303; const KMASK2: u32 = 0x0f0f0f0f; for (block, y) in group_for_dequantization(xs, ys)? { //Reconstruct the scales let mut aux = [0; 4]; LittleEndian::read_u32_into(&block.scales, &mut aux[0..3]); let tmp = aux[2]; aux[2] = ((aux[0] >> 4) & KMASK2) | (((tmp >> 4) & KMASK1) << 4); aux[3] = ((aux[1] >> 4) & KMASK2) | (((tmp >> 6) & KMASK1) << 4); aux[0] = (aux[0] & KMASK2) | (((tmp) & KMASK1) << 4); aux[1] = (aux[1] & KMASK2) | (((tmp >> 2) & KMASK1) << 4); //Transfer the scales into an i8 array let scales: &mut [i8] = unsafe { std::slice::from_raw_parts_mut(aux.as_mut_ptr() as *mut i8, 16) }; let d_all = block.d.to_f32(); let mut m = 1; let mut is = 0; // Dequantize both 128 long blocks // 32 qs values per 128 long block // Each 16 elements get a scale for (y, qs) in y.chunks_exact_mut(128).zip(block.qs.chunks_exact(32)) { let mut shift = 0; for shift_scoped_y in y.chunks_exact_mut(32) { for (scale_index, scale_scoped_y) in shift_scoped_y.chunks_exact_mut(16).enumerate() { let dl = d_all * (scales[is] as f32 - 32.0); for (i, inner_y) in scale_scoped_y.iter_mut().enumerate() { let new_y = dl * (((qs[i + 16 * scale_index] >> shift) & 3) as i8 - if (block.hmask[i + 16 * scale_index] & m) == 0 { 4 } else { 0 }) as f32; *inner_y = new_y; } // 16 block finished => advance scale index is += 1; } // 32 block finished => increase shift and m shift += 2; m <<= 1; } } } Ok(()) } } impl GgmlType for BlockQ4K { const DTYPE: GgmlDType = GgmlDType::Q4K; const BLCK_SIZE: usize = QK_K; type VecDotType = BlockQ8K; #[allow(unreachable_code)] fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { #[cfg(target_feature = "avx")] return super::avx::vec_dot_q4k_q8k(n, xs, ys); #[cfg(target_feature = "neon")] return super::neon::vec_dot_q4k_q8k(n, xs, ys); #[cfg(target_feature = "simd128")] return super::simd128::vec_dot_q4k_q8k(n, xs, ys); Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { if n % QK_K != 0 { 
crate::bail!("vec_dot_q4k_q8k: {n} is not divisible by {QK_K}") } const KMASK1: u32 = 0x3f3f3f3f; const KMASK2: u32 = 0x0f0f0f0f; const KMASK3: u32 = 0x03030303; let mut utmp: [u32; 4] = [0; 4]; let mut scales: [u8; 8] = [0; 8]; let mut mins: [u8; 8] = [0; 8]; let mut aux8: [i8; QK_K] = [0; QK_K]; let mut aux16: [i16; 8] = [0; 8]; let mut sums: [f32; 8] = [0.0; 8]; let mut aux32: [i32; 8] = [0; 8]; let mut sumf = 0.0; for (y, x) in ys.iter().zip(xs.iter()) { let q4 = &x.qs; let q8 = &y.qs; aux32.fill(0); let mut a = &mut aux8[..]; let mut q4 = &q4[..]; for _ in 0..QK_K / 64 { for l in 0..32 { a[l] = (q4[l] & 0xF) as i8; } a = &mut a[32..]; for l in 0..32 { a[l] = (q4[l] >> 4) as i8; } a = &mut a[32..]; q4 = &q4[32..]; } LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]); utmp[3] = ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4); let uaux = utmp[1] & KMASK1; utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4); utmp[2] = uaux; utmp[0] &= KMASK1; //extract scales and mins LittleEndian::write_u32_into(&utmp[0..2], &mut scales); LittleEndian::write_u32_into(&utmp[2..4], &mut mins); let mut sumi = 0; for j in 0..QK_K / 16 { sumi += y.bsums[j] as i32 * mins[j / 2] as i32; } let mut a = &mut aux8[..]; let mut q8 = &q8[..]; for scale in scales { let scale = scale as i32; for _ in 0..4 { for l in 0..8 { aux16[l] = q8[l] as i16 * a[l] as i16; } for l in 0..8 { aux32[l] += scale * aux16[l] as i32; } q8 = &q8[8..]; a = &mut a[8..]; } } let d = x.d.to_f32() * y.d; for l in 0..8 { sums[l] += d * aux32[l] as f32; } let dmin = x.dmin.to_f32() * y.d; sumf -= dmin * sumi as f32; } Ok(sumf + sums.iter().sum::<f32>()) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { for (block, x) in group_for_quantization(xs, ys)? { let mut mins: [f32; QK_K / 32] = [0.0; QK_K / 32]; let mut scales: [f32; QK_K / 32] = [0.0; QK_K / 32]; for (j, x_scale_slice) in x.chunks_exact(32).enumerate() { (scales[j], mins[j]) = make_qkx1_quants(15, 5, x_scale_slice); } // get max scale and max min and ensure they are >= 0.0 let max_scale = scales.iter().fold(0.0, |max, &val| val.max(max)); let max_min = mins.iter().fold(0.0, |max, &val| val.max(max)); let inv_scale = if max_scale > 0.0 { 63.0 / max_scale } else { 0.0 }; let inv_min = if max_min > 0.0 { 63.0 / max_min } else { 0.0 }; for j in 0..QK_K / 32 { let ls = nearest_int(inv_scale * scales[j]).min(63) as u8; let lm = nearest_int(inv_min * mins[j]).min(63) as u8; if j < 4 { block.scales[j] = ls; block.scales[j + 4] = lm; } else { block.scales[j + 4] = (ls & 0xF) | ((lm & 0xF) << 4); block.scales[j - 4] |= (ls >> 4) << 6; block.scales[j] |= (lm >> 4) << 6; } } block.d = f16::from_f32(max_scale / 63.0); block.dmin = f16::from_f32(max_min / 63.0); let mut l: [u8; QK_K] = [0; QK_K]; for j in 0..QK_K / 32 { let (sc, m) = get_scale_min_k4(j, &block.scales); let d = block.d.to_f32() * sc as f32; if d != 0.0 { let dm = block.dmin.to_f32() * m as f32; for ii in 0..32 { let l_val = nearest_int((x[32 * j + ii] + dm) / d); l[32 * j + ii] = l_val.clamp(0, 15) as u8; } } } let q = &mut block.qs; for j in (0..QK_K).step_by(64) { for l_val in 0..32 { let offset_index = (j / 64) * 32 + l_val; q[offset_index] = l[j + l_val] | (l[j + l_val + 32] << 4); } } } Ok(()) } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L735 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { for (block, y) in group_for_dequantization(xs, ys)? 
{ let d = block.d.to_f32(); let min = block.dmin.to_f32(); let q = &block.qs; let mut is = 0; let mut ys_index = 0; for j in (0..QK_K).step_by(64) { let q = &q[j / 2..j / 2 + 32]; let (sc, m) = get_scale_min_k4(is, &block.scales); let d1 = d * sc as f32; let m1 = min * m as f32; let (sc, m) = get_scale_min_k4(is + 1, &block.scales); let d2 = d * sc as f32; let m2 = min * m as f32; for q in q { y[ys_index] = d1 * (q & 0xF) as f32 - m1; ys_index += 1; } for q in q { y[ys_index] = d2 * (q >> 4) as f32 - m2; ys_index += 1; } is += 2; } } Ok(()) } } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L928 impl GgmlType for BlockQ5K { const DTYPE: GgmlDType = GgmlDType::Q5K; const BLCK_SIZE: usize = QK_K; type VecDotType = BlockQ8K; #[allow(unreachable_code)] fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { #[cfg(target_feature = "avx")] return super::avx::vec_dot_q5k_q8k(n, xs, ys); #[cfg(target_feature = "neon")] return super::neon::vec_dot_q5k_q8k(n, xs, ys); Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q5k_q8k: {n} is not divisible by {QK_K}") } const KMASK1: u32 = 0x3f3f3f3f; const KMASK2: u32 = 0x0f0f0f0f; const KMASK3: u32 = 0x03030303; let mut utmp: [u32; 4] = [0; 4]; let mut scales: [u8; 8] = [0; 8]; let mut mins: [u8; 8] = [0; 8]; let mut aux8: [i8; QK_K] = [0; QK_K]; let mut aux16: [i16; 8] = [0; 8]; let mut sums: [f32; 8] = [0.0; 8]; let mut aux32: [i32; 8] = [0; 8]; let mut sumf = 0.0; for (y, x) in ys.iter().zip(xs.iter()) { let q5 = &x.qs; let hm = &x.qh; let q8 = &y.qs; aux32.fill(0); let mut a = &mut aux8[..]; let mut q5 = &q5[..]; let mut m = 1u8; for _ in 0..QK_K / 64 { for l in 0..32 { a[l] = (q5[l] & 0xF) as i8; a[l] += if hm[l] & m != 0 { 16 } else { 0 }; } a = &mut a[32..]; m <<= 1; for l in 0..32 { a[l] = (q5[l] >> 4) as i8; a[l] += if hm[l] & m != 0 { 16 } else { 0 }; } a = &mut a[32..]; m <<= 1; q5 = &q5[32..]; } LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]); utmp[3] = ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4); let uaux = utmp[1] & KMASK1; utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4); utmp[2] = uaux; utmp[0] &= KMASK1; //extract scales and mins LittleEndian::write_u32_into(&utmp[0..2], &mut scales); LittleEndian::write_u32_into(&utmp[2..4], &mut mins); let mut sumi = 0; for j in 0..QK_K / 16 { sumi += y.bsums[j] as i32 * mins[j / 2] as i32; } let mut a = &mut aux8[..]; let mut q8 = &q8[..]; for scale in scales { let scale = scale as i32; for _ in 0..4 { for l in 0..8 { aux16[l] = q8[l] as i16 * a[l] as i16; } for l in 0..8 { aux32[l] += scale * aux16[l] as i32; } q8 = &q8[8..]; a = &mut a[8..]; } } let d = x.d.to_f32() * y.d; for l in 0..8 { sums[l] += d * aux32[l] as f32; } let dmin = x.dmin.to_f32() * y.d; sumf -= dmin * sumi as f32; } Ok(sumf + sums.iter().sum::<f32>()) } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L793 fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { for (block, x) in group_for_quantization(xs, ys)? 
{ let mut mins: [f32; QK_K / 32] = [0.0; QK_K / 32]; let mut scales: [f32; QK_K / 32] = [0.0; QK_K / 32]; for (j, x_scale_slice) in x.chunks_exact(32).enumerate() { (scales[j], mins[j]) = make_qkx1_quants(31, 5, x_scale_slice); } // get max scale and max min and ensure they are >= 0.0 let max_scale = scales.iter().fold(0.0, |max, &val| val.max(max)); let max_min = mins.iter().fold(0.0, |max, &val| val.max(max)); let inv_scale = if max_scale > 0.0 { 63.0 / max_scale } else { 0.0 }; let inv_min = if max_min > 0.0 { 63.0 / max_min } else { 0.0 }; for j in 0..QK_K / 32 { let ls = nearest_int(inv_scale * scales[j]).min(63) as u8; let lm = nearest_int(inv_min * mins[j]).min(63) as u8; if j < 4 { block.scales[j] = ls; block.scales[j + 4] = lm; } else { block.scales[j + 4] = (ls & 0xF) | ((lm & 0xF) << 4); block.scales[j - 4] |= (ls >> 4) << 6; block.scales[j] |= (lm >> 4) << 6; } } block.d = f16::from_f32(max_scale / 63.0); block.dmin = f16::from_f32(max_min / 63.0); let mut l: [u8; QK_K] = [0; QK_K]; for j in 0..QK_K / 32 { let (sc, m) = get_scale_min_k4(j, &block.scales); let d = block.d.to_f32() * sc as f32; if d == 0.0 { continue; } let dm = block.dmin.to_f32() * m as f32; for ii in 0..32 { let ll = nearest_int((x[32 * j + ii] + dm) / d); l[32 * j + ii] = ll.clamp(0, 31) as u8; } } let qh = &mut block.qh; let ql = &mut block.qs; qh.fill(0); let mut m1 = 1; let mut m2 = 2; for n in (0..QK_K).step_by(64) { let offset = (n / 64) * 32; for j in 0..32 { let mut l1 = l[n + j]; if l1 > 15 { l1 -= 16; qh[j] |= m1; } let mut l2 = l[n + j + 32]; if l2 > 15 { l2 -= 16; qh[j] |= m2; } ql[offset + j] = l1 | (l2 << 4); } m1 <<= 2; m2 <<= 2; } } Ok(()) } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L928 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { for (block, y) in group_for_dequantization(xs, ys)? 
{ let d = block.d.to_f32(); let min = block.dmin.to_f32(); let ql = &block.qs; let qh = &block.qh; let mut is = 0; let mut u1 = 1; let mut u2 = 2; let mut ys_index = 0; for j in (0..QK_K).step_by(64) { let ql = &ql[j / 2..j / 2 + 32]; let (sc, m) = get_scale_min_k4(is, &block.scales); let d1 = d * sc as f32; let m1 = min * m as f32; let (sc, m) = get_scale_min_k4(is + 1, &block.scales); let d2 = d * sc as f32; let m2 = min * m as f32; for (ql, qh) in ql.iter().zip(qh) { let to_add = if qh & u1 != 0 { 16f32 } else { 0f32 }; y[ys_index] = d1 * ((ql & 0xF) as f32 + to_add) - m1; ys_index += 1; } for (ql, qh) in ql.iter().zip(qh) { let to_add = if qh & u2 != 0 { 16f32 } else { 0f32 }; y[ys_index] = d2 * ((ql >> 4) as f32 + to_add) - m2; ys_index += 1; } is += 2; u1 <<= 2; u2 <<= 2; } } Ok(()) } } impl GgmlType for BlockQ6K { const DTYPE: GgmlDType = GgmlDType::Q6K; const BLCK_SIZE: usize = QK_K; type VecDotType = BlockQ8K; #[allow(unreachable_code)] fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { #[cfg(target_feature = "avx")] return super::avx::vec_dot_q6k_q8k(n, xs, ys); #[cfg(target_feature = "neon")] return super::neon::vec_dot_q6k_q8k(n, xs, ys); #[cfg(target_feature = "simd128")] return super::simd128::vec_dot_q6k_q8k(n, xs, ys); Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q6k_q8k: {n} is not divisible by {QK_K}") } let mut aux8 = [0i8; QK_K]; let mut aux16 = [0i16; 8]; let mut sums = [0f32; 8]; let mut aux32 = [0f32; 8]; for (x, y) in xs.iter().zip(ys.iter()) { let q4 = &x.ql; let qh = &x.qh; let q8 = &y.qs; aux32.fill(0f32); for j in (0..QK_K).step_by(128) { let aux8 = &mut aux8[j..]; let q4 = &q4[j / 2..]; let qh = &qh[j / 4..]; for l in 0..32 { aux8[l] = (((q4[l] & 0xF) | ((qh[l] & 3) << 4)) as i32 - 32) as i8; aux8[l + 32] = (((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) as i32 - 32) as i8; aux8[l + 64] = (((q4[l] >> 4) | (((qh[l] >> 4) & 3) << 4)) as i32 - 32) as i8; aux8[l + 96] = (((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) as i32 - 32) as i8; } } for (j, &scale) in x.scales.iter().enumerate() { let scale = scale as f32; let q8 = &q8[16 * j..]; let aux8 = &aux8[16 * j..]; for l in 0..8 { aux16[l] = q8[l] as i16 * aux8[l] as i16; } for l in 0..8 { aux32[l] += scale * aux16[l] as f32 } let q8 = &q8[8..]; let aux8 = &aux8[8..]; for l in 0..8 { aux16[l] = q8[l] as i16 * aux8[l] as i16; } for l in 0..8 { aux32[l] += scale * aux16[l] as f32 } } let d = x.d.to_f32() * y.d; for (sum, &a) in sums.iter_mut().zip(aux32.iter()) { *sum += a * d; } } Ok(sums.iter().sum()) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { if xs.len() != ys.len() * Self::BLCK_SIZE { crate::bail!( "quantize_row_q6k: size mismatch {} {} {}", xs.len(), ys.len(), Self::BLCK_SIZE ) } let mut l = [0i8; QK_K]; let mut scales = [0f32; QK_K / 16]; let mut x = xs.as_ptr(); let l = l.as_mut_ptr(); unsafe { for y in ys.iter_mut() { let mut max_scale = 0f32; let mut max_abs_scale = 0f32; for (ib, scale_) in scales.iter_mut().enumerate() { let scale = make_qx_quants(16, 32, x.add(16 * ib), l.add(16 * ib), 1); *scale_ = scale; let abs_scale = scale.abs(); if abs_scale > max_abs_scale { max_abs_scale = abs_scale; max_scale = scale } } let iscale = -128f32 / max_scale; y.d = f16::from_f32(1.0 / iscale); for (y_scale, scale) in y.scales.iter_mut().zip(scales.iter()) { *y_scale = nearest_int(iscale * scale).min(127) as i8 } for (j, &y_scale) in y.scales.iter().enumerate() { 
let d = y.d.to_f32() * y_scale as f32; if d == 0. { continue; } for ii in 0..16 { let ll = nearest_int(*x.add(16 * j + ii) / d).clamp(-32, 31); *l.add(16 * j + ii) = (ll + 32) as i8 } } let mut ql = y.ql.as_mut_ptr(); let mut qh = y.qh.as_mut_ptr(); for j in (0..QK_K).step_by(128) { for l_idx in 0..32 { let q1 = *l.add(j + l_idx) & 0xF; let q2 = *l.add(j + l_idx + 32) & 0xF; let q3 = *l.add(j + l_idx + 64) & 0xF; let q4 = *l.add(j + l_idx + 96) & 0xF; *ql.add(l_idx) = (q1 | (q3 << 4)) as u8; *ql.add(l_idx + 32) = (q2 | (q4 << 4)) as u8; *qh.add(l_idx) = ((*l.add(j + l_idx) >> 4) | ((*l.add(j + l_idx + 32) >> 4) << 2) | ((*l.add(j + l_idx + 64) >> 4) << 4) | ((*l.add(j + l_idx + 96) >> 4) << 6)) as u8; } ql = ql.add(64); qh = qh.add(32); } x = x.add(QK_K) } } Ok(()) } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L1067 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { let k = ys.len(); if k % QK_K != 0 { crate::bail!("dequantize_row_q6k: {k} is not divisible by {QK_K}") } for (idx_x, x) in xs.iter().enumerate() { let d = x.d.to_f32(); let ql = &x.ql; let qh = &x.qh; let sc = &x.scales; for n in (0..QK_K).step_by(128) { let idx = n / 128; let ys = &mut ys[idx_x * QK_K + n..]; let sc = &sc[8 * idx..]; let ql = &ql[64 * idx..]; let qh = &qh[32 * idx..]; for l in 0..32 { let is = l / 16; let q1 = ((ql[l] & 0xF) | ((qh[l] & 3) << 4)) as i8 - 32; let q2 = ((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) as i8 - 32; let q3 = ((ql[l] >> 4) | (((qh[l] >> 4) & 3) << 4)) as i8 - 32; let q4 = ((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) as i8 - 32; ys[l] = d * sc[is] as f32 * q1 as f32; ys[l + 32] = d * sc[is + 2] as f32 * q2 as f32; ys[l + 64] = d * sc[is + 4] as f32 * q3 as f32; ys[l + 96] = d * sc[is + 6] as f32 * q4 as f32; } } } Ok(()) } } impl GgmlType for BlockQ8K { const DTYPE: GgmlDType = GgmlDType::Q8K; const BLCK_SIZE: usize = QK_K; type VecDotType = BlockQ8K; #[allow(unreachable_code)] fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { #[cfg(target_feature = "avx")] return super::avx::vec_dot_q8k_q8k(n, xs, ys); #[cfg(target_feature = "neon")] return super::neon::vec_dot_q8k_q8k(n, xs, ys); #[cfg(target_feature = "simd128")] return super::simd128::vec_dot_q8k_q8k(n, xs, ys); Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { let qk = QK_K; if n % QK_K != 0 { crate::bail!("vec_dot_q8k_q8k: {n} is not divisible by {qk}") } // Generic implementation. let mut sumf = 0f32; for (xs, ys) in xs.iter().zip(ys.iter()) { let sum_i = xs .qs .iter() .zip(ys.qs.iter()) .map(|(&x, &y)| x as i32 * y as i32) .sum::<i32>(); sumf += sum_i as f32 * xs.d * ys.d } Ok(sumf) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { let k = xs.len(); if k % QK_K != 0 { crate::bail!("quantize_row_q8k: {k} is not divisible by {QK_K}") } for (i, y) in ys.iter_mut().enumerate() { let mut max = 0f32; let mut amax = 0f32; let xs = &xs[i * QK_K..(i + 1) * QK_K]; for &x in xs.iter() { if amax < x.abs() { amax = x.abs(); max = x; } } if amax == 0f32 { y.d = 0f32; y.qs.fill(0) } else { let iscale = -128f32 / max; for (j, q) in y.qs.iter_mut().enumerate() { // ggml uses nearest_int with bit magic here, maybe we want the same // but we would have to test and benchmark it. let v = (iscale * xs[j]).round(); *q = v.min(127.) 
as i8 } for j in 0..QK_K / 16 { let mut sum = 0i32; for ii in 0..16 { sum += y.qs[j * 16 + ii] as i32 } y.bsums[j] = sum as i16 } y.d = 1.0 / iscale } } Ok(()) } fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { let k = ys.len(); if k % QK_K != 0 { crate::bail!("dequantize_row_q8k: {k} is not divisible by {QK_K}") } for (i, x) in xs.iter().enumerate() { for (j, &q) in x.qs.iter().enumerate() { ys[i * QK_K + j] = x.d * q as f32 } } Ok(()) } } // https://github.com/ggerganov/llama.cpp/blob/b5ffb2849d23afe73647f68eec7b68187af09be6/ggml.c#L10605 pub fn matmul<T: GgmlType>( mkn: (usize, usize, usize), lhs: &[f32], rhs_t: &[T], dst: &mut [f32], ) -> Result<()> { let (m, k, n) = mkn; if m * k != lhs.len() { crate::bail!("unexpected lhs length {} {mkn:?}", lhs.len()); } let k_in_lhs_blocks = k.div_ceil(T::BLCK_SIZE); let k_in_rhs_blocks = k.div_ceil(T::VecDotType::BLCK_SIZE); // TODO: Do not make this copy if the DotType is f32. // TODO: Pre-allocate this. let mut lhs_b = vec![T::VecDotType::zeros(); m * k_in_lhs_blocks]; for row_idx in 0..m { let lhs_b = &mut lhs_b[row_idx * k_in_lhs_blocks..(row_idx + 1) * k_in_lhs_blocks]; let lhs = &lhs[row_idx * k..(row_idx + 1) * k]; T::VecDotType::from_float(lhs, lhs_b)? } let lhs_b = lhs_b.as_slice(); for row_idx in 0..m { let lhs_row = &lhs_b[row_idx * k_in_lhs_blocks..(row_idx + 1) * k_in_lhs_blocks]; let dst_row = &mut dst[row_idx * n..(row_idx + 1) * n]; let result: Result<Vec<_>> = dst_row .into_par_iter() .enumerate() .with_min_len(128) .with_max_len(512) .map(|(col_idx, dst)| { let rhs_col = &rhs_t[col_idx * k_in_rhs_blocks..(col_idx + 1) * k_in_rhs_blocks]; T::vec_dot(k, rhs_col, lhs_row).map(|value| *dst = value) }) .collect(); result?; } Ok(()) } impl GgmlType for f32 { const DTYPE: GgmlDType = GgmlDType::F32; const BLCK_SIZE: usize = 1; type VecDotType = f32; fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { if xs.len() < n { crate::bail!("size mismatch {} < {n}", xs.len()) } if ys.len() < n { crate::bail!("size mismatch {} < {n}", ys.len()) } let mut res = 0f32; unsafe { crate::cpu::vec_dot_f32(xs.as_ptr(), ys.as_ptr(), &mut res, n) }; Ok(res) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { if xs.len() != ys.len() { crate::bail!("size mismatch {} {}", xs.len(), ys.len()); } ys.copy_from_slice(xs); Ok(()) } fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { if xs.len() != ys.len() { crate::bail!("size mismatch {} {}", xs.len(), ys.len()); } ys.copy_from_slice(xs); Ok(()) } } impl GgmlType for f16 { const DTYPE: GgmlDType = GgmlDType::F16; const BLCK_SIZE: usize = 1; type VecDotType = f16; fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { if xs.len() < n { crate::bail!("size mismatch {} < {n}", xs.len()) } if ys.len() < n { crate::bail!("size mismatch {} < {n}", ys.len()) } let mut res = 0f32; unsafe { crate::cpu::vec_dot_f16(xs.as_ptr(), ys.as_ptr(), &mut res, n) }; Ok(res) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { if xs.len() != ys.len() { crate::bail!("size mismatch {} {}", xs.len(), ys.len()); } // TODO: vectorize for (x, y) in xs.iter().zip(ys.iter_mut()) { *y = f16::from_f32(*x) } Ok(()) } fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { if xs.len() != ys.len() { crate::bail!("size mismatch {} {}", xs.len(), 
ys.len()); } // TODO: vectorize for (x, y) in xs.iter().zip(ys.iter_mut()) { *y = x.to_f32() } Ok(()) } }
candle/candle-core/src/quantized/k_quants.rs/0
{ "file_path": "candle/candle-core/src/quantized/k_quants.rs", "repo_id": "candle", "token_count": 42632 }
//! Useful functions for checking features.
use std::str::FromStr;

/// Number of threads to use for CPU work: the `RAYON_NUM_THREADS` environment
/// variable if it is set to a positive integer, otherwise the logical CPU count.
pub fn get_num_threads() -> usize {
    // Respond to the same environment variable as rayon.
    match std::env::var("RAYON_NUM_THREADS")
        .ok()
        .and_then(|s| usize::from_str(&s).ok())
    {
        Some(x) if x > 0 => x,
        Some(_) | None => num_cpus::get(),
    }
}

/// True when the crate was built with the `accelerate` feature.
pub fn has_accelerate() -> bool {
    cfg!(feature = "accelerate")
}

/// True when the crate was built with the `mkl` feature.
pub fn has_mkl() -> bool {
    cfg!(feature = "mkl")
}

/// True when the crate was built with the `cuda` feature.
pub fn cuda_is_available() -> bool {
    cfg!(feature = "cuda")
}

/// True when the crate was built with the `metal` feature.
pub fn metal_is_available() -> bool {
    cfg!(feature = "metal")
}

/// True when the target supports AVX SIMD instructions.
pub fn with_avx() -> bool {
    cfg!(target_feature = "avx")
}

/// True when the target supports NEON SIMD instructions.
pub fn with_neon() -> bool {
    cfg!(target_feature = "neon")
}

/// True when the target supports WASM simd128 instructions.
pub fn with_simd128() -> bool {
    cfg!(target_feature = "simd128")
}

/// True when the target supports the F16C half-precision conversion instructions.
pub fn with_f16c() -> bool {
    cfg!(target_feature = "f16c")
}
candle/candle-core/src/utils.rs/0
{ "file_path": "candle/candle-core/src/utils.rs", "repo_id": "candle", "token_count": 398 }
use candle_core::{test_device, test_utils, DType, Device, IndexOp, Result, Tensor, D}; fn zeros(device: &Device) -> Result<()> { let tensor = Tensor::zeros((5, 2), DType::F32, device)?; let (dim1, dim2) = tensor.dims2()?; assert_eq!(dim1, 5); assert_eq!(dim2, 2); Ok(()) } fn ones(device: &Device) -> Result<()> { assert_eq!( Tensor::ones((2, 3), DType::U8, device)?.to_vec2::<u8>()?, [[1, 1, 1], [1, 1, 1]], ); assert_eq!( Tensor::ones((2, 3), DType::U32, device)?.to_vec2::<u32>()?, [[1, 1, 1], [1, 1, 1]], ); assert_eq!( Tensor::ones((2, 3), DType::I64, device)?.to_vec2::<i64>()?, [[1, 1, 1], [1, 1, 1]], ); assert_eq!( Tensor::ones((2, 3), DType::F32, device)?.to_vec2::<f32>()?, [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], ); assert_eq!( Tensor::ones((2, 3), DType::F64, device)?.to_vec2::<f64>()?, [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], ); assert_eq!( Tensor::ones((2, 3), DType::F16, device)?.to_vec2::<half::f16>()?, [ [ half::f16::from_f32(1.0), half::f16::from_f32(1.0), half::f16::from_f32(1.0) ], [ half::f16::from_f32(1.0), half::f16::from_f32(1.0), half::f16::from_f32(1.0) ] ], ); assert_eq!( Tensor::ones((2, 3), DType::BF16, device)?.to_vec2::<half::bf16>()?, [ [ half::bf16::from_f32(1.0), half::bf16::from_f32(1.0), half::bf16::from_f32(1.0) ], [ half::bf16::from_f32(1.0), half::bf16::from_f32(1.0), half::bf16::from_f32(1.0) ] ], ); Ok(()) } fn full(device: &Device) -> Result<()> { assert_eq!( Tensor::full(42u32, (2, 3), device)?.to_vec2::<u32>()?, [[42, 42, 42], [42, 42, 42]], ); Ok(()) } fn arange(device: &Device) -> Result<()> { assert_eq!( Tensor::arange(0u8, 5u8, device)?.to_vec1::<u8>()?, [0, 1, 2, 3, 4], ); assert_eq!( Tensor::arange_step(0u8, 5u8, 2, device)?.to_vec1::<u8>()?, [0, 2, 4], ); assert_eq!( Tensor::arange_step(0u8, 5u8, 3, device)?.to_vec1::<u8>()?, [0, 3], ); assert_eq!( Tensor::arange_step(5i64, 0i64, -1, device)?.to_vec1::<i64>()?, [5, 4, 3, 2, 1], ); Ok(()) } fn add_mul(device: &Device) -> Result<()> { let tensor = Tensor::new(&[3f32, 1., 4.], device)?; let dim1 = tensor.dims1()?; assert_eq!(dim1, 3); let content: Vec<f32> = tensor.to_vec1()?; assert_eq!(content, [3., 1., 4.]); let tensor = Tensor::add(&tensor, &tensor)?; let content: Vec<f32> = tensor.to_vec1()?; assert_eq!(content, [6., 2., 8.]); let tensor = Tensor::mul(&tensor, &tensor)?; let content: Vec<f32> = tensor.to_vec1()?; assert_eq!(content, [36., 4., 64.]); Ok(()) } fn tensor_2d(device: &Device) -> Result<()> { let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]]; let tensor = Tensor::new(data, device)?; let dims = tensor.dims2()?; assert_eq!(dims, (2, 5)); let content: Vec<Vec<f32>> = tensor.to_vec2()?; assert_eq!(content, data); Ok(()) } fn clamp(device: &Device) -> Result<()> { let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]]; let tensor = Tensor::new(data, device)?; let tensor = tensor.clamp(1.5, 6.2)?; assert_eq!( tensor.to_vec2::<f32>()?, [[3.0, 1.5, 4.0, 1.5, 5.0], [2.0, 1.5, 6.2, 6.2, 2.0]], ); Ok(()) } fn asort(device: &Device) -> Result<()> { let data = &[[3f32, 1., 4., 1.1, 5.], [2.1, 1., 7., 8., 2.]]; let tensor = Tensor::new(data, device)?; let indexes = tensor.arg_sort_last_dim(true)?; assert_eq!( indexes.to_vec2::<u32>()?, [[1, 3, 0, 2, 4], [1, 4, 0, 2, 3]], ); let indexes = tensor.arg_sort_last_dim(false)?; assert_eq!( indexes.to_vec2::<u32>()?, [[4, 2, 0, 3, 1], [3, 2, 0, 4, 1]], ); let (sorted, indexes) = tensor.sort_last_dim(true)?; assert_eq!( indexes.to_vec2::<u32>()?, [[1, 3, 0, 2, 4], [1, 4, 0, 2, 3]], ); assert_eq!( sorted.to_vec2::<f32>()?, [[1.0, 1.1, 3.0, 4.0, 5.0], 
[1.0, 2.0, 2.1, 7.0, 8.0]] ); let (sorted, indexes) = tensor.sort_last_dim(false)?; assert_eq!( indexes.to_vec2::<u32>()?, [[4, 2, 0, 3, 1], [3, 2, 0, 4, 1]], ); assert_eq!( sorted.to_vec2::<f32>()?, [[5.0, 4.0, 3.0, 1.1, 1.0], [8.0, 7.0, 2.1, 2.0, 1.0]] ); Ok(()) } fn unary_op(device: &Device) -> Result<()> { let data = &[[-3f32, 1., 4., -0.1, 0.5], [2.7, -1.8, -0.28, 1.8, 2.8]]; let tensor = Tensor::new(data, device)?; assert_eq!( test_utils::to_vec2_round(&tensor.gelu()?, 4)?, [ [-0.0036, 0.8412, 3.9999, -0.046, 0.3457], [2.6911, -0.0647, -0.1091, 1.7353, 2.7933] ] ); let t_f16 = tensor.to_dtype(DType::F16)?.gelu()?.to_dtype(DType::F32)?; let max_diff = (tensor.gelu()? - t_f16)?.flatten_all()?.max(0)?; assert!(max_diff.to_vec0::<f32>()? < 5e-3); assert_eq!( test_utils::to_vec2_round(&tensor.gelu_erf()?, 4)?, [ [-0.004, 0.8413, 3.9999, -0.046, 0.3457], [2.6906, -0.0647, -0.1091, 1.7353, 2.7928] ] ); assert_eq!( test_utils::to_vec2_round(&tensor.erf()?, 4)?, [ [-1.0, 0.8427, 1.0, -0.1125, 0.5205], [0.9999, -0.9891, -0.3079, 0.9891, 0.9999] ] ); assert_eq!( test_utils::to_vec2_round(&tensor.silu()?, 4)?, [ [-0.1423, 0.7311, 3.9281, -0.0475, 0.3112], [2.53, -0.2553, -0.1205, 1.5447, 2.6395] ] ); assert_eq!( test_utils::to_vec2_round(&tensor.ceil()?, 4)?, [[-3.0, 1.0, 4.0, -0.0, 1.0], [3.0, -1.0, -0.0, 2.0, 3.0]] ); assert_eq!( test_utils::to_vec2_round(&tensor.floor()?, 4)?, [[-3.0, 1.0, 4.0, -1.0, 0.0], [2.0, -2.0, -1.0, 1.0, 2.0]] ); assert_eq!( test_utils::to_vec2_round(&tensor.round()?, 4)?, [[-3.0, 1.0, 4.0, -0.0, 1.0], [3.0, -2.0, -0.0, 2.0, 3.0]] ); let tensor = Tensor::new(&[2997.9246, 314.15926f32], device)?; assert_eq!( test_utils::to_vec1_round(&tensor.round_to(2)?, 4)?, [2997.92, 314.16] ); assert_eq!( test_utils::to_vec1_round(&tensor.round_to(-2)?, 4)?, [3000.0, 300.] ); let tensor = Tensor::new( &[-1.01f32, -0.9, -0.1, 0.0, -0.0, 0.1, 0.9, 1.0, 1.1], device, )?; assert_eq!( tensor.sign()?.to_vec1::<f32>()?, [-1., -1., -1., 0., 0., 1., 1., 1., 1.] ); let tensor = Tensor::new(&[-1.0f32, 0., -2., 3.], device)?; let y = tensor.elu(2.)?; assert_eq!( test_utils::to_vec1_round(&y, 4)?, [-1.2642, 0.0000, -1.7293, 3.0000] ); // This test failed on metal prior to the following PR: // https://github.com/huggingface/candle/pull/2490 let y = tensor.reshape((2, 2))?.t()?.elu(2.)?.flatten_all()?; assert_eq!( test_utils::to_vec1_round(&y, 4)?, [-1.2642, -1.7293, 0.0000, 3.0000] ); Ok(()) } fn binary_op(device: &Device) -> Result<()> { let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]]; let tensor1 = Tensor::new(data, device)?; let data2 = &[[5f32, 5., 5., 5., 5.], [2., 1., 7., 8., 2.]]; let tensor2 = Tensor::new(data2, device)?; let tensor = (&tensor1 + (&tensor1 * &tensor1)? 
/ (&tensor1 + &tensor2))?; let dims = tensor.dims2()?; assert_eq!(dims, (2, 5)); let content: Vec<Vec<f32>> = tensor.to_vec2()?; assert_eq!(content[0], [4.125, 1.1666666, 5.7777777, 1.1666666, 7.5]); assert_eq!(content[1], [3.0, 1.5, 10.5, 12.0, 3.0]); #[allow(clippy::eq_op)] let tensor = (&tensor - &tensor)?; let content: Vec<Vec<f32>> = tensor.to_vec2()?; assert_eq!(content[0], [0., 0., 0., 0., 0.]); let min = tensor1.minimum(&(&tensor2 * 0.5)?)?; let max = tensor1.maximum(&(&tensor2 * 0.5)?)?; assert_eq!( min.to_vec2::<f32>()?, [[2.5, 1.0, 2.5, 1.0, 2.5], [1.0, 0.5, 3.5, 4.0, 1.0]], ); assert_eq!( max.to_vec2::<f32>()?, [[3.0, 2.5, 4.0, 2.5, 5.0], [2.0, 1.0, 7.0, 8.0, 2.0]] ); Ok(()) } fn transpose(device: &Device) -> Result<()> { let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]]; let tensor = Tensor::new(data, device)?.t()?; let dims = tensor.dims2()?; assert_eq!(dims, (5, 2)); assert_eq!( tensor.to_vec2::<f32>()?, &[[3f32, 2.], [1., 1.], [4., 7.], [1., 8.], [5., 2.]] ); assert_eq!(tensor.t()?.to_vec2::<f32>()?, data); assert_eq!(tensor.contiguous()?.t()?.to_vec2::<f32>()?, data); assert_eq!(((tensor + 1.)?.t()? - 1.)?.to_vec2::<f32>()?, data); Ok(()) } fn var(device: &Device) -> Result<()> { // Values taken from https://pytorch.org/docs/stable/generated/torch.var.html let data = &[ [0.2035f32, 1.2959, 1.8101, -0.4644], [1.5027, -0.3270, 0.5905, 0.6538], [-1.5745, 1.3330, -0.5596, -0.6548], [0.1264, -0.5080, 1.6420, 0.1992], ]; let tensor = Tensor::new(data, device)?; assert_eq!( test_utils::to_vec2_round(&tensor.var_keepdim(1)?, 4)?, &[[1.0631], [0.559], [1.4893], [0.8258]] ); Ok(()) } fn sum(device: &Device) -> Result<()> { let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.sum_keepdim(2)?.to_vec3::<u32>()?, &[[[8], [15]], [[10], [18]]] ); assert_eq!( tensor.sum_keepdim(0)?.to_vec3::<u32>()?, &[[[5, 2, 11], [9, 7, 17]]], ); assert_eq!(tensor.sum_keepdim((0, 2, 1))?.to_vec3::<u32>()?, &[[[51]]],); assert_eq!( tensor.t()?.sum_keepdim(1)?.t()?.to_vec3::<u32>()?, &[[[8], [15]], [[10], [18]]] ); assert_eq!( tensor.sum_keepdim((2, 1))?.to_vec3::<u32>()?, &[[[8 + 15]], [[10 + 18]]] ); let data: Vec<u32> = (0..4000u32).collect(); let tensor = Tensor::new(data.as_slice(), device)?; assert_eq!(tensor.sum_keepdim(0)?.to_vec1::<u32>()?, &[7998000]); let tensor = tensor.reshape((2000, 2))?; assert_eq!(tensor.sum_keepdim((0, 1))?.to_vec2::<u32>()?, &[[7998000]]); assert_eq!( tensor.sum_keepdim(0)?.sum_keepdim(1)?.to_vec2::<u32>()?, &[[7998000]] ); assert_eq!( tensor.sum_keepdim(1)?.sum_keepdim(0)?.to_vec2::<u32>()?, &[[7998000]] ); assert_eq!( tensor.sum_keepdim(0)?.to_vec2::<u32>()?, &[[3998000, 4000000]] ); // Make the tensor non contiguous. let tensor = tensor.t()?.contiguous()?.t()?; assert_eq!(tensor.sum_keepdim((0, 1))?.to_vec2::<u32>()?, &[[7998000]]); assert_eq!( tensor.sum_keepdim(0)?.sum_keepdim(1)?.to_vec2::<u32>()?, &[[7998000]] ); assert_eq!( tensor.sum_keepdim(1)?.sum_keepdim(0)?.to_vec2::<u32>()?, &[[7998000]] ); assert_eq!( tensor.sum_keepdim(0)?.to_vec2::<u32>()?, &[[3998000, 4000000]] ); let t1 = tensor.reshape((200, 5, 4))?; let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?; for tensor in [t1, t2] { assert_eq!( tensor.sum_keepdim((0, 1, 2))?.to_vec3::<u32>()?, &[[[7998000]]] ); assert_eq!( tensor .sum_keepdim(0)? .sum_keepdim(2)? .sum_keepdim(1)? .to_vec3::<u32>()?, &[[[7998000]]] ); assert_eq!( tensor .sum_keepdim(0)? .sum_keepdim((1, 2))? 
.to_vec3::<u32>()?, &[[[7998000]]] ); assert_eq!( tensor .sum_keepdim(1)? .sum_keepdim((0, 2))? .to_vec3::<u32>()?, &[[[7998000]]] ); assert_eq!( tensor.sum_keepdim(0)?.to_vec3::<u32>()?, &[[ [398000, 398200, 398400, 398600], [398800, 399000, 399200, 399400], [399600, 399800, 400000, 400200], [400400, 400600, 400800, 401000], [401200, 401400, 401600, 401800] ]] ); } Ok(()) } fn min(device: &Device) -> Result<()> { let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.min_keepdim(2)?.to_vec3::<u32>()?, &[[[1], [1]], [[1], [2]]] ); assert_eq!( tensor.min_keepdim(0)?.to_vec3::<u32>()?, &[[[2, 1, 4], [1, 2, 8]]], ); let data: Vec<u32> = (200..4000u32).collect(); let tensor = Tensor::new(data.as_slice(), device)?; assert_eq!(tensor.min_keepdim(0)?.to_vec1::<u32>()?, &[200]); let tensor = tensor.reshape((1900, 2))?; assert_eq!( tensor.min_keepdim(0)?.min_keepdim(1)?.to_vec2::<u32>()?, &[[200]] ); assert_eq!( tensor.min_keepdim(1)?.min_keepdim(0)?.to_vec2::<u32>()?, &[[200]] ); assert_eq!(tensor.min_keepdim(0)?.to_vec2::<u32>()?, &[[200, 201]]); // Make the tensor non contiguous. let tensor = tensor.t()?.contiguous()?.t()?; assert_eq!( tensor.min_keepdim(0)?.min_keepdim(1)?.to_vec2::<u32>()?, &[[200]] ); assert_eq!( tensor.min_keepdim(1)?.min_keepdim(0)?.to_vec2::<u32>()?, &[[200]] ); assert_eq!(tensor.min_keepdim(0)?.to_vec2::<u32>()?, &[[200, 201]]); let t1 = tensor.reshape((190, 5, 4))?; let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?; for tensor in [t1, t2] { assert_eq!( tensor .min_keepdim(0)? .min_keepdim(2)? .min_keepdim(1)? .to_vec3::<u32>()?, &[[[200]]] ); assert_eq!( tensor.min_keepdim(0)?.to_vec3::<u32>()?, &[[ [200, 201, 202, 203], [204, 205, 206, 207], [208, 209, 210, 211], [212, 213, 214, 215], [216, 217, 218, 219] ]] ); } Ok(()) } fn max(device: &Device) -> Result<()> { let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.max_keepdim(2)?.to_vec3::<u32>()?, &[[[4], [9]], [[7], [8]]] ); assert_eq!( tensor.max_keepdim(0)?.to_vec3::<u32>()?, &[[[3, 1, 7], [8, 5, 9]]], ); let data: Vec<u32> = (200..4000u32).collect(); let tensor = Tensor::new(data.as_slice(), device)?; assert_eq!(tensor.max_keepdim(0)?.to_vec1::<u32>()?, &[3999]); let tensor = tensor.reshape((1900, 2))?; assert_eq!( tensor.max_keepdim(0)?.max_keepdim(1)?.to_vec2::<u32>()?, &[[3999]] ); assert_eq!( tensor.max_keepdim(1)?.max_keepdim(0)?.to_vec2::<u32>()?, &[[3999]] ); assert_eq!(tensor.max_keepdim(0)?.to_vec2::<u32>()?, &[[3998, 3999]]); // Make the tensor non contiguous. let tensor = tensor.t()?.contiguous()?.t()?; assert_eq!( tensor.max_keepdim(0)?.max_keepdim(1)?.to_vec2::<u32>()?, &[[3999]] ); assert_eq!( tensor.max_keepdim(1)?.max_keepdim(0)?.to_vec2::<u32>()?, &[[3999]] ); assert_eq!(tensor.max_keepdim(0)?.to_vec2::<u32>()?, &[[3998, 3999]]); let t1 = tensor.reshape((190, 5, 4))?; let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?; for tensor in [t1, t2] { assert_eq!( tensor .max_keepdim(0)? .max_keepdim(2)? .max_keepdim(1)? 
.to_vec3::<u32>()?, &[[[3999]]] ); assert_eq!( tensor.max_keepdim(0)?.to_vec3::<u32>()?, &[[ [3980, 3981, 3982, 3983], [3984, 3985, 3986, 3987], [3988, 3989, 3990, 3991], [3992, 3993, 3994, 3995], [3996, 3997, 3998, 3999] ]] ); } Ok(()) } fn argmin(device: &Device) -> Result<()> { let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.argmin_keepdim(2)?.to_vec3::<u32>()?, &[[[1], [0]], [[1], [1]]] ); assert_eq!( tensor.argmin_keepdim(0)?.to_vec3::<u32>()?, &[[[1, 0, 0], [0, 1, 1]]], ); let data: Vec<u32> = (200..4000u32).collect(); let tensor = Tensor::new(data.as_slice(), device)?; assert_eq!(tensor.argmin_keepdim(0)?.to_vec1::<u32>()?, &[0]); let tensor = tensor.reshape((1900, 2))?; assert_eq!( tensor .argmin_keepdim(0)? .argmin_keepdim(1)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!( tensor .argmin_keepdim(1)? .argmin_keepdim(0)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!(tensor.argmin_keepdim(0)?.to_vec2::<u32>()?, &[[0, 0]]); // Make the tensor non contiguous. let tensor = tensor.t()?.contiguous()?.t()?; assert_eq!( tensor .argmin_keepdim(0)? .argmin_keepdim(1)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!( tensor .argmin_keepdim(1)? .argmin_keepdim(0)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!(tensor.argmin_keepdim(0)?.to_vec2::<u32>()?, &[[0, 0]]); let t1 = tensor.reshape((190, 5, 4))?; let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?; for tensor in [t1, t2] { assert_eq!( tensor .argmin_keepdim(0)? .argmin_keepdim(2)? .argmin_keepdim(1)? .to_vec3::<u32>()?, &[[[0]]] ); assert_eq!( tensor.argmin_keepdim(0)?.to_vec3::<u32>()?, &[[ [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], ]] ); } Ok(()) } fn argmax(device: &Device) -> Result<()> { let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.argmax_keepdim(2)?.to_vec3::<u32>()?, &[[[2], [2]], [[2], [0]]] ); assert_eq!( tensor.argmax_keepdim(0)?.to_vec3::<u32>()?, &[[[0, 0, 1], [1, 0, 0]]], ); let data: Vec<u32> = (200..4000u32).collect(); let tensor = Tensor::new(data.as_slice(), device)?; assert_eq!(tensor.argmax_keepdim(0)?.to_vec1::<u32>()?, &[3799]); let tensor = tensor.reshape((1900, 2))?; assert_eq!( tensor .argmax_keepdim(0)? .argmax_keepdim(1)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!( tensor .argmax_keepdim(1)? .argmax_keepdim(0)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!(tensor.argmax_keepdim(0)?.to_vec2::<u32>()?, &[[1899, 1899]]); // Make the tensor non contiguous. let tensor = tensor.t()?.contiguous()?.t()?; assert_eq!( tensor .argmax_keepdim(0)? .argmax_keepdim(1)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!( tensor .argmax_keepdim(1)? .argmax_keepdim(0)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!(tensor.argmax_keepdim(0)?.to_vec2::<u32>()?, &[[1899, 1899]]); let t1 = tensor.reshape((190, 5, 4))?; let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?; for tensor in [t1, t2] { assert_eq!( tensor .argmax_keepdim(0)? .argmax_keepdim(2)? .argmax_keepdim(1)? 
.to_vec3::<u32>()?, &[[[0]]] ); assert_eq!( tensor.argmax_keepdim(0)?.to_vec3::<u32>()?, &[[ [189, 189, 189, 189], [189, 189, 189, 189], [189, 189, 189, 189], [189, 189, 189, 189], [189, 189, 189, 189], ]] ); } Ok(()) } fn narrow(device: &Device) -> Result<()> { let data = &[[[3f32, 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.narrow(2, 1, 2)?.to_vec3::<f32>()?, &[[[1.0, 4.0], [5.0, 9.0]], [[1.0, 7.0], [2.0, 8.0]]], ); assert_eq!( tensor.narrow(1, 1, 1)?.to_vec3::<f32>()?, &[[[1.0, 5.0, 9.0]], [[8.0, 2.0, 8.0]]], ); assert_eq!( tensor.narrow(0, 0, 1)?.to_vec3::<f32>()?, &[[[3.0, 1.0, 4.0], [1.0, 5.0, 9.0]]], ); assert_eq!( tensor.narrow(0, 1, 1)?.to_vec3::<f32>()?, &[[[2.0, 1.0, 7.0], [8.0, 2.0, 8.0]]], ); // The following has been checked against PyTorch via: // import torch // t = torch.tensor([[[3., 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]]) // t.transpose(-1, -2).narrow(1, 1, 2) assert_eq!( tensor.t()?.narrow(1, 1, 2)?.to_vec3::<f32>()?, &[[[1.0, 5.0], [4.0, 9.0]], [[1.0, 2.0], [7.0, 8.0]]], ); Ok(()) } fn broadcast(device: &Device) -> Result<()> { let data = &[3f32, 1., 4.]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.broadcast_left((3, 1))?.to_vec3::<f32>()?, &[[[3.0, 1.0, 4.0]], [[3.0, 1.0, 4.0]], [[3.0, 1.0, 4.0]]] ); Ok(()) } fn slice_set(device: &Device) -> Result<()> { let (b, h, max_t, d) = (2, 4, 7, 3); let cache = Tensor::zeros((b, h, max_t, d), DType::F32, device)?; let tensor = Tensor::randn(0f32, 1f32, (b, h, 4, d), device)?; cache.slice_set(&tensor, 2, 0)?; let cache_t = cache.narrow(2, 0, 4)?; let diff = (cache_t - &tensor)?.abs()?.sum_all()?.to_vec0::<f32>()?; assert_eq!(diff, 0.); cache.slice_set(&tensor, 2, 1)?; let cache_t = cache.narrow(2, 1, 4)?; let diff = (cache_t - &tensor)?.abs()?.sum_all()?.to_vec0::<f32>()?; assert_eq!(diff, 0.); let ones = Tensor::ones((b, h, 1, d), DType::F32, device)?; cache.slice_set(&ones, 2, 6)?; let diff = cache.narrow(2, 5, 1)?.abs()?.sum_all()?.to_vec0::<f32>()?; assert_eq!(diff, 0.); let diff = (cache.narrow(2, 6, 1)? - 1.)? .abs()? .sum_all()? .to_vec0::<f32>()?; assert_eq!(diff, 0.); // This used to create a deadlock rather than returning an actual error. assert!(cache.slice_set(&cache, 0, 0).is_err()); Ok(()) } fn cat(device: &Device) -> Result<()> { // 1D let t1 = Tensor::new(&[3f32, 1., 4.], device)?; let t2 = Tensor::new(&[1f32, 5., 9., 2.], device)?; let t3 = Tensor::new(&[6f32, 5., 3., 5., 8., 9.], device)?; assert_eq!(Tensor::cat(&[&t1], 0)?.to_vec1::<f32>()?, [3f32, 1., 4.],); assert_eq!( Tensor::cat(&[&t1, &t2], 0)?.to_vec1::<f32>()?, [3f32, 1., 4., 1., 5., 9., 2.], ); assert_eq!( Tensor::cat(&[&t1, &t2, &t3], 0)?.to_vec1::<f32>()?, [3f32, 1., 4., 1., 5., 9., 2., 6., 5., 3., 5., 8., 9.], ); // 2D let data = &[[3f32, 1., 4., 1., 5.], [2., 7., 1., 8., 2.]]; let t1 = Tensor::new(data, device)?; let data2 = &[[5f32, 5., 5., 5., 5.], [2., 7., 1., 8., 2.]]; let t2 = Tensor::new(data2, device)?; assert_eq!( Tensor::cat(&[&t1, &t2], 0)?.to_vec2::<f32>()?, [ [3.0, 1.0, 4.0, 1.0, 5.0], [2.0, 7.0, 1.0, 8.0, 2.0], [5.0, 5.0, 5.0, 5.0, 5.0], [2.0, 7.0, 1.0, 8.0, 2.0] ] ); // PyTorch equivalent: // import torch // t1 = torch.tensor([[3, 1, 4, 1, 5], [2, 7, 1, 8, 2]]) // t2 = torch.tensor([[5]*5, [2, 7, 1, 8, 2]]) // torch.cat([t1.t(), t2.t()], dim=1).t() assert_eq!( Tensor::cat(&[&t1.t()?, &t2.t()?], 1)? .t()? 
.to_vec2::<f32>()?, [ [3.0, 1.0, 4.0, 1.0, 5.0], [2.0, 7.0, 1.0, 8.0, 2.0], [5.0, 5.0, 5.0, 5.0, 5.0], [2.0, 7.0, 1.0, 8.0, 2.0] ] ); assert_eq!( Tensor::cat(&[&t1, &t2], 1)?.to_vec2::<f32>()?, [ [3.0, 1.0, 4.0, 1.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0], [2.0, 7.0, 1.0, 8.0, 2.0, 2.0, 7.0, 1.0, 8.0, 2.0] ] ); // 3D let t1 = Tensor::arange(0, 48i64, device)?.reshape((2, 6, 4))?; let t2 = Tensor::arange(100, 124i64, device)?.reshape((2, 3, 4))?; let t3 = Tensor::arange(10000, 10032i64, device)?.reshape((2, 4, 4))?; let t_cat = Tensor::cat(&[&t1, &t2, &t3], 1)?; let t1 = t1.t()?.contiguous()?.t()?; let t2 = t2.t()?.contiguous()?.t()?; let t3 = t3.t()?.contiguous()?.t()?; let t_cat2 = Tensor::cat(&[&t1, &t2, &t3], 1)?; let diff = t_cat.eq(&t_cat2)?.to_dtype(DType::F32)?.sum_all()?; assert_eq!(diff.to_vec0::<f32>()?, 104.0); assert_eq!(t_cat.i((0, 0, 0))?.to_vec0::<i64>()?, 0); assert_eq!(t_cat.i((0, 4, 0))?.to_vec0::<i64>()?, 16); assert_eq!(t_cat.i((0, 5, 0))?.to_vec0::<i64>()?, 20); assert_eq!(t_cat.i((1, 5, 0))?.to_vec0::<i64>()?, 44); assert_eq!(t_cat.i((0, 6, 0))?.to_vec0::<i64>()?, 100); assert_eq!(t_cat.i((1, 6, 0))?.to_vec0::<i64>()?, 112); assert_eq!(t_cat.i((0, 6, 1))?.to_vec0::<i64>()?, 101); assert_eq!(t_cat.i((0, 7, 1))?.to_vec0::<i64>()?, 105); assert_eq!(t_cat.i((0, 12, 1))?.to_vec0::<i64>()?, 10013); assert_eq!(t_cat.i((1, 12, 3))?.to_vec0::<i64>()?, 10031); Ok(()) } fn embeddings(device: &Device) -> Result<()> { let ids = Tensor::new(&[0u32, 2u32, 1u32], device)?; let t = Tensor::new(&[[0f32, 1f32], [2f32, 3f32], [4f32, 5f32]], device)?; let hs = t.embedding(&ids)?; assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 1.0], [4.0, 5.0], [2.0, 3.0]]); let hs = t.index_select(&ids, 0)?; assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 1.0], [4.0, 5.0], [2.0, 3.0]]); let hs = t.index_select(&ids.to_dtype(DType::I64)?, 0)?; assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 1.0], [4.0, 5.0], [2.0, 3.0]]); Ok(()) } fn cmp(device: &Device) -> Result<()> { let t1 = Tensor::new(&[[0f32, 1f32], [2f32, 3f32], [4f32, 5f32]], device)?; let t2 = Tensor::new(&[[1f32, 0f32], [3f32, 3f32], [4f32, 7f32]], device)?; assert_eq!(t1.eq(&t2)?.to_vec2::<u8>()?, &[[0, 0], [0, 1], [1, 0]]); assert_eq!(t1.ne(&t2)?.to_vec2::<u8>()?, &[[1, 1], [1, 0], [0, 1]]); assert_eq!(t1.le(&t2)?.to_vec2::<u8>()?, &[[1, 0], [1, 1], [1, 1]]); assert_eq!(t1.lt(&t2)?.to_vec2::<u8>()?, &[[1, 0], [1, 0], [0, 1]]); assert_eq!(t1.gt(&t2)?.to_vec2::<u8>()?, &[[0, 1], [0, 0], [0, 0]]); assert_eq!(t1.ge(&t2)?.to_vec2::<u8>()?, &[[0, 1], [0, 1], [1, 0]]); Ok(()) } fn index_select(device: &Device) -> Result<()> { let ids = Tensor::new(&[0u32, 2u32, 1u32], device)?; let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?; assert_eq!( t.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0] ] ); for dtype in [DType::U8, DType::U32, DType::I64] { let ids = ids.to_dtype(dtype)?; let hs = t.index_select(&ids, 1)?; assert_eq!( hs.to_vec2::<f32>()?, &[ [0.0, 2.0, 1.0], [3.0, 5.0, 4.0], [6.0, 8.0, 7.0], [9.0, 11.0, 10.0] ] ); let hs = t.index_select(&ids, 0)?; assert_eq!( hs.to_vec2::<f32>()?, &[[0.0, 1.0, 2.0], [6.0, 7.0, 8.0], [3.0, 4.0, 5.0]] ); // Prior to https://github.com/huggingface/candle/pull/1022 // There would be a bug where the last values in the result tensor would be set to 0. 
let ids = Tensor::new(&[0u32, 2u32, 1u32, 0u32, 2u32, 1u32], device)?; let hs = t.index_select(&ids, 0)?; assert_eq!( hs.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [6.0, 7.0, 8.0], [3.0, 4.0, 5.0], [0.0, 1.0, 2.0], [6.0, 7.0, 8.0], [3.0, 4.0, 5.0], ] ); // Test when selecting dim > 0 with ids size different from elem count of // target dim in source/input. let ids = Tensor::new(&[1u32, 0u32, 1u32], device)?; let t = Tensor::arange(1f32, 5f32, device)?.reshape((2, 2))?; assert_eq!(t.to_vec2::<f32>()?, &[[1.0, 2.0], [3.0, 4.0]]); let hs = t.index_select(&ids, 1)?; assert_eq!(hs.to_vec2::<f32>()?, &[[2.0, 1.0, 2.0], [4.0, 3.0, 4.0]]); } Ok(()) } fn index_add(device: &Device) -> Result<()> { let ids = Tensor::new(&[0u32, 1u32, 1u32], device)?; let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?; assert_eq!( t.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0] ] ); let init = Tensor::ones((4, 2), DType::F32, device)?; let hs = init.index_add(&ids, &t, 1)?; assert_eq!( hs.to_vec2::<f32>()?, &[[1.0, 4.0], [4.0, 10.0], [7.0, 16.0], [10.0, 22.0]], ); let init = Tensor::zeros((4, 2), DType::F32, device)?; let ids = Tensor::new(&[1u32, 0u32, 0u32], device)?; let hs = init.index_add(&ids, &t, 1)?; assert_eq!( hs.to_vec2::<f32>()?, &[[3.0, 0.0], [9.0, 3.0], [15.0, 6.0], [21.0, 9.0]], ); let init = Tensor::zeros((6, 3), DType::F32, device)?; let ids = Tensor::new(&[5u32, 0u32, 1u32, 0u32], device)?; let hs = init.index_add(&ids, &t, 0)?; assert_eq!( hs.to_vec2::<f32>()?, &[ [12.0, 14.0, 16.0], [6.0, 7.0, 8.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 2.0] ] ); Ok(()) } fn slice_scatter(device: &Device) -> Result<()> { let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?; assert_eq!( t.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0] ] ); let src = Tensor::arange(100f32, 106f32, device)?.reshape((2, 3))?; assert_eq!( t.slice_scatter0(&src, 0)?.to_vec2::<f32>()?, &[ [100.0, 101.0, 102.0], [103.0, 104.0, 105.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0] ] ); assert_eq!( t.slice_scatter0(&src, 1)?.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [100.0, 101.0, 102.0], [103.0, 104.0, 105.0], [9.0, 10.0, 11.0] ] ); assert_eq!( t.slice_scatter0(&src, 2)?.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [100.0, 101.0, 102.0], [103.0, 104.0, 105.0], ] ); Ok(()) } fn scatter_add(device: &Device) -> Result<()> { let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?; assert_eq!( t.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0] ] ); let ids = Tensor::new(&[[0u32, 1, 2], [3, 4, 0], [3, 3, 1], [2, 0, 4]], device)?; let init = Tensor::ones((4, 5), DType::F32, device)?; let hs = init.scatter_add(&ids, &t, 1)?; assert_eq!( hs.to_vec2::<f32>()?, &[ [1.0, 2.0, 3.0, 1.0, 1.0], [6.0, 1.0, 1.0, 4.0, 5.0], [1.0, 9.0, 1.0, 14.0, 1.0], [11.0, 1.0, 10.0, 1.0, 12.0] ] ); let init = Tensor::ones((6, 3), DType::F32, device)?; let hs = init.scatter_add(&ids, &t, 0)?; assert_eq!( hs.to_vec2::<f32>()?, &[ [1.0, 11.0, 6.0], [1.0, 2.0, 9.0], [10.0, 1.0, 3.0], [10.0, 8.0, 1.0], [1.0, 5.0, 12.0], [1.0, 1.0, 1.0] ] ); Ok(()) } fn gather(device: &Device) -> Result<()> { let ids = Tensor::new(&[[0u32], [2u32], [1u32], [0u32]], device)?; let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?; assert_eq!( t.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0] ] ); let hs = t.gather(&ids, 1)?; assert_eq!(hs.to_vec2::<f32>()?, &[[0.0], [5.0], 
[7.0], [9.0]]); let ids = Tensor::new( &[[0u32, 0u32], [2u32, 0u32], [1u32, 1u32], [0u32, 2u32]], device, )?; let hs = t.gather(&ids, 1)?; assert_eq!( hs.to_vec2::<f32>()?, &[[0.0, 0.0], [5.0, 3.0], [7.0, 7.0], [9.0, 11.0]] ); let ids = Tensor::new(&[[0u32, 2u32, 0u32]], device)?; let hs = t.gather(&ids, 0)?; assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 7.0, 2.0]]); let ids = Tensor::new(&[[0u32, 2u32, 0u32], [0u32, 1u32, 1u32]], device)?; let hs = t.gather(&ids, 0)?; assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 7.0, 2.0], [0.0, 4.0, 5.0]]); // Random data // Dim: 0 let t = Tensor::new( &[ [ [108_f32, -47., 16., -56., -83., -130., 210.], [253., 95., 151., 228., -210., -123., -127.], [-9., -217., 2., -78., 163., 245., -204.], [-246., 79., -238., 88., -226., -184., 171.], [8., -48., -153., 234., -34., 166., -153.], [124., 0., -10., -61., -242., -15., -238.], ], [ [12., -64., -199., 244., -240., 156., -128.], [173., -57., 4., -198., 233., -110., 238.], [95., 82., 0., 240., 53., -211., 209.], [-122., 167., -212., 227., -144., 61., 118.], [-63., -146., 200., 244., 168., -167., 116.], [-125., -147., 110., -253., -178., -250., -18.], ], [ [57., 86., -50., 56., 92., 205., -78.], [-137., -156., -18., 248., -61., -239., 14.], [-248., -30., -50., -70., -251., 250., -83.], [-221., 67., 72., 59., -24., -154., 232.], [-144., -23., -74., 5., 93., 171., 205.], [46., -77., -38., -226., 246., 161., -17.], ], [ [-153., -231., -236., 161., 126., 2., -22.], [-229., -41., 209., 164., 234., 160., 57.], [223., 254., -186., -162., -46., -160., -102.], [65., 30., 213., -253., 59., 224., -154.], [-82., -203., -177., 17., 31., -256., -246.], [176., -135., -65., 54., -56., 210., 76.], ], [ [-10., -245., 168., 124., -14., -33., -178.], [25., -43., -39., 132., -89., 169., 179.], [187., -215., 32., -133., 87., -7., -168.], [-224., -215., -5., -230., -58., -162., 128.], [158., -137., -122., -100., -202., -83., 136.], [30., -185., -144., 250., 209., -40., 127.], ], [ [-196., 108., -245., 122., 146., -228., 62.], [-1., -66., 160., 137., 13., -172., -21.], [244., 199., -164., 28., 119., -175., 198.], [-62., 253., -162., 195., -95., -230., -211.], [123., -72., -26., -107., -139., 64., 245.], [11., -126., -182., 108., -12., 184., -127.], ], [ [-159., 126., 176., 161., 73., -111., -138.], [-187., 214., -217., -33., -223., -201., -212.], [-61., -120., -166., -172., -95., 53., 196.], [-33., 86., 134., -152., 154., -53., 74.], [186., -28., -154., -174., 141., -109., 217.], [82., 35., 252., 145., 181., 74., -87.], ], ], device, )?; let ids = Tensor::new( &[ [ [6_u32, 6, 4, 3, 4, 4, 6], [3, 3, 2, 4, 4, 4, 6], [3, 3, 0, 2, 4, 6, 4], [2, 5, 1, 2, 6, 6, 1], [2, 1, 6, 5, 3, 2, 3], [6, 1, 0, 1, 0, 2, 6], ], [ [4, 6, 4, 3, 3, 3, 2], [4, 3, 2, 4, 4, 4, 6], [2, 3, 0, 2, 4, 6, 4], [6, 5, 1, 2, 6, 6, 1], [4, 1, 6, 5, 3, 2, 3], [1, 1, 0, 1, 0, 2, 6], ], [ [3, 6, 4, 3, 3, 3, 2], [2, 3, 2, 4, 4, 4, 6], [4, 3, 0, 2, 4, 6, 4], [0, 5, 1, 2, 6, 6, 1], [6, 1, 6, 5, 3, 2, 3], [4, 1, 0, 1, 0, 2, 6], ], [ [0, 6, 4, 3, 3, 3, 2], [5, 3, 2, 4, 4, 4, 6], [0, 3, 0, 2, 4, 6, 4], [3, 5, 1, 2, 6, 6, 1], [0, 1, 6, 5, 3, 2, 3], [3, 1, 0, 1, 0, 2, 6], ], ], device, )?; let hs = t.gather(&ids, 0)?; assert_eq!( hs.to_vec3::<f32>()?, &[ [ [-159_f32, 126., 168., 161., -14., -33., -138.], [-229., -41., -18., 132., -89., 169., -212.], [223., 254., 2., -70., 87., 53., -168.], [-221., 253., -212., 59., 154., -53., 118.], [-144., -146., -154., -107., 31., 171., -246.], [82., -147., -10., -253., -242., 161., -87.] 
], [ [-10., 126., 168., 161., 126., 2., -78.], [25., -41., -18., 132., -89., 169., -212.], [-248., 254., 2., -70., 87., 53., -168.], [-33., 253., -212., 59., 154., -53., 118.], [158., -146., -154., -107., 31., 171., -246.], [-125., -147., -10., -253., -242., 161., -87.] ], [ [-153., 126., 168., 161., 126., 2., -78.], [-137., -41., -18., 132., -89., 169., -212.], [187., 254., 2., -70., 87., 53., -168.], [-246., 253., -212., 59., 154., -53., 118.], [186., -146., -154., -107., 31., 171., -246.], [30., -147., -10., -253., -242., 161., -87.] ], [ [108., 126., 168., 161., 126., 2., -78.], [-1., -41., -18., 132., -89., 169., -212.], [-9., 254., 2., -70., 87., 53., -168.], [65., 253., -212., 59., 154., -53., 118.], [8., -146., -154., -107., 31., 171., -246.], [176., -147., -10., -253., -242., 161., -87.] ] ] ); // Dim: 1 let t = Tensor::new( &[ [ [-117_f32, -175., 69., -163.], [200., 242., -21., -67.], [179., 150., -126., -75.], [-118., 38., -138., -13.], [-221., 136., -185., 180.], [58., 182., -204., -149.], ], [ [3., -148., -58., -154.], [-43., 45., -108., 4.], [-69., -249., -71., -21.], [80., 110., -152., -235.], [-88., 7., 92., -250.], [-186., 207., -242., 98.], ], [ [238., 19., 64., -242.], [-150., -97., 218., 58.], [111., -233., 204., -212.], [-242., -232., 83., 42.], [153., 62., -251., 219.], [-117., 36., -119., 10.], ], [ [215., 159., -169., -27.], [-83., 101., -88., 169.], [-205., 93., 225., -64.], [-162., 240., 214., 23.], [-112., 6., 21., 245.], [-38., 113., 93., 215.], ], [ [91., -188., -148., 101.], [74., 203., -35., 55.], [-116., -130., -153., -96.], [58., 22., -45., -194.], [-221., -134., 73., 159.], [-203., -254., 31., 235.], ], [ [105., -53., 61., 186.], [-195., 234., 75., -1.], [51., 139., 160., -108.], [-173., -167., 161., 19.], [83., -246., 156., -222.], [109., 39., -149., 137.], ], ], device, )?; let ids = Tensor::new( &[ [[4_u32, 4, 4, 2]], [[0, 4, 4, 3]], [[1, 5, 3, 4]], [[0, 3, 3, 2]], [[1, 1, 5, 2]], [[1, 4, 5, 4]], ], device, )?; let hs = t.gather(&ids, 1)?; assert_eq!( hs.to_vec3::<f32>()?, &[ [[-221., 136., -185., -75.]], [[3., 7., 92., -235.]], [[-150., 36., 83., 219.]], [[215., 240., 214., -64.]], [[74., 203., 31., -96.]], [[-195., -246., -149., -222.]] ] ); // Dim: 2 let t = Tensor::new( &[ [[-162_f32, 202.], [-126., -39.], [35., -65.], [1., 80.]], [[37., 248.], [-191., 89.], [117., -40.], [-217., 220.]], ], device, )?; let ids = Tensor::new(&[[[1_u32], [0], [1], [1]], [[0], [1], [0], [1]]], device)?; let hs = t.gather(&ids, 2)?; assert_eq!( hs.to_vec3::<f32>()?, &[ [[202.], [-126.], [-65.], [80.]], [[37.], [89.], [117.], [220.]] ] ); let t = Tensor::new( &[ [[-21_f32, -197.], [194., 122.]], [[255., -106.], [-191., 250.]], [[33., -117.], [43., 10.]], [[-130., 238.], [-217., -92.]], ], device, )?; let ids = Tensor::new( &[ [[0_u32, 1], [1, 0]], [[1, 0], [0, 1]], [[0, 1], [0, 1]], [[1, 0], [1, 0]], ], device, )?; let hs = t.gather(&ids, 2)?; assert_eq!( hs.to_vec3::<f32>()?, &[ [[-21., -197.], [122., 194.]], [[-106., 255.], [-191., 250.]], [[33., -117.], [43., 10.]], [[238., -130.], [-92., -217.]] ] ); Ok(()) } fn broadcasting(device: &Device) -> Result<()> { let t1 = Tensor::arange(0f32, 24f32, device)?.reshape((4, 2, 3))?; let t2 = Tensor::new(&[100f32, 200f32], device)?; let s = t1.broadcast_add(&t2.reshape((2, 1))?)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[100.0, 101.0, 102.0], [203.0, 204.0, 205.0]], [[106.0, 107.0, 108.0], [209.0, 210.0, 211.0]], [[112.0, 113.0, 114.0], [215.0, 216.0, 217.0]], [[118.0, 119.0, 120.0], [221.0, 222.0, 223.0]] ] ); let s = 
t1.t()?.broadcast_add(&t2)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[100.0, 203.0], [101.0, 204.0], [102.0, 205.0]], [[106.0, 209.0], [107.0, 210.0], [108.0, 211.0]], [[112.0, 215.0], [113.0, 216.0], [114.0, 217.0]], [[118.0, 221.0], [119.0, 222.0], [120.0, 223.0]] ] ); let s = t1.broadcast_sub(&t2.reshape((2, 1))?)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[-100.0, -99.0, -98.0], [-197.0, -196.0, -195.0]], [[-94.0, -93.0, -92.0], [-191.0, -190.0, -189.0]], [[-88.0, -87.0, -86.0], [-185.0, -184.0, -183.0]], [[-82.0, -81.0, -80.0], [-179.0, -178.0, -177.0]] ] ); let s = t1.t()?.broadcast_sub(&t2)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[-100.0, -197.0], [-99.0, -196.0], [-98.0, -195.0]], [[-94.0, -191.0], [-93.0, -190.0], [-92.0, -189.0]], [[-88.0, -185.0], [-87.0, -184.0], [-86.0, -183.0]], [[-82.0, -179.0], [-81.0, -178.0], [-80.0, -177.0]] ] ); // Test a narrowed version as this uses a layout start_offset. let t1 = t1.i(2..)?; let s = t1.broadcast_add(&t2.reshape((2, 1))?)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[112.0, 113.0, 114.0], [215.0, 216.0, 217.0]], [[118.0, 119.0, 120.0], [221.0, 222.0, 223.0]] ] ); let s = t1.t()?.broadcast_add(&t2)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[112.0, 215.0], [113.0, 216.0], [114.0, 217.0]], [[118.0, 221.0], [119.0, 222.0], [120.0, 223.0]] ] ); let s = t1.broadcast_sub(&t2.reshape((2, 1))?)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[-88.0, -87.0, -86.0], [-185.0, -184.0, -183.0]], [[-82.0, -81.0, -80.0], [-179.0, -178.0, -177.0]] ] ); let s = t1.t()?.broadcast_sub(&t2)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[-88.0, -185.0], [-87.0, -184.0], [-86.0, -183.0]], [[-82.0, -179.0], [-81.0, -178.0], [-80.0, -177.0]] ] ); let t3 = Tensor::new(1f32, device)?.broadcast_div(&t2)?; let s = t1.broadcast_mul(&t2.reshape((2, 1))?)?; let s_div = t1.broadcast_div(&t3.reshape((2, 1))?)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[1200.0, 1300.0, 1400.0], [3000.0, 3200.0, 3400.0]], [[1800.0, 1900.0, 2000.0], [4200.0, 4400.0, 4600.0]] ] ); assert_eq!(s.to_vec3::<f32>()?, s_div.to_vec3::<f32>()?,); let s = t1.t()?.broadcast_mul(&t2)?; let s_div = t1.t()?.broadcast_div(&t3)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[1200.0, 3000.0], [1300.0, 3200.0], [1400.0, 3400.0]], [[1800.0, 4200.0], [1900.0, 4400.0], [2000.0, 4600.0]] ] ); assert_eq!(s.to_vec3::<f32>()?, s_div.to_vec3::<f32>()?,); Ok(()) } fn randn(device: &Device) -> Result<()> { let tensor = Tensor::randn(0f32, 1f32, (5, 3), device)?; assert_eq!(tensor.dims(), [5, 3]); // Check that the seed gets updated by checking that // a new series of numbers is generated each time let tensor2 = Tensor::randn(0f32, 1f32, (5, 3), device)?; assert_ne!(tensor.to_vec2::<f32>()?, tensor2.to_vec2::<f32>()?); let tensor = Tensor::rand(0f32, 1f32, (5, 3), device)?; assert_eq!(tensor.dims(), [5, 3]); // Check that the seed gets updated by checking that // a new series of numbers is generated each time let tensor2 = Tensor::rand(0f32, 1f32, (5, 3), device)?; assert_ne!(tensor.to_vec2::<f32>()?, tensor2.to_vec2::<f32>()?); // We do not expect deterministic elements at any index. // There once was a bug that had a deterministic zero element in evenly sized tensors. 
const N: usize = 2; let v = (0..100) .map(|_| Tensor::randn(0f32, 1f32, N, device).and_then(|t| t.to_vec1::<f32>())) .collect::<Result<Vec<_>>>()?; assert!( (0..N).all(|i| v.windows(2).any(|pair| pair[0][i] != pair[1][i])), "There are deterministic values in the randn tensors" ); let v = (0..100) .map(|_| Tensor::rand(0f32, 1f32, N, device).and_then(|t| t.to_vec1::<f32>())) .collect::<Result<Vec<_>>>()?; assert!( (0..N).all(|i| v.windows(2).any(|pair| pair[0][i] != pair[1][i])), "There are deterministic values in the rand tensors" ); Ok(()) } fn zero_dim(device: &Device) -> Result<()> { let t = Tensor::zeros((4, 0, 1), DType::F32, device)?; assert_eq!(t.dims3()?, (4, 0, 1)); let t2 = Tensor::zeros((4, 3, 1), DType::F32, device)?; let t_cat = Tensor::cat(&[&t, &t2], 1)?; assert_eq!(t_cat.dims3()?, (4, 3, 1)); let t_cat = Tensor::cat(&[&t, &t], 1)?; assert_eq!(t_cat.dims3()?, (4, 0, 1)); let t_unary = t.sqrt()?; assert_eq!(t_unary.dims3()?, (4, 0, 1)); let t_plus = (&t + 1.)?; assert_eq!(t_plus.dims3()?, (4, 0, 1)); let t_mm = t2.matmul(&t.t()?)?; assert_eq!(t_mm.dims3()?, (4, 3, 0)); let t_mm = t.matmul(&t2.t()?)?; assert_eq!(t_mm.dims3()?, (4, 0, 3)); let t_mm = t.t()?.matmul(&t)?; assert_eq!(t_mm.dims3()?, (4, 1, 1)); Ok(()) } test_device!(zeros, zeros_cpu, zeros_gpu, zeros_metal); test_device!(ones, ones_cpu, ones_gpu, ones_metal); test_device!(full, full_cpu, full_gpu, full_metal); test_device!(arange, arange_cpu, arange_gpu, arange_metal); test_device!(add_mul, add_mul_cpu, add_mul_gpu, add_mul_metal); test_device!(tensor_2d, tensor_2d_cpu, tensor_2d_gpu, tensor_2d_metal); test_device!(narrow, narrow_cpu, narrow_gpu, narrow_metal); test_device!(broadcast, broadcast_cpu, broadcast_gpu, broadcast_metal); test_device!(slice_set, ss_cpu, ss_gpu, ss_metal); test_device!(cat, cat_cpu, cat_gpu, cat_metal); test_device!(sum, sum_cpu, sum_gpu, sum_metal); test_device!(min, min_cpu, min_gpu, min_metal); test_device!(max, max_cpu, max_gpu, max_metal); test_device!(argmax, argmax_cpu, argmax_gpu, argmax_metal); test_device!(argmin, argmin_cpu, argmin_gpu, argmin_metal); test_device!(transpose, transpose_cpu, transpose_gpu, transpose_metal); test_device!(unary_op, unary_op_cpu, unary_op_gpu, unary_op_metal); test_device!(binary_op, binary_op_cpu, binary_op_gpu, binary_op_metal); test_device!(embeddings, embeddings_cpu, embeddings_gpu, embeddings_metal); test_device!(cmp, cmp_cpu, cmp_gpu, cmp_metal); test_device!( broadcasting, broadcasting_cpu, broadcasting_gpu, broadcasting_metal ); test_device!( index_select, index_select_cpu, index_select_gpu, index_select_metal ); test_device!(index_add, index_add_cpu, index_add_gpu, index_add_metal); test_device!(gather, gather_cpu, gather_gpu, gather_metal); test_device!( scatter_add, scatter_add_cpu, scatter_add_gpu, scatter_add_metal ); test_device!( slice_scatter, slice_scatter_cpu, slice_scatter_gpu, slice_scatter_metal ); test_device!(randn, randn_cpu, randn_gpu, randn_metal); test_device!(clamp, clamp_cpu, clamp_gpu, clamp_metal); test_device!(asort, asort_cpu, asort_gpu, asort_metal); test_device!(var, var_cpu, var_gpu, var_metal); test_device!(zero_dim, zero_dim_cpu, zero_dim_gpu, zero_dim_metal); // There was originally a bug on the CPU implementation for randn // https://github.com/huggingface/candle/issues/381 #[test] fn randn_hasneg() -> Result<()> { let t = Tensor::randn(0f32, 1f32, 200, &Device::Cpu)?.to_vec1::<f32>()?; if t.iter().all(|&v| v >= 0.) 
{ candle_core::bail!("all values in tensors are non-negative") } Ok(()) } #[test] fn pad_with_same() -> Result<()> { let t = Tensor::arange(1f32, 5f32, &Device::Cpu)?.reshape((2, 2))?; let t0 = t.pad_with_same(0, 1, 2)?; assert_eq!( t0.to_vec2::<f32>()?, [[1.0, 2.0], [1.0, 2.0], [3.0, 4.0], [3.0, 4.0], [3.0, 4.0]] ); let t1 = t.pad_with_same(1, 1, 2)?; assert_eq!( t1.to_vec2::<f32>()?, [[1.0, 1.0, 2.0, 2.0, 2.0], [3.0, 3.0, 4.0, 4.0, 4.0]] ); Ok(()) } #[test] fn i64_abs() -> Result<()> { let t = Tensor::new(&[-42i64, 1337], &Device::Cpu)?; let t = t.abs()?; assert_eq!(t.to_vec1::<i64>()?, [42, 1337]); Ok(()) } #[test] fn tril_triu_eye() -> Result<()> { let t = Tensor::tril2(4, DType::F32, &Device::Cpu)?; assert_eq!( t.to_vec2::<f32>()?, [ [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 1.0, 1.0] ], ); let t = Tensor::triu2(4, DType::F32, &Device::Cpu)?; assert_eq!( t.to_vec2::<f32>()?, [ [1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0] ] ); let t = Tensor::eye(4, DType::F32, &Device::Cpu)?; assert_eq!( t.to_vec2::<f32>()?, [ [1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0] ] ); Ok(()) } #[test] fn cumsum() -> Result<()> { let t = &[3f32, 1., 4., 1., 5.]; let t = Tensor::new(t, &Device::Cpu)?; assert_eq!(t.cumsum(0)?.to_vec1::<f32>()?, [3., 4., 8., 9., 14.]); let t = t.unsqueeze(1)?; assert_eq!( t.cumsum(0)?.to_vec2::<f32>()?, [[3.0], [4.0], [8.0], [9.0], [14.0]] ); assert_eq!( t.cumsum(1)?.to_vec2::<f32>()?, [[3.0], [1.0], [4.0], [1.0], [5.0]] ); let t = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]]; let t = Tensor::new(t, &Device::Cpu)?; assert_eq!( t.cumsum(1)?.to_vec2::<f32>()?, [[3.0, 4.0, 8.0, 9.0, 14.0], [2.0, 3.0, 10.0, 18.0, 20.0]], ); assert_eq!( t.cumsum(0)?.to_vec2::<f32>()?, [[3.0, 1.0, 4.0, 1.0, 5.0], [5.0, 2.0, 11.0, 9.0, 7.0]] ); Ok(()) } /// A helper function for floating point comparison. Both a and b must be 1D Tensor and contains the same amount of data. /// Assertion passes if the difference of all pairs of a and b is smaller than epsilon. fn assert_close(a: &Tensor, b: &Tensor, epsilon: f64) -> Result<()> { let a_vec: Vec<f64> = a.to_vec1()?; let b_vec: Vec<f64> = b.to_vec1()?; assert_eq!(a_vec.len(), b_vec.len()); for (a, b) in a_vec.iter().zip(b_vec.iter()) { assert!((a - b).abs() < epsilon); } Ok(()) } #[test] fn log_sum_exp() -> Result<()> { let input = Tensor::new( &[ [[1f64, 2., 3.], [4., 5., 6.]], [[-1000.0, -999.0, -1001.0], [1000.0, 999.0, 1001.0]], ], &Device::Cpu, )?; let output = input.log_sum_exp(D::Minus1)?; // The expectations obtained from pytorch. let expected = Tensor::new(&[[3.4076, 6.4076], [-998.5924, 1001.4076]], &Device::Cpu)?; assert_eq!(output.dims(), expected.dims()); assert_close(&output.flatten_all()?, &expected.flatten_all()?, 0.00001)?; assert_eq!( input.log_sum_exp((0, 1))?.to_vec1::<f64>()?, [1000.0, 999.0, 1001.0] ); assert_eq!( input.log_sum_exp(())?.to_vec3::<f64>()?, input.to_vec3::<f64>()? ); Ok(()) } #[test] fn pow() -> Result<()> { let lhs = Tensor::new(&[[1f32, 2., 3.], [4., 5., 6.]], &Device::Cpu)?; let rhs = (&lhs - 2.)?; let res = lhs.pow(&rhs)?; assert_eq!( test_utils::to_vec2_round(&res, 3)?, [[1.0, 1.0, 3.0], [16.0, 125.0, 1296.0]] ); Ok(()) }
candle/candle-core/tests/tensor_tests.rs/0
{ "file_path": "candle/candle-core/tests/tensor_tests.rs", "repo_id": "candle", "token_count": 33042 }
* candle-codegeex4_9b THUDM/CodeGeeX4 is a versatile model for all AI software development scenarios, including code completion, code interpreter, web search, function calling, repository-level Q&A and much more. - [[https://github.com/THUDM/CodeGeeX4][Github]] - [[https://codegeex.cn/][HomePage]] - [[https://huggingface.co/THUDM/codegeex4-all-9b][huggingface]] ** Running with ~cuda~ #+begin_src shell cargo run --example codegeex4-9b --release --features cuda -- --prompt "please write a insertion sort in rust" --sample-len 300 #+end_src ** Running with ~cpu~ #+begin_src shell cargo run --example codegeex4-9b --release -- --cpu --prompt "please write a insertion sort in rust" --sample-len 300 #+end_src ** Output_Example *** Input #+begin_src shell cargo run --release --features cuda -- --prompt 'please write a FFT in rust' --sample-len 500 --cache /root/autodl-tmp #+end_src *** Output #+begin_src shell avx: false, neon: false, simd128: false, f16c: false temp: 0.95 repeat-penalty: 1.10 repeat-last-n: 64 cache path /root/autodl-tmp Prompt: [please write a FFT in rust] Using Seed 11511762269791786684 DType is BF16 transofrmer layers create 模型加载完毕 4 starting the inference loop 开始生成 samplelen 500 500 tokens generated (34.60 token/s) Result: Sure, I can help you with that. Here's an example of a Fast Fourier Transform (FFT) implementation in Rust: ```rust use num_complex::Complex; fn fft(input: &[Complex<f64> > ] ) -> Vec<Complex<f64> > > { let n = input.len(); if n == 1 { return vec![input[0]]]; } let mut even = vec![]; let mut odd = vec![]; for i in 0..n { if i % 2 == 0 { even.push(input[i]); } else { odd.push(input[i]); } } let even_fft = fft(&even); let odd_fft = fft(&odd); let mut output = vec![]; for k in 0..n/2 { let t = Complex::new(0.0, -2.0 * std::f64::consts::PI * (k as f64) / (n as f64))) ).exp(); output.push(even_fft[k] + odd_fft[k] * t]); output.push(even_fft[k] - odd_fft[k] * t]); } return output; } ``` This implementation uses the Cooley-Tukey algorithm to perform the FFT. The function takes an array of complex numbers and returns an array of complex numbers which is the result of the FFT. #+end_src * Citation #+begin_src @inproceedings{zheng2023codegeex, title={CodeGeeX: A Pre-Trained Model for Code Generation with Multilingual Benchmarking on HumanEval-X}, author={Qinkai Zheng and Xiao Xia and Xu Zou and Yuxiao Dong and Shan Wang and Yufei Xue and Zihan Wang and Lei Shen and Andi Wang and Yang Li and Teng Su and Zhilin Yang and Jie Tang}, booktitle={Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining}, pages={5673--5684}, year={2023} } #+end_src
candle/candle-examples/examples/codegeex4-9b/README.org/0
{ "file_path": "candle/candle-examples/examples/codegeex4-9b/README.org", "repo_id": "candle", "token_count": 1132 }
// TODO: Add an offline mode. #[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use anyhow::{Error as E, Result}; use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use clap::Parser; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; use candle_transformers::models::falcon::{Config, Falcon}; struct TextGeneration { model: Falcon, device: Device, tokenizer: Tokenizer, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, } struct GenerationOptions { temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, } impl TextGeneration { fn new( model: Falcon, tokenizer: Tokenizer, generation_options: GenerationOptions, seed: u64, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, generation_options.temp, generation_options.top_p); let repeat_penalty = generation_options.repeat_penalty; let repeat_last_n = generation_options.repeat_last_n; Self { model, tokenizer, logits_processor, device: device.clone(), repeat_penalty, repeat_last_n, } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { println!("starting the inference loop"); let mut tokens = self .tokenizer .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let mut new_tokens = vec![]; let start_gen = std::time::Instant::now(); for index in 0..sample_len { let start_gen = std::time::Instant::now(); let context_size = if self.model.config().use_cache && index > 0 { 1 } else { tokens.len() }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = self.model.forward(&input)?; let logits = logits.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); new_tokens.push(next_token); println!("> {:?}", start_gen.elapsed()); println!( "{} token: {} '{}'", index + 1, next_token, self.tokenizer.decode(&[next_token], true).map_err(E::msg)? ); } let dt = start_gen.elapsed(); println!( "{sample_len} tokens generated ({} token/s)\n----\n{}\n----", sample_len as f64 / dt.as_secs_f64(), self.tokenizer.decode(&new_tokens, true).map_err(E::msg)? ); Ok(()) } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(long)] prompt: String, /// Use f32 computations rather than bf16. #[arg(long)] use_f32: bool, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, default_value_t = 100)] sample_len: usize, #[arg(long, default_value = "tiiuae/falcon-7b")] model_id: String, #[arg(long, default_value = "refs/pr/43")] revision: String, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.0)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. 
#[arg(long, default_value_t = 64)] repeat_last_n: usize, } fn main() -> Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let start = std::time::Instant::now(); let api = Api::new()?; let repo = api.repo(Repo::with_revision( args.model_id, RepoType::Model, args.revision, )); let tokenizer_filename = repo.get("tokenizer.json")?; let filenames = candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let dtype = if args.use_f32 { DType::F32 } else { DType::BF16 }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? }; let config = Config::falcon7b(); config.validate()?; let model = Falcon::load(vb, config)?; println!("loaded the model in {:?}", start.elapsed()); let generation_options = GenerationOptions { temp: args.temperature, top_p: args.top_p, repeat_penalty: args.repeat_penalty, repeat_last_n: args.repeat_last_n, }; let mut pipeline = TextGeneration::new(model, tokenizer, generation_options, args.seed, &device); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
candle/candle-examples/examples/falcon/main.rs/0
{ "file_path": "candle/candle-examples/examples/falcon/main.rs", "repo_id": "candle", "token_count": 2723 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::{Parser, ValueEnum}; mod model; use model::{Config, Model}; use candle::{DType, Device, Module, Tensor}; use candle_examples::token_output_stream::TokenOutputStream; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; struct TextGeneration { model: Model, device: Device, tokenizer: TokenOutputStream, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new( model: Model, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, tokenizer: TokenOutputStream::new(tokenizer), logits_processor, repeat_penalty, repeat_last_n, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; self.tokenizer.clear(); let mut tokens = self .tokenizer .tokenizer() .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); for &t in tokens.iter() { if let Some(t) = self.tokenizer.next_token(t)? { print!("{t}") } } std::io::stdout().flush()?; let mut generated_tokens = 0usize; let eos_token = match self.tokenizer.get_token("<|endoftext|>") { Some(token) => token, None => anyhow::bail!("cannot find the </s> token"), }; let start_gen = std::time::Instant::now(); for _ in 0..sample_len { let input = Tensor::new(tokens.as_slice(), &self.device)?.unsqueeze(0)?; let logits = self.model.forward(&input)?; let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == eos_token { break; } if let Some(t) = self.tokenizer.next_token(next_token)? { print!("{t}"); std::io::stdout().flush()?; } } let dt = start_gen.elapsed(); if let Some(rest) = self.tokenizer.decode_rest().map_err(E::msg)? { print!("{rest}"); } std::io::stdout().flush()?; println!( "\n{generated_tokens} tokens generated ({:.2} token/s)", generated_tokens as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, ValueEnum, Clone, Copy, PartialEq, Eq, Debug)] enum Which { Mamba130m, Mamba370m, Mamba790m, Mamba1_4b, Mamba2_8b, Mamba2_8bSlimPj, } impl std::fmt::Display for Which { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self) } } impl Which { fn model_id(&self) -> &'static str { match self { Self::Mamba130m => "state-spaces/mamba-130m", Self::Mamba370m => "state-spaces/mamba-370m", Self::Mamba790m => "state-spaces/mamba-790m", Self::Mamba1_4b => "state-spaces/mamba-1.4b", Self::Mamba2_8b => "state-spaces/mamba-2.8b", Self::Mamba2_8bSlimPj => "state-spaces/mamba-2.8b-slimpj'", } } fn revision(&self) -> &'static str { match self { Self::Mamba130m | Self::Mamba370m | Self::Mamba790m | Self::Mamba1_4b | Self::Mamba2_8bSlimPj => "refs/pr/1", Self::Mamba2_8b => "refs/pr/4", } } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. 
#[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long)] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, short = 'n', default_value_t = 5000)] sample_len: usize, #[arg(long, default_value = "mamba130m")] which: Which, #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, #[arg(long)] tokenizer_file: Option<String>, #[arg(long)] weight_files: Option<String>, #[arg(long)] config_file: Option<String>, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = Api::new()?; let repo = api.repo(Repo::with_revision( args.model_id .unwrap_or_else(|| args.which.model_id().to_string()), RepoType::Model, args.revision .unwrap_or_else(|| args.which.revision().to_string()), )); let tokenizer_filename = match args.tokenizer_file { Some(file) => std::path::PathBuf::from(file), None => api .model("EleutherAI/gpt-neox-20b".to_string()) .get("tokenizer.json")?, }; let config_filename = match args.config_file { Some(file) => std::path::PathBuf::from(file), None => repo.get("config.json")?, }; let filenames = match args.weight_files { Some(files) => files .split(',') .map(std::path::PathBuf::from) .collect::<Vec<_>>(), None => { vec![repo.get("model.safetensors")?] } }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let config: Config = serde_json::from_slice(&std::fs::read(config_filename)?)?; let device = candle_examples::device(args.cpu)?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, DType::F32, &device)? }; let model = Model::new(&config, vb.pp("backbone"))?; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, tokenizer, args.seed, args.temperature, args.top_p, args.repeat_penalty, args.repeat_last_n, &device, ); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
candle/candle-examples/examples/mamba-minimal/main.rs/0
{ "file_path": "candle/candle-examples/examples/mamba-minimal/main.rs", "repo_id": "candle", "token_count": 4087 }
// This should reach 91.5% accuracy. #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use clap::{Parser, ValueEnum}; use rand::prelude::*; use candle::{DType, Result, Tensor, D}; use candle_nn::{loss, ops, Conv2d, Linear, Module, ModuleT, Optimizer, VarBuilder, VarMap}; const IMAGE_DIM: usize = 784; const LABELS: usize = 10; fn linear_z(in_dim: usize, out_dim: usize, vs: VarBuilder) -> Result<Linear> { let ws = vs.get_with_hints((out_dim, in_dim), "weight", candle_nn::init::ZERO)?; let bs = vs.get_with_hints(out_dim, "bias", candle_nn::init::ZERO)?; Ok(Linear::new(ws, Some(bs))) } trait Model: Sized { fn new(vs: VarBuilder) -> Result<Self>; fn forward(&self, xs: &Tensor) -> Result<Tensor>; } struct LinearModel { linear: Linear, } impl Model for LinearModel { fn new(vs: VarBuilder) -> Result<Self> { let linear = linear_z(IMAGE_DIM, LABELS, vs)?; Ok(Self { linear }) } fn forward(&self, xs: &Tensor) -> Result<Tensor> { self.linear.forward(xs) } } struct Mlp { ln1: Linear, ln2: Linear, } impl Model for Mlp { fn new(vs: VarBuilder) -> Result<Self> { let ln1 = candle_nn::linear(IMAGE_DIM, 100, vs.pp("ln1"))?; let ln2 = candle_nn::linear(100, LABELS, vs.pp("ln2"))?; Ok(Self { ln1, ln2 }) } fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.ln1.forward(xs)?; let xs = xs.relu()?; self.ln2.forward(&xs) } } #[derive(Debug)] struct ConvNet { conv1: Conv2d, conv2: Conv2d, fc1: Linear, fc2: Linear, dropout: candle_nn::Dropout, } impl ConvNet { fn new(vs: VarBuilder) -> Result<Self> { let conv1 = candle_nn::conv2d(1, 32, 5, Default::default(), vs.pp("c1"))?; let conv2 = candle_nn::conv2d(32, 64, 5, Default::default(), vs.pp("c2"))?; let fc1 = candle_nn::linear(1024, 1024, vs.pp("fc1"))?; let fc2 = candle_nn::linear(1024, LABELS, vs.pp("fc2"))?; let dropout = candle_nn::Dropout::new(0.5); Ok(Self { conv1, conv2, fc1, fc2, dropout, }) } fn forward(&self, xs: &Tensor, train: bool) -> Result<Tensor> { let (b_sz, _img_dim) = xs.dims2()?; let xs = xs .reshape((b_sz, 1, 28, 28))? .apply(&self.conv1)? .max_pool2d(2)? .apply(&self.conv2)? .max_pool2d(2)? .flatten_from(1)? .apply(&self.fc1)? .relu()?; self.dropout.forward_t(&xs, train)?.apply(&self.fc2) } } struct TrainingArgs { learning_rate: f64, load: Option<String>, save: Option<String>, epochs: usize, } fn training_loop_cnn( m: candle_datasets::vision::Dataset, args: &TrainingArgs, ) -> anyhow::Result<()> { const BSIZE: usize = 64; let dev = candle::Device::cuda_if_available(0)?; let train_labels = m.train_labels; let train_images = m.train_images.to_device(&dev)?; let train_labels = train_labels.to_dtype(DType::U32)?.to_device(&dev)?; let mut varmap = VarMap::new(); let vs = VarBuilder::from_varmap(&varmap, DType::F32, &dev); let model = ConvNet::new(vs.clone())?; if let Some(load) = &args.load { println!("loading weights from {load}"); varmap.load(load)? } let adamw_params = candle_nn::ParamsAdamW { lr: args.learning_rate, ..Default::default() }; let mut opt = candle_nn::AdamW::new(varmap.all_vars(), adamw_params)?; let test_images = m.test_images.to_device(&dev)?; let test_labels = m.test_labels.to_dtype(DType::U32)?.to_device(&dev)?; let n_batches = train_images.dim(0)? 
/ BSIZE; let mut batch_idxs = (0..n_batches).collect::<Vec<usize>>(); for epoch in 1..args.epochs { let mut sum_loss = 0f32; batch_idxs.shuffle(&mut thread_rng()); for batch_idx in batch_idxs.iter() { let train_images = train_images.narrow(0, batch_idx * BSIZE, BSIZE)?; let train_labels = train_labels.narrow(0, batch_idx * BSIZE, BSIZE)?; let logits = model.forward(&train_images, true)?; let log_sm = ops::log_softmax(&logits, D::Minus1)?; let loss = loss::nll(&log_sm, &train_labels)?; opt.backward_step(&loss)?; sum_loss += loss.to_vec0::<f32>()?; } let avg_loss = sum_loss / n_batches as f32; let test_logits = model.forward(&test_images, false)?; let sum_ok = test_logits .argmax(D::Minus1)? .eq(&test_labels)? .to_dtype(DType::F32)? .sum_all()? .to_scalar::<f32>()?; let test_accuracy = sum_ok / test_labels.dims1()? as f32; println!( "{epoch:4} train loss {:8.5} test acc: {:5.2}%", avg_loss, 100. * test_accuracy ); } if let Some(save) = &args.save { println!("saving trained weights in {save}"); varmap.save(save)? } Ok(()) } fn training_loop<M: Model>( m: candle_datasets::vision::Dataset, args: &TrainingArgs, ) -> anyhow::Result<()> { let dev = candle::Device::cuda_if_available(0)?; let train_labels = m.train_labels; let train_images = m.train_images.to_device(&dev)?; let train_labels = train_labels.to_dtype(DType::U32)?.to_device(&dev)?; let mut varmap = VarMap::new(); let vs = VarBuilder::from_varmap(&varmap, DType::F32, &dev); let model = M::new(vs.clone())?; if let Some(load) = &args.load { println!("loading weights from {load}"); varmap.load(load)? } let mut sgd = candle_nn::SGD::new(varmap.all_vars(), args.learning_rate)?; let test_images = m.test_images.to_device(&dev)?; let test_labels = m.test_labels.to_dtype(DType::U32)?.to_device(&dev)?; for epoch in 1..args.epochs { let logits = model.forward(&train_images)?; let log_sm = ops::log_softmax(&logits, D::Minus1)?; let loss = loss::nll(&log_sm, &train_labels)?; sgd.backward_step(&loss)?; let test_logits = model.forward(&test_images)?; let sum_ok = test_logits .argmax(D::Minus1)? .eq(&test_labels)? .to_dtype(DType::F32)? .sum_all()? .to_scalar::<f32>()?; let test_accuracy = sum_ok / test_labels.dims1()? as f32; println!( "{epoch:4} train loss: {:8.5} test acc: {:5.2}%", loss.to_scalar::<f32>()?, 100. * test_accuracy ); } if let Some(save) = &args.save { println!("saving trained weights in {save}"); varmap.save(save)? } Ok(()) } #[derive(ValueEnum, Clone)] enum WhichModel { Linear, Mlp, Cnn, } #[derive(Parser)] struct Args { #[clap(value_enum, default_value_t = WhichModel::Linear)] model: WhichModel, #[arg(long)] learning_rate: Option<f64>, #[arg(long, default_value_t = 200)] epochs: usize, /// The file where to save the trained weights, in safetensors format. #[arg(long)] save: Option<String>, /// The file where to load the trained weights from, in safetensors format. #[arg(long)] load: Option<String>, /// The directory where to load the dataset from, in ubyte format. #[arg(long)] local_mnist: Option<String>, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); // Load the dataset let m = if let Some(directory) = args.local_mnist { candle_datasets::vision::mnist::load_dir(directory)? } else { candle_datasets::vision::mnist::load()? 
}; println!("train-images: {:?}", m.train_images.shape()); println!("train-labels: {:?}", m.train_labels.shape()); println!("test-images: {:?}", m.test_images.shape()); println!("test-labels: {:?}", m.test_labels.shape()); let default_learning_rate = match args.model { WhichModel::Linear => 1., WhichModel::Mlp => 0.05, WhichModel::Cnn => 0.001, }; let training_args = TrainingArgs { epochs: args.epochs, learning_rate: args.learning_rate.unwrap_or(default_learning_rate), load: args.load, save: args.save, }; match args.model { WhichModel::Linear => training_loop::<LinearModel>(m, &training_args), WhichModel::Mlp => training_loop::<Mlp>(m, &training_args), WhichModel::Cnn => training_loop_cnn(m, &training_args), } }
candle/candle-examples/examples/mnist-training/main.rs/0
{ "file_path": "candle/candle-examples/examples/mnist-training/main.rs", "repo_id": "candle", "token_count": 4094 }
# candle-quantized-t5 ## Seq2Seq example This example uses a quantized version of the t5 model. ```bash $ cargo run --example quantized-t5 --release -- --prompt "translate to German: A beautiful candle." ... Eine schöne Kerze. ``` ## Generating Quantized weight files The weight file is automatically retrieved from the hub. It is also possible to generate quantized weight files from the original safetensors file by using the `tensor-tools` command line utility via: ```bash $ cargo run --bin tensor-tools --release -- quantize --quantization q6k PATH/TO/T5/model.safetensors /tmp/model.gguf ``` ## Using custom models To use a different model, specify the `model-id`. For example, for text editing, you can use quantized [CoEdit models](https://huggingface.co/jbochi/candle-coedit-quantized). ```bash $ cargo run --example quantized-t5 --release -- \ --model-id "jbochi/candle-coedit-quantized" \ --prompt "Make this text coherent: Their flight is weak. They run quickly through the tree canopy." \ --temperature 0 ... Although their flight is weak, they run quickly through the tree canopy. ``` By default, it will look for `model.gguf` and `config.json`, but you can specify custom local or remote `weight-file` and `config-file`s: ```bash cargo run --example quantized-t5 --release -- \ --model-id "jbochi/candle-coedit-quantized" \ --weight-file "model-xl.gguf" \ --config-file "config-xl.json" \ --prompt "Rewrite to make this easier to understand: Note that a storm surge is what forecasters consider a hurricane's most treacherous aspect." \ --temperature 0 ... Note that a storm surge is what forecasters consider a hurricane's most dangerous part. ``` ### [MADLAD-400](https://arxiv.org/abs/2309.04662) MADLAD-400 is a series of multilingual machine translation T5 models trained on 250 billion tokens covering over 450 languages using publicly available data. These models are competitive with significantly larger models. ```bash cargo run --example quantized-t5 --release -- \ --model-id "jbochi/madlad400-3b-mt" --weight-file "model-q4k.gguf" \ --prompt "<2de> How are you, my friend?" \ --temperature 0 ... Wie geht es dir, mein Freund? ```
candle/candle-examples/examples/quantized-t5/README.md/0
{ "file_path": "candle/candle-examples/examples/quantized-t5/README.md", "repo_id": "candle", "token_count": 683 }
//! Vectorized version of the gym environment. use candle::{DType, Device, Result, Tensor}; use pyo3::prelude::*; #[allow(unused)] #[derive(Debug)] pub struct Step { pub obs: Tensor, pub reward: Tensor, pub is_done: Tensor, } #[allow(unused)] pub struct VecGymEnv { env: PyObject, action_space: usize, observation_space: Vec<usize>, } fn w(res: PyErr) -> candle::Error { candle::Error::wrap(res) } #[allow(unused)] impl VecGymEnv { pub fn new(name: &str, img_dir: Option<&str>, nprocesses: usize) -> Result<VecGymEnv> { Python::with_gil(|py| { let sys = py.import_bound("sys")?; let path = sys.getattr("path")?; let _ = path.call_method1( "append", ("candle-examples/examples/reinforcement-learning",), )?; let gym = py.import_bound("atari_wrappers")?; let make = gym.getattr("make")?; let env = make.call1((name, img_dir, nprocesses))?; let action_space = env.getattr("action_space")?; let action_space = action_space.getattr("n")?.extract()?; let observation_space = env.getattr("observation_space")?; let observation_space: Vec<usize> = observation_space.getattr("shape")?.extract()?; let observation_space = [vec![nprocesses].as_slice(), observation_space.as_slice()].concat(); Ok(VecGymEnv { env: env.into(), action_space, observation_space, }) }) .map_err(w) } pub fn reset(&self) -> Result<Tensor> { let obs = Python::with_gil(|py| { let obs = self.env.call_method0(py, "reset")?; let obs = obs.call_method0(py, "flatten")?; obs.extract::<Vec<f32>>(py) }) .map_err(w)?; Tensor::new(obs, &Device::Cpu)?.reshape(self.observation_space.as_slice()) } pub fn step(&self, action: Vec<usize>) -> Result<Step> { let (obs, reward, is_done) = Python::with_gil(|py| { let step = self.env.call_method_bound(py, "step", (action,), None)?; let step = step.bind(py); let obs = step.get_item(0)?.call_method("flatten", (), None)?; let obs_buffer = pyo3::buffer::PyBuffer::get_bound(&obs)?; let obs: Vec<u8> = obs_buffer.to_vec(py)?; let reward: Vec<f32> = step.get_item(1)?.extract()?; let is_done: Vec<f32> = step.get_item(2)?.extract()?; Ok((obs, reward, is_done)) }) .map_err(w)?; let obs = Tensor::from_vec(obs, self.observation_space.as_slice(), &Device::Cpu)? .to_dtype(DType::F32)?; let reward = Tensor::new(reward, &Device::Cpu)?; let is_done = Tensor::new(is_done, &Device::Cpu)?; Ok(Step { obs, reward, is_done, }) } pub fn action_space(&self) -> usize { self.action_space } pub fn observation_space(&self) -> &[usize] { &self.observation_space } }
candle/candle-examples/examples/reinforcement-learning/vec_gym_env.rs/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/vec_gym_env.rs", "repo_id": "candle", "token_count": 1572 }
#[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use candle_transformers::models::stable_diffusion; use std::ops::Div; use anyhow::{Error as E, Result}; use candle::{DType, Device, IndexOp, Module, Tensor, D}; use clap::Parser; use rand::Rng; use stable_diffusion::vae::AutoEncoderKL; use tokenizers::Tokenizer; #[derive(Parser)] #[command(author, version, about, long_about = None)] struct Args { /// The prompt to be used for image generation. #[arg( long, default_value = "A very realistic photo of a rusty robot walking on a sandy beach" )] prompt: String, #[arg(long, default_value = "")] uncond_prompt: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// The height in pixels of the generated image. #[arg(long)] height: Option<usize>, /// The width in pixels of the generated image. #[arg(long)] width: Option<usize>, /// The UNet weight file, in .safetensors format. #[arg(long, value_name = "FILE")] unet_weights: Option<String>, /// The CLIP weight file, in .safetensors format. #[arg(long, value_name = "FILE")] clip_weights: Option<String>, /// The CLIP2 weight file, in .safetensors format. #[arg(long, value_name = "FILE")] clip2_weights: Option<String>, /// The VAE weight file, in .safetensors format. #[arg(long, value_name = "FILE")] vae_weights: Option<String>, #[arg(long, value_name = "FILE")] /// The file specifying the tokenizer to used for tokenization. tokenizer: Option<String>, /// The size of the sliced attention or 0 for automatic slicing (disabled by default) #[arg(long)] sliced_attention_size: Option<usize>, /// The number of steps to run the diffusion for. #[arg(long)] n_steps: Option<usize>, /// The number of samples to generate iteratively. #[arg(long, default_value_t = 1)] num_samples: usize, /// The numbers of samples to generate simultaneously. #[arg[long, default_value_t = 1]] bsize: usize, /// The name of the final image to generate. #[arg(long, value_name = "FILE", default_value = "sd_final.png")] final_image: String, #[arg(long, value_enum, default_value = "v2-1")] sd_version: StableDiffusionVersion, /// Generate intermediary images at each step. #[arg(long, action)] intermediary_images: bool, #[arg(long)] use_flash_attn: bool, #[arg(long)] use_f16: bool, #[arg(long)] guidance_scale: Option<f64>, /// Path to the mask image for inpainting. #[arg(long, value_name = "FILE")] mask_path: Option<String>, /// Path to the image used to initialize the latents. For inpainting, this is the image to be masked. #[arg(long, value_name = "FILE")] img2img: Option<String>, /// The strength, indicates how much to transform the initial image. The /// value must be between 0 and 1, a value of 1 discards the initial image /// information. #[arg(long, default_value_t = 0.8)] img2img_strength: f64, /// The seed to use when generating random samples. 
#[arg(long)] seed: Option<u64>, /// Force the saved image to update only the masked region #[arg(long)] only_update_masked: bool, } #[derive(Debug, Clone, Copy, clap::ValueEnum, PartialEq, Eq)] enum StableDiffusionVersion { V1_5, V1_5Inpaint, V2_1, V2Inpaint, Xl, XlInpaint, Turbo, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum ModelFile { Tokenizer, Tokenizer2, Clip, Clip2, Unet, Vae, } impl StableDiffusionVersion { fn repo(&self) -> &'static str { match self { Self::XlInpaint => "diffusers/stable-diffusion-xl-1.0-inpainting-0.1", Self::Xl => "stabilityai/stable-diffusion-xl-base-1.0", Self::V2Inpaint => "stabilityai/stable-diffusion-2-inpainting", Self::V2_1 => "stabilityai/stable-diffusion-2-1", Self::V1_5 => "runwayml/stable-diffusion-v1-5", Self::V1_5Inpaint => "stable-diffusion-v1-5/stable-diffusion-inpainting", Self::Turbo => "stabilityai/sdxl-turbo", } } fn unet_file(&self, use_f16: bool) -> &'static str { match self { Self::V1_5 | Self::V1_5Inpaint | Self::V2_1 | Self::V2Inpaint | Self::Xl | Self::XlInpaint | Self::Turbo => { if use_f16 { "unet/diffusion_pytorch_model.fp16.safetensors" } else { "unet/diffusion_pytorch_model.safetensors" } } } } fn vae_file(&self, use_f16: bool) -> &'static str { match self { Self::V1_5 | Self::V1_5Inpaint | Self::V2_1 | Self::V2Inpaint | Self::Xl | Self::XlInpaint | Self::Turbo => { if use_f16 { "vae/diffusion_pytorch_model.fp16.safetensors" } else { "vae/diffusion_pytorch_model.safetensors" } } } } fn clip_file(&self, use_f16: bool) -> &'static str { match self { Self::V1_5 | Self::V1_5Inpaint | Self::V2_1 | Self::V2Inpaint | Self::Xl | Self::XlInpaint | Self::Turbo => { if use_f16 { "text_encoder/model.fp16.safetensors" } else { "text_encoder/model.safetensors" } } } } fn clip2_file(&self, use_f16: bool) -> &'static str { match self { Self::V1_5 | Self::V1_5Inpaint | Self::V2_1 | Self::V2Inpaint | Self::Xl | Self::XlInpaint | Self::Turbo => { if use_f16 { "text_encoder_2/model.fp16.safetensors" } else { "text_encoder_2/model.safetensors" } } } } } impl ModelFile { fn get( &self, filename: Option<String>, version: StableDiffusionVersion, use_f16: bool, ) -> Result<std::path::PathBuf> { use hf_hub::api::sync::Api; match filename { Some(filename) => Ok(std::path::PathBuf::from(filename)), None => { let (repo, path) = match self { Self::Tokenizer => { let tokenizer_repo = match version { StableDiffusionVersion::V1_5 | StableDiffusionVersion::V2_1 | StableDiffusionVersion::V1_5Inpaint | StableDiffusionVersion::V2Inpaint => "openai/clip-vit-base-patch32", StableDiffusionVersion::Xl | StableDiffusionVersion::XlInpaint | StableDiffusionVersion::Turbo => { // This seems similar to the patch32 version except some very small // difference in the split regex. "openai/clip-vit-large-patch14" } }; (tokenizer_repo, "tokenizer.json") } Self::Tokenizer2 => { ("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", "tokenizer.json") } Self::Clip => (version.repo(), version.clip_file(use_f16)), Self::Clip2 => (version.repo(), version.clip2_file(use_f16)), Self::Unet => (version.repo(), version.unet_file(use_f16)), Self::Vae => { // Override for SDXL when using f16 weights. 
// See https://github.com/huggingface/candle/issues/1060 if matches!( version, StableDiffusionVersion::Xl | StableDiffusionVersion::Turbo, ) && use_f16 { ( "madebyollin/sdxl-vae-fp16-fix", "diffusion_pytorch_model.safetensors", ) } else { (version.repo(), version.vae_file(use_f16)) } } }; let filename = Api::new()?.model(repo.to_string()).get(path)?; Ok(filename) } } } } fn output_filename( basename: &str, sample_idx: usize, num_samples: usize, timestep_idx: Option<usize>, ) -> String { let filename = if num_samples > 1 { match basename.rsplit_once('.') { None => format!("{basename}.{sample_idx}.png"), Some((filename_no_extension, extension)) => { format!("{filename_no_extension}.{sample_idx}.{extension}") } } } else { basename.to_string() }; match timestep_idx { None => filename, Some(timestep_idx) => match filename.rsplit_once('.') { None => format!("{filename}-{timestep_idx}.png"), Some((filename_no_extension, extension)) => { format!("{filename_no_extension}-{timestep_idx}.{extension}") } }, } } #[allow(clippy::too_many_arguments)] fn save_image( vae: &AutoEncoderKL, latents: &Tensor, vae_scale: f64, bsize: usize, idx: usize, final_image: &str, num_samples: usize, timestep_ids: Option<usize>, ) -> Result<()> { let images = vae.decode(&(latents / vae_scale)?)?; let images = ((images / 2.)? + 0.5)?.to_device(&Device::Cpu)?; let images = (images.clamp(0f32, 1.)? * 255.)?.to_dtype(DType::U8)?; for batch in 0..bsize { let image = images.i(batch)?; let image_filename = output_filename( final_image, (bsize * idx) + batch + 1, batch + num_samples, timestep_ids, ); candle_examples::save_image(&image, image_filename)?; } Ok(()) } #[allow(clippy::too_many_arguments)] fn text_embeddings( prompt: &str, uncond_prompt: &str, tokenizer: Option<String>, clip_weights: Option<String>, clip2_weights: Option<String>, sd_version: StableDiffusionVersion, sd_config: &stable_diffusion::StableDiffusionConfig, use_f16: bool, device: &Device, dtype: DType, use_guide_scale: bool, first: bool, ) -> Result<Tensor> { let tokenizer_file = if first { ModelFile::Tokenizer } else { ModelFile::Tokenizer2 }; let tokenizer = tokenizer_file.get(tokenizer, sd_version, use_f16)?; let tokenizer = Tokenizer::from_file(tokenizer).map_err(E::msg)?; let pad_id = match &sd_config.clip.pad_with { Some(padding) => *tokenizer.get_vocab(true).get(padding.as_str()).unwrap(), None => *tokenizer.get_vocab(true).get("<|endoftext|>").unwrap(), }; println!("Running with prompt \"{prompt}\"."); let mut tokens = tokenizer .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); if tokens.len() > sd_config.clip.max_position_embeddings { anyhow::bail!( "the prompt is too long, {} > max-tokens ({})", tokens.len(), sd_config.clip.max_position_embeddings ) } while tokens.len() < sd_config.clip.max_position_embeddings { tokens.push(pad_id) } let tokens = Tensor::new(tokens.as_slice(), device)?.unsqueeze(0)?; println!("Building the Clip transformer."); let clip_weights_file = if first { ModelFile::Clip } else { ModelFile::Clip2 }; let clip_weights = if first { clip_weights_file.get(clip_weights, sd_version, use_f16)? } else { clip_weights_file.get(clip2_weights, sd_version, use_f16)? 
}; let clip_config = if first { &sd_config.clip } else { sd_config.clip2.as_ref().unwrap() }; let text_model = stable_diffusion::build_clip_transformer(clip_config, clip_weights, device, DType::F32)?; let text_embeddings = text_model.forward(&tokens)?; let text_embeddings = if use_guide_scale { let mut uncond_tokens = tokenizer .encode(uncond_prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); if uncond_tokens.len() > sd_config.clip.max_position_embeddings { anyhow::bail!( "the negative prompt is too long, {} > max-tokens ({})", uncond_tokens.len(), sd_config.clip.max_position_embeddings ) } while uncond_tokens.len() < sd_config.clip.max_position_embeddings { uncond_tokens.push(pad_id) } let uncond_tokens = Tensor::new(uncond_tokens.as_slice(), device)?.unsqueeze(0)?; let uncond_embeddings = text_model.forward(&uncond_tokens)?; Tensor::cat(&[uncond_embeddings, text_embeddings], 0)?.to_dtype(dtype)? } else { text_embeddings.to_dtype(dtype)? }; Ok(text_embeddings) } fn image_preprocess<T: AsRef<std::path::Path>>(path: T) -> anyhow::Result<Tensor> { let img = image::ImageReader::open(path)?.decode()?; let (height, width) = (img.height() as usize, img.width() as usize); let height = height - height % 32; let width = width - width % 32; let img = img.resize_to_fill( width as u32, height as u32, image::imageops::FilterType::CatmullRom, ); let img = img.to_rgb8(); let img = img.into_raw(); let img = Tensor::from_vec(img, (height, width, 3), &Device::Cpu)? .permute((2, 0, 1))? .to_dtype(DType::F32)? .affine(2. / 255., -1.)? .unsqueeze(0)?; Ok(img) } /// Convert the mask image to a single channel tensor. Also ensure the image is a multiple of 32 in both dimensions. fn mask_preprocess<T: AsRef<std::path::Path>>(path: T) -> anyhow::Result<Tensor> { let img = image::open(path)?.to_luma8(); let (new_width, new_height) = { let (width, height) = img.dimensions(); (width - width % 32, height - height % 32) }; let img = image::imageops::resize( &img, new_width, new_height, image::imageops::FilterType::CatmullRom, ) .into_raw(); let mask = Tensor::from_vec(img, (new_height as usize, new_width as usize), &Device::Cpu)? .unsqueeze(0)? .to_dtype(DType::F32)? .div(255.0)? .unsqueeze(0)?; Ok(mask) } /// Generates the mask latents, scaled mask and mask_4 for inpainting. Returns a tuple of None if inpainting is not /// being used. #[allow(clippy::too_many_arguments)] fn inpainting_tensors( sd_version: StableDiffusionVersion, mask_path: Option<String>, dtype: DType, device: &Device, use_guide_scale: bool, vae: &AutoEncoderKL, image: Option<Tensor>, vae_scale: f64, ) -> Result<(Option<Tensor>, Option<Tensor>, Option<Tensor>)> { match sd_version { StableDiffusionVersion::XlInpaint | StableDiffusionVersion::V2Inpaint | StableDiffusionVersion::V1_5Inpaint => { let inpaint_mask = mask_path.ok_or_else(|| { anyhow::anyhow!("An inpainting model was requested but mask-path is not provided.") })?; // Get the mask image with shape [1, 1, 128, 128] let mask = mask_preprocess(inpaint_mask)? .to_device(device)? .to_dtype(dtype)?; // Generate the masked image from the image and the mask with shape [1, 3, 1024, 1024] let xmask = mask.le(0.5)?.repeat(&[1, 3, 1, 1])?.to_dtype(dtype)?; let image = &image .ok_or_else(|| anyhow::anyhow!( "An inpainting model was requested but img2img which is used as the input image is not provided." 
))?; let masked_img = (image * xmask)?; // Scale down the mask let shape = masked_img.shape(); let (w, h) = (shape.dims()[3] / 8, shape.dims()[2] / 8); let mask = mask.interpolate2d(w, h)?; // shape: [1, 4, 128, 128] let mask_latents = vae.encode(&masked_img)?; let mask_latents = (mask_latents.sample()? * vae_scale)?.to_device(device)?; let mask_4 = mask.as_ref().repeat(&[1, 4, 1, 1])?; let (mask_latents, mask) = if use_guide_scale { ( Tensor::cat(&[&mask_latents, &mask_latents], 0)?, Tensor::cat(&[&mask, &mask], 0)?, ) } else { (mask_latents, mask) }; Ok((Some(mask_latents), Some(mask), Some(mask_4))) } _ => Ok((None, None, None)), } } fn run(args: Args) -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let Args { prompt, uncond_prompt, cpu, height, width, n_steps, tokenizer, final_image, sliced_attention_size, num_samples, bsize, sd_version, clip_weights, clip2_weights, vae_weights, unet_weights, tracing, use_f16, guidance_scale, use_flash_attn, mask_path, img2img, img2img_strength, seed, .. } = args; if !(0. ..=1.).contains(&img2img_strength) { anyhow::bail!("img2img-strength should be between 0 and 1, got {img2img_strength}") } let _guard = if tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let guidance_scale = match guidance_scale { Some(guidance_scale) => guidance_scale, None => match sd_version { StableDiffusionVersion::V1_5 | StableDiffusionVersion::V1_5Inpaint | StableDiffusionVersion::V2_1 | StableDiffusionVersion::V2Inpaint | StableDiffusionVersion::XlInpaint | StableDiffusionVersion::Xl => 7.5, StableDiffusionVersion::Turbo => 0., }, }; let n_steps = match n_steps { Some(n_steps) => n_steps, None => match sd_version { StableDiffusionVersion::V1_5 | StableDiffusionVersion::V1_5Inpaint | StableDiffusionVersion::V2_1 | StableDiffusionVersion::V2Inpaint | StableDiffusionVersion::XlInpaint | StableDiffusionVersion::Xl => 30, StableDiffusionVersion::Turbo => 1, }, }; let dtype = if use_f16 { DType::F16 } else { DType::F32 }; let sd_config = match sd_version { StableDiffusionVersion::V1_5 | StableDiffusionVersion::V1_5Inpaint => { stable_diffusion::StableDiffusionConfig::v1_5(sliced_attention_size, height, width) } StableDiffusionVersion::V2_1 | StableDiffusionVersion::V2Inpaint => { stable_diffusion::StableDiffusionConfig::v2_1(sliced_attention_size, height, width) } StableDiffusionVersion::Xl | StableDiffusionVersion::XlInpaint => { stable_diffusion::StableDiffusionConfig::sdxl(sliced_attention_size, height, width) } StableDiffusionVersion::Turbo => stable_diffusion::StableDiffusionConfig::sdxl_turbo( sliced_attention_size, height, width, ), }; let mut scheduler = sd_config.build_scheduler(n_steps)?; let device = candle_examples::device(cpu)?; // If a seed is not given, generate a random seed and print it let seed = seed.unwrap_or(rand::thread_rng().gen_range(0u64..u64::MAX)); println!("Using seed {seed}"); device.set_seed(seed)?; let use_guide_scale = guidance_scale > 1.0; let which = match sd_version { StableDiffusionVersion::Xl | StableDiffusionVersion::XlInpaint | StableDiffusionVersion::Turbo => vec![true, false], _ => vec![true], }; let text_embeddings = which .iter() .map(|first| { text_embeddings( &prompt, &uncond_prompt, tokenizer.clone(), clip_weights.clone(), clip2_weights.clone(), sd_version, &sd_config, use_f16, &device, dtype, use_guide_scale, *first, ) }) .collect::<Result<Vec<_>>>()?; let text_embeddings = 
Tensor::cat(&text_embeddings, D::Minus1)?; let text_embeddings = text_embeddings.repeat((bsize, 1, 1))?; println!("{text_embeddings:?}"); println!("Building the autoencoder."); let vae_weights = ModelFile::Vae.get(vae_weights, sd_version, use_f16)?; let vae = sd_config.build_vae(vae_weights, &device, dtype)?; let (image, init_latent_dist) = match &img2img { None => (None, None), Some(image) => { let image = image_preprocess(image)? .to_device(&device)? .to_dtype(dtype)?; (Some(image.clone()), Some(vae.encode(&image)?)) } }; println!("Building the unet."); let unet_weights = ModelFile::Unet.get(unet_weights, sd_version, use_f16)?; let in_channels = match sd_version { StableDiffusionVersion::XlInpaint | StableDiffusionVersion::V2Inpaint | StableDiffusionVersion::V1_5Inpaint => 9, _ => 4, }; let unet = sd_config.build_unet(unet_weights, &device, in_channels, use_flash_attn, dtype)?; let t_start = if img2img.is_some() { n_steps - (n_steps as f64 * img2img_strength) as usize } else { 0 }; let vae_scale = match sd_version { StableDiffusionVersion::V1_5 | StableDiffusionVersion::V1_5Inpaint | StableDiffusionVersion::V2_1 | StableDiffusionVersion::V2Inpaint | StableDiffusionVersion::XlInpaint | StableDiffusionVersion::Xl => 0.18215, StableDiffusionVersion::Turbo => 0.13025, }; let (mask_latents, mask, mask_4) = inpainting_tensors( sd_version, mask_path, dtype, &device, use_guide_scale, &vae, image, vae_scale, )?; for idx in 0..num_samples { let timesteps = scheduler.timesteps().to_vec(); let latents = match &init_latent_dist { Some(init_latent_dist) => { let latents = (init_latent_dist.sample()? * vae_scale)?.to_device(&device)?; if t_start < timesteps.len() { let noise = latents.randn_like(0f64, 1f64)?; scheduler.add_noise(&latents, noise, timesteps[t_start])? } else { latents } } None => { let latents = Tensor::randn( 0f32, 1f32, (bsize, 4, sd_config.height / 8, sd_config.width / 8), &device, )?; // scale the initial noise by the standard deviation required by the scheduler (latents * scheduler.init_noise_sigma())? } }; let mut latents = latents.to_dtype(dtype)?; println!("starting sampling"); for (timestep_index, &timestep) in timesteps.iter().enumerate() { if timestep_index < t_start { continue; } let start_time = std::time::Instant::now(); let latent_model_input = if use_guide_scale { Tensor::cat(&[&latents, &latents], 0)? } else { latents.clone() }; let latent_model_input = scheduler.scale_model_input(latent_model_input, timestep)?; let latent_model_input = match sd_version { StableDiffusionVersion::XlInpaint | StableDiffusionVersion::V2Inpaint | StableDiffusionVersion::V1_5Inpaint => Tensor::cat( &[ &latent_model_input, mask.as_ref().unwrap(), mask_latents.as_ref().unwrap(), ], 1, )?, _ => latent_model_input, } .to_device(&device)?; let noise_pred = unet.forward(&latent_model_input, timestep as f64, &text_embeddings)?; let noise_pred = if use_guide_scale { let noise_pred = noise_pred.chunk(2, 0)?; let (noise_pred_uncond, noise_pred_text) = (&noise_pred[0], &noise_pred[1]); (noise_pred_uncond + ((noise_pred_text - noise_pred_uncond)? * guidance_scale)?)? } else { noise_pred }; latents = scheduler.step(&noise_pred, timestep, &latents)?; let dt = start_time.elapsed().as_secs_f32(); println!("step {}/{n_steps} done, {:.2}s", timestep_index + 1, dt); // Replace all pixels in the unmasked region with the original pixels discarding any changes. if args.only_update_masked { let mask = mask_4.as_ref().unwrap(); let latent_to_keep = mask_latents .as_ref() .unwrap() .get_on_dim(0, 0)? 
// shape: [4, H, W] .unsqueeze(0)?; // shape: [1, 4, H, W] latents = ((&latents * mask)? + &latent_to_keep * (1.0 - mask))?; } if args.intermediary_images { save_image( &vae, &latents, vae_scale, bsize, idx, &final_image, num_samples, Some(timestep_index + 1), )?; } } println!( "Generating the final image for sample {}/{}.", idx + 1, num_samples ); save_image( &vae, &latents, vae_scale, bsize, idx, &final_image, num_samples, None, )?; } Ok(()) } fn main() -> Result<()> { let args = Args::parse(); run(args) }
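// ---------------------------------------------------------------------------
// Illustrative check of the `output_filename` naming scheme above (not part of
// the upstream example): with several samples the sample index is spliced in
// before the extension, and intermediary images additionally get the timestep.
#[cfg(test)]
mod filename_tests {
    use super::output_filename;

    #[test]
    fn sample_and_timestep_suffixes() {
        assert_eq!(output_filename("sd_final.png", 2, 4, None), "sd_final.2.png");
        assert_eq!(
            output_filename("sd_final.png", 2, 4, Some(3)),
            "sd_final.2-3.png"
        );
        // With a single sample the basename is kept as-is.
        assert_eq!(output_filename("sd_final.png", 1, 1, None), "sd_final.png");
    }
}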
candle/candle-examples/examples/stable-diffusion/main.rs/0
{ "file_path": "candle/candle-examples/examples/stable-diffusion/main.rs", "repo_id": "candle", "token_count": 14021 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use clap::Parser; use candle::{DType, IndexOp, D}; use candle_nn::VarBuilder; use candle_transformers::models::vit; #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let image = candle_examples::imagenet::load_image224(args.image)?.to_device(&device)?; println!("loaded image {image:?}"); let model_file = match args.model { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("google/vit-base-patch16-224".into()); api.get("model.safetensors")? } Some(model) => model.into(), }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? }; let model = vit::Model::new(&vit::Config::vit_base_patch16_224(), 1000, vb)?; println!("model built"); let logits = model.forward(&image.unsqueeze(0)?)?; let prs = candle_nn::ops::softmax(&logits, D::Minus1)? .i(0)? .to_vec1::<f32>()?; let mut prs = prs.iter().enumerate().collect::<Vec<_>>(); prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1)); for &(category_idx, pr) in prs.iter().take(5) { println!( "{:24}: {:.2}%", candle_examples::imagenet::CLASSES[category_idx], 100. * pr ); } Ok(()) }
candle/candle-examples/examples/vit/main.rs/0
{ "file_path": "candle/candle-examples/examples/vit/main.rs", "repo_id": "candle", "token_count": 762 }
use candle::{DType, Device, IndexOp, Result, Tensor}; use candle_nn::{batch_norm, conv2d, conv2d_no_bias, Func, Module, VarBuilder}; use std::collections::BTreeMap; use std::fs::File; use std::io::{BufRead, BufReader}; use std::path::Path; #[derive(Debug)] struct Block { block_type: String, parameters: BTreeMap<String, String>, } impl Block { fn get(&self, key: &str) -> Result<&str> { match self.parameters.get(key) { None => candle::bail!("cannot find {} in {}", key, self.block_type), Some(value) => Ok(value), } } } #[derive(Debug)] pub struct Darknet { blocks: Vec<Block>, parameters: BTreeMap<String, String>, } impl Darknet { fn get(&self, key: &str) -> Result<&str> { match self.parameters.get(key) { None => candle::bail!("cannot find {} in net parameters", key), Some(value) => Ok(value), } } } struct Accumulator { block_type: Option<String>, parameters: BTreeMap<String, String>, net: Darknet, } impl Accumulator { fn new() -> Accumulator { Accumulator { block_type: None, parameters: BTreeMap::new(), net: Darknet { blocks: vec![], parameters: BTreeMap::new(), }, } } fn finish_block(&mut self) { match &self.block_type { None => (), Some(block_type) => { if block_type == "net" { self.net.parameters = self.parameters.clone(); } else { let block = Block { block_type: block_type.to_string(), parameters: self.parameters.clone(), }; self.net.blocks.push(block); } self.parameters.clear(); } } self.block_type = None; } } pub fn parse_config<T: AsRef<Path>>(path: T) -> Result<Darknet> { let file = File::open(path.as_ref())?; let mut acc = Accumulator::new(); for line in BufReader::new(file).lines() { let line = line?; if line.is_empty() || line.starts_with('#') { continue; } let line = line.trim(); if line.starts_with('[') { if !line.ends_with(']') { candle::bail!("line does not end with ']' {line}") } let line = &line[1..line.len() - 1]; acc.finish_block(); acc.block_type = Some(line.to_string()); } else { let key_value: Vec<&str> = line.splitn(2, '=').collect(); if key_value.len() != 2 { candle::bail!("missing equal {line}") } let prev = acc.parameters.insert( key_value[0].trim().to_owned(), key_value[1].trim().to_owned(), ); if prev.is_some() { candle::bail!("multiple value for key {}", line) } } } acc.finish_block(); Ok(acc.net) } enum Bl { Layer(Box<dyn candle_nn::Module + Send + Sync>), Route(Vec<usize>), Shortcut(usize), Yolo(usize, Vec<(usize, usize)>), } fn conv(vb: VarBuilder, index: usize, p: usize, b: &Block) -> Result<(usize, Bl)> { let activation = b.get("activation")?; let filters = b.get("filters")?.parse::<usize>()?; let pad = b.get("pad")?.parse::<usize>()?; let size = b.get("size")?.parse::<usize>()?; let stride = b.get("stride")?.parse::<usize>()?; let padding = if pad != 0 { (size - 1) / 2 } else { 0 }; let (bn, bias) = match b.parameters.get("batch_normalize") { Some(p) if p.parse::<usize>()? != 0 => { let bn = batch_norm(filters, 1e-5, vb.pp(format!("batch_norm_{index}")))?; (Some(bn), false) } Some(_) | None => (None, true), }; let conv_cfg = candle_nn::Conv2dConfig { stride, padding, groups: 1, dilation: 1, }; let conv = if bias { conv2d(p, filters, size, conv_cfg, vb.pp(format!("conv_{index}")))? } else { conv2d_no_bias(p, filters, size, conv_cfg, vb.pp(format!("conv_{index}")))? 
}; let leaky = match activation { "leaky" => true, "linear" => false, otherwise => candle::bail!("unsupported activation {}", otherwise), }; let func = candle_nn::func(move |xs| { let xs = conv.forward(xs)?; let xs = match &bn { Some(bn) => xs.apply_t(bn, false)?, None => xs, }; let xs = if leaky { xs.maximum(&(&xs * 0.1)?)? } else { xs }; Ok(xs) }); Ok((filters, Bl::Layer(Box::new(func)))) } fn upsample(prev_channels: usize) -> Result<(usize, Bl)> { let layer = candle_nn::func(|xs| { let (_n, _c, h, w) = xs.dims4()?; xs.upsample_nearest2d(2 * h, 2 * w) }); Ok((prev_channels, Bl::Layer(Box::new(layer)))) } fn int_list_of_string(s: &str) -> Result<Vec<i64>> { let res: std::result::Result<Vec<_>, _> = s.split(',').map(|xs| xs.trim().parse::<i64>()).collect(); Ok(res?) } fn usize_of_index(index: usize, i: i64) -> usize { if i >= 0 { i as usize } else { (index as i64 + i) as usize } } fn route(index: usize, p: &[(usize, Bl)], block: &Block) -> Result<(usize, Bl)> { let layers = int_list_of_string(block.get("layers")?)?; let layers: Vec<usize> = layers .into_iter() .map(|l| usize_of_index(index, l)) .collect(); let channels = layers.iter().map(|&l| p[l].0).sum(); Ok((channels, Bl::Route(layers))) } fn shortcut(index: usize, p: usize, block: &Block) -> Result<(usize, Bl)> { let from = block.get("from")?.parse::<i64>()?; Ok((p, Bl::Shortcut(usize_of_index(index, from)))) } fn yolo(p: usize, block: &Block) -> Result<(usize, Bl)> { let classes = block.get("classes")?.parse::<usize>()?; let flat = int_list_of_string(block.get("anchors")?)?; if flat.len() % 2 != 0 { candle::bail!("even number of anchors"); } let flat = flat.into_iter().map(|i| i as usize).collect::<Vec<_>>(); let anchors: Vec<_> = (0..(flat.len() / 2)) .map(|i| (flat[2 * i], flat[2 * i + 1])) .collect(); let mask = int_list_of_string(block.get("mask")?)?; let anchors = mask.into_iter().map(|i| anchors[i as usize]).collect(); Ok((p, Bl::Yolo(classes, anchors))) } fn detect( xs: &Tensor, image_height: usize, classes: usize, anchors: &[(usize, usize)], ) -> Result<Tensor> { let (bsize, _channels, height, _width) = xs.dims4()?; let stride = image_height / height; let grid_size = image_height / stride; let bbox_attrs = 5 + classes; let nanchors = anchors.len(); let xs = xs .reshape((bsize, bbox_attrs * nanchors, grid_size * grid_size))? .transpose(1, 2)? .contiguous()? .reshape((bsize, grid_size * grid_size * nanchors, bbox_attrs))?; let grid = Tensor::arange(0u32, grid_size as u32, &Device::Cpu)?; let a = grid.repeat((grid_size, 1))?; let b = a.t()?.contiguous()?; let x_offset = a.flatten_all()?.unsqueeze(1)?; let y_offset = b.flatten_all()?.unsqueeze(1)?; let xy_offset = Tensor::cat(&[&x_offset, &y_offset], 1)? .repeat((1, nanchors))? .reshape((grid_size * grid_size * nanchors, 2))? .unsqueeze(0)? .to_dtype(DType::F32)?; let anchors: Vec<f32> = anchors .iter() .flat_map(|&(x, y)| vec![x as f32 / stride as f32, y as f32 / stride as f32].into_iter()) .collect(); let anchors = Tensor::new(anchors.as_slice(), &Device::Cpu)? .reshape((anchors.len() / 2, 2))? .repeat((grid_size * grid_size, 1))? .unsqueeze(0)?; let ys02 = xs.i((.., .., 0..2))?; let ys24 = xs.i((.., .., 2..4))?; let ys4 = xs.i((.., .., 4..))?; let ys02 = (candle_nn::ops::sigmoid(&ys02)?.add(&xy_offset)? * stride as f64)?; let ys24 = (ys24.exp()?.mul(&anchors)? 
* stride as f64)?; let ys4 = candle_nn::ops::sigmoid(&ys4)?; let ys = Tensor::cat(&[ys02, ys24, ys4], 2)?; Ok(ys) } impl Darknet { pub fn height(&self) -> Result<usize> { let image_height = self.get("height")?.parse::<usize>()?; Ok(image_height) } pub fn width(&self) -> Result<usize> { let image_width = self.get("width")?.parse::<usize>()?; Ok(image_width) } pub fn build_model(&self, vb: VarBuilder) -> Result<Func> { let mut blocks: Vec<(usize, Bl)> = vec![]; let mut prev_channels: usize = 3; for (index, block) in self.blocks.iter().enumerate() { let channels_and_bl = match block.block_type.as_str() { "convolutional" => conv(vb.pp(index.to_string()), index, prev_channels, block)?, "upsample" => upsample(prev_channels)?, "shortcut" => shortcut(index, prev_channels, block)?, "route" => route(index, &blocks, block)?, "yolo" => yolo(prev_channels, block)?, otherwise => candle::bail!("unsupported block type {}", otherwise), }; prev_channels = channels_and_bl.0; blocks.push(channels_and_bl); } let image_height = self.height()?; let func = candle_nn::func(move |xs| { let mut prev_ys: Vec<Tensor> = vec![]; let mut detections: Vec<Tensor> = vec![]; for (_, b) in blocks.iter() { let ys = match b { Bl::Layer(l) => { let xs = prev_ys.last().unwrap_or(xs); l.forward(xs)? } Bl::Route(layers) => { let layers: Vec<_> = layers.iter().map(|&i| &prev_ys[i]).collect(); Tensor::cat(&layers, 1)? } Bl::Shortcut(from) => (prev_ys.last().unwrap() + prev_ys.get(*from).unwrap())?, Bl::Yolo(classes, anchors) => { let xs = prev_ys.last().unwrap_or(xs); detections.push(detect(xs, image_height, *classes, anchors)?); Tensor::new(&[0u32], &Device::Cpu)? } }; prev_ys.push(ys); } Tensor::cat(&detections, 1) }); Ok(func) } }
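// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the upstream file): wiring the config parser
// and the model builder together. The config path is a placeholder and the
// weights are assumed to already be loaded into the `VarBuilder`.
#[allow(dead_code)]
fn example_forward(vb: VarBuilder) -> Result<Tensor> {
    let darknet = parse_config("yolo-v3.cfg")?; // hypothetical config file
    let (h, w) = (darknet.height()?, darknet.width()?);
    let model = darknet.build_model(vb)?;
    // A single dummy RGB image at the network resolution; the detections from
    // the yolo heads are concatenated along dimension 1 by the built `Func`.
    let xs = Tensor::zeros((1, 3, h, w), DType::F32, &Device::Cpu)?;
    model.forward(&xs)
}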
candle/candle-examples/examples/yolo-v3/darknet.rs/0
{ "file_path": "candle/candle-examples/examples/yolo-v3/darknet.rs", "repo_id": "candle", "token_count": 5395 }
pub mod audio; pub mod bs1770; pub mod coco_classes; pub mod imagenet; pub mod token_output_stream; pub mod wav; use candle::utils::{cuda_is_available, metal_is_available}; use candle::{Device, Result, Tensor}; pub fn device(cpu: bool) -> Result<Device> { if cpu { Ok(Device::Cpu) } else if cuda_is_available() { Ok(Device::new_cuda(0)?) } else if metal_is_available() { Ok(Device::new_metal(0)?) } else { #[cfg(all(target_os = "macos", target_arch = "aarch64"))] { println!( "Running on CPU, to run on GPU(metal), build this example with `--features metal`" ); } #[cfg(not(all(target_os = "macos", target_arch = "aarch64")))] { println!("Running on CPU, to run on GPU, build this example with `--features cuda`"); } Ok(Device::Cpu) } } pub fn load_image<P: AsRef<std::path::Path>>( p: P, resize_longest: Option<usize>, ) -> Result<(Tensor, usize, usize)> { let img = image::ImageReader::open(p)? .decode() .map_err(candle::Error::wrap)?; let (initial_h, initial_w) = (img.height() as usize, img.width() as usize); let img = match resize_longest { None => img, Some(resize_longest) => { let (height, width) = (img.height(), img.width()); let resize_longest = resize_longest as u32; let (height, width) = if height < width { let h = (resize_longest * height) / width; (h, resize_longest) } else { let w = (resize_longest * width) / height; (resize_longest, w) }; img.resize_exact(width, height, image::imageops::FilterType::CatmullRom) } }; let (height, width) = (img.height() as usize, img.width() as usize); let img = img.to_rgb8(); let data = img.into_raw(); let data = Tensor::from_vec(data, (height, width, 3), &Device::Cpu)?.permute((2, 0, 1))?; Ok((data, initial_h, initial_w)) } pub fn load_image_and_resize<P: AsRef<std::path::Path>>( p: P, width: usize, height: usize, ) -> Result<Tensor> { let img = image::ImageReader::open(p)? .decode() .map_err(candle::Error::wrap)? .resize_to_fill( width as u32, height as u32, image::imageops::FilterType::Triangle, ); let img = img.to_rgb8(); let data = img.into_raw(); Tensor::from_vec(data, (width, height, 3), &Device::Cpu)?.permute((2, 0, 1)) } /// Saves an image to disk using the image crate, this expects an input with shape /// (c, height, width). 
pub fn save_image<P: AsRef<std::path::Path>>(img: &Tensor, p: P) -> Result<()> { let p = p.as_ref(); let (channel, height, width) = img.dims3()?; if channel != 3 { candle::bail!("save_image expects an input of shape (3, height, width)") } let img = img.permute((1, 2, 0))?.flatten_all()?; let pixels = img.to_vec1::<u8>()?; let image: image::ImageBuffer<image::Rgb<u8>, Vec<u8>> = match image::ImageBuffer::from_raw(width as u32, height as u32, pixels) { Some(image) => image, None => candle::bail!("error saving image {p:?}"), }; image.save(p).map_err(candle::Error::wrap)?; Ok(()) } pub fn save_image_resize<P: AsRef<std::path::Path>>( img: &Tensor, p: P, h: usize, w: usize, ) -> Result<()> { let p = p.as_ref(); let (channel, height, width) = img.dims3()?; if channel != 3 { candle::bail!("save_image expects an input of shape (3, height, width)") } let img = img.permute((1, 2, 0))?.flatten_all()?; let pixels = img.to_vec1::<u8>()?; let image: image::ImageBuffer<image::Rgb<u8>, Vec<u8>> = match image::ImageBuffer::from_raw(width as u32, height as u32, pixels) { Some(image) => image, None => candle::bail!("error saving image {p:?}"), }; let image = image::DynamicImage::from(image); let image = image.resize_to_fill(w as u32, h as u32, image::imageops::FilterType::CatmullRom); image.save(p).map_err(candle::Error::wrap)?; Ok(()) } /// Loads the safetensors files for a model from the hub based on a json index file. pub fn hub_load_safetensors( repo: &hf_hub::api::sync::ApiRepo, json_file: &str, ) -> Result<Vec<std::path::PathBuf>> { let json_file = repo.get(json_file).map_err(candle::Error::wrap)?; let json_file = std::fs::File::open(json_file)?; let json: serde_json::Value = serde_json::from_reader(&json_file).map_err(candle::Error::wrap)?; let weight_map = match json.get("weight_map") { None => candle::bail!("no weight map in {json_file:?}"), Some(serde_json::Value::Object(map)) => map, Some(_) => candle::bail!("weight map in {json_file:?} is not a map"), }; let mut safetensors_files = std::collections::HashSet::new(); for value in weight_map.values() { if let Some(file) = value.as_str() { safetensors_files.insert(file.to_string()); } } let safetensors_files = safetensors_files .iter() .map(|v| repo.get(v).map_err(candle::Error::wrap)) .collect::<Result<Vec<_>>>()?; Ok(safetensors_files) } pub fn hub_load_local_safetensors<P: AsRef<std::path::Path>>( path: P, json_file: &str, ) -> Result<Vec<std::path::PathBuf>> { let path = path.as_ref(); let jsfile = std::fs::File::open(path.join(json_file))?; let json: serde_json::Value = serde_json::from_reader(&jsfile).map_err(candle::Error::wrap)?; let weight_map = match json.get("weight_map") { None => candle::bail!("no weight map in {json_file:?}"), Some(serde_json::Value::Object(map)) => map, Some(_) => candle::bail!("weight map in {json_file:?} is not a map"), }; let mut safetensors_files = std::collections::HashSet::new(); for value in weight_map.values() { if let Some(file) = value.as_str() { safetensors_files.insert(file); } } let safetensors_files: Vec<_> = safetensors_files .into_iter() .map(|v| path.join(v)) .collect(); Ok(safetensors_files) }
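// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the upstream library): combining the helpers
// above in a typical load / process / save round trip. The file names are
// placeholders.
#[allow(dead_code)]
fn example_roundtrip() -> Result<()> {
    // Prefer CUDA/Metal when compiled in and available, otherwise fall back to CPU.
    let device = device(false)?;
    // Load an image, resizing its longest side to 512 pixels, and move it to the device.
    let (img, initial_h, initial_w) = load_image("in.png", Some(512))?;
    let img = img.to_device(&device)?;
    // ... run a model on `img` here ...
    // Bring the (3, h, w) u8 tensor back to the CPU and save it at the original size.
    save_image_resize(&img.to_device(&Device::Cpu)?, "out.png", initial_h, initial_w)?;
    Ok(())
}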
candle/candle-examples/src/lib.rs/0
{ "file_path": "candle/candle-examples/src/lib.rs", "repo_id": "candle", "token_count": 2878 }
/****************************************************************************** * Copyright (c) 2024, Tri Dao. ******************************************************************************/ #pragma once #include "cute/tensor.hpp" #include "cutlass/cutlass.h" #include "cutlass/layout/layout.h" #include <cutlass/numeric_types.h> using namespace cute; template<int kHeadDim_, int kBlockM_, int kBlockN_, int kNWarps_, typename elem_type=cutlass::half_t> struct Flash_kernel_traits { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 using Element = elem_type; static constexpr bool Has_cp_async = true; #else using Element = cutlass::half_t; static constexpr bool Has_cp_async = false; #endif using ElementAccum = float; using index_t = int64_t; #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 using MMA_Atom_Arch = std::conditional_t< std::is_same_v<elem_type, cutlass::half_t>, MMA_Atom<SM80_16x8x16_F32F16F16F32_TN>, MMA_Atom<SM80_16x8x16_F32BF16BF16F32_TN> >; #else using MMA_Atom_Arch = MMA_Atom<SM75_16x8x8_F32F16F16F32_TN>; #endif #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 750 using SmemCopyAtom = Copy_Atom<SM75_U32x4_LDSM_N, elem_type>; using SmemCopyAtomTransposed = Copy_Atom<SM75_U16x8_LDSM_T, elem_type>; #else using SmemCopyAtom = Copy_Atom<DefaultCopy, elem_type>; using SmemCopyAtomTransposed = Copy_Atom<DefaultCopy, elem_type>; #endif }; // If Share_Q_K_smem is true, that forces Is_Q_in_regs to be true template<int kHeadDim_, int kBlockM_, int kBlockN_, int kNWarps_, bool Is_Q_in_regs_=false, bool Share_Q_K_smem_=false, typename elem_type=cutlass::half_t, typename Base=Flash_kernel_traits<kHeadDim_, kBlockM_, kBlockN_, kNWarps_, elem_type> > struct Flash_fwd_kernel_traits : public Base { using Element = typename Base::Element; using ElementAccum = typename Base::ElementAccum; using index_t = typename Base::index_t; static constexpr bool Has_cp_async = Base::Has_cp_async; using SmemCopyAtom = typename Base::SmemCopyAtom; using SmemCopyAtomTransposed = typename Base::SmemCopyAtomTransposed; static constexpr bool Share_Q_K_smem = Share_Q_K_smem_; static constexpr bool Is_Q_in_regs = Is_Q_in_regs_ || Share_Q_K_smem; // The number of threads. static constexpr int kNWarps = kNWarps_; static constexpr int kNThreads = kNWarps * 32; static constexpr int kBlockM = kBlockM_; static constexpr int kBlockN = kBlockN_; static constexpr int kHeadDim = kHeadDim_; static_assert(kHeadDim % 32 == 0); static constexpr int kBlockKSmem = kHeadDim % 64 == 0 ? 64 : 32; static constexpr int kBlockKGmem = kHeadDim % 128 == 0 ? 128 : (kHeadDim % 64 == 0 ? 64 : 32); static constexpr int kSwizzle = kBlockKSmem == 32 ? 
2 : 3; using TiledMma = TiledMMA< typename Base::MMA_Atom_Arch, Layout<Shape<Int<kNWarps>,_1,_1>>, // 4x1x1 or 8x1x1 thread group Tile<Int<16 * kNWarps>, _16, _16>>; using SmemLayoutAtomQ = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, // This has to be kBlockKSmem, using kHeadDim gives wrong results for d=128 Layout<Shape<_8, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutQ = decltype(tile_to_shape( SmemLayoutAtomQ{}, Shape<Int<kBlockM>, Int<kHeadDim>>{})); using SmemLayoutKV = decltype(tile_to_shape( SmemLayoutAtomQ{}, Shape<Int<kBlockN>, Int<kHeadDim>>{})); // https://github.com/ColfaxResearch/cutlass-kernels/blob/a222587e6d59b93ba704853d3946fb686d8b8892/src/fmha/fmha_forward.cu#L434 using SmemLayoutVtransposed = decltype( composition(SmemLayoutKV{}, make_layout(Shape<Int<kHeadDim>, Int<kBlockN>>{}, GenRowMajor{}))); using SmemLayoutVtransposedNoSwizzle = decltype(get_nonswizzle_portion(SmemLayoutVtransposed{})); using SmemLayoutAtomO = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, Layout<Shape<Int<8>, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutO = decltype(tile_to_shape( SmemLayoutAtomO{}, Shape<Int<kBlockM>, Int<kHeadDim>>{})); using SmemCopyAtomO = Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, Element>; using SmemCopyAtomOaccum = Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, ElementAccum>; static constexpr int kSmemQSize = size(SmemLayoutQ{}) * sizeof(Element); static constexpr int kSmemKVSize = size(SmemLayoutKV{}) * 2 * sizeof(Element); static constexpr int kSmemSize = Share_Q_K_smem ? std::max(kSmemQSize, kSmemKVSize) : kSmemQSize + kSmemKVSize; static constexpr int kGmemElemsPerLoad = sizeof(cute::uint128_t) / sizeof(Element); static_assert(kHeadDim % kGmemElemsPerLoad == 0, "kHeadDim must be a multiple of kGmemElemsPerLoad"); // Using kBlockKSmem here is 6-10% faster than kBlockKGmem for d=128 because of bank conflicts. // For example, for d=128, smem is split into 2 "pages", each page takes care of columns // 0-63 and 64-127. If we have 16 threads per row for gmem read, when we write to smem, // thread 0 - 7 will write to the first page and thread 8 - 15 will write to the second page, // to the same banks. static constexpr int kGmemThreadsPerRow = kBlockKSmem / kGmemElemsPerLoad; static_assert(kNThreads % kGmemThreadsPerRow == 0, "kNThreads must be a multiple of kGmemThreadsPerRow"); using GmemLayoutAtom = Layout<Shape <Int<kNThreads / kGmemThreadsPerRow>, Int<kGmemThreadsPerRow>>, Stride<Int<kGmemThreadsPerRow>, _1>>; // We use CACHEGLOBAL instead of CACHEALWAYS for both Q and K/V, since we won't be reading // from the same address by the same threadblock. This is slightly faster. 
using Gmem_copy_struct = std::conditional_t< Has_cp_async, SM80_CP_ASYNC_CACHEGLOBAL<cute::uint128_t>, AutoVectorizingCopyWithAssumedAlignment<128> >; using GmemTiledCopyQKV = decltype( make_tiled_copy(Copy_Atom<Gmem_copy_struct, Element>{}, GmemLayoutAtom{}, Layout<Shape<_1, _8>>{})); // Val layout, 8 vals per read using GmemTiledCopyO = decltype( make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, Element>{}, GmemLayoutAtom{}, Layout<Shape<_1, _8>>{})); // Val layout, 8 vals per store using GmemLayoutAtomOaccum = std::conditional_t< kBlockKSmem == 32, Layout<Shape <_16, _8>, // Thread layout, 8 threads per row Stride< _8, _1>>, Layout<Shape <_8, _16>, // Thread layout, 16 threads per row Stride< _16, _1>> >; using GmemTiledCopyOaccum = decltype( make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, ElementAccum>{}, GmemLayoutAtomOaccum{}, Layout<Shape < _1, _4>>{})); // Val layout, 4 vals per store using GmemLayoutAtomRotcossin = GmemLayoutAtom; using GmemTiledCopyRotcossin = decltype( make_tiled_copy(Copy_Atom<UniversalCopy<uint64_t>, Element>{}, GmemLayoutAtomRotcossin{}, Layout<Shape < _1, _4>>{})); // Val layout, 4 vals per load using GmemTiledCopyRotcossinCont = decltype( make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, Element>{}, GmemLayoutAtomRotcossin{}, Layout<Shape < _1, _8>>{})); // Val layout, 8 vals per load }; // Is_V_in_regs is an option to reduce smem usage, but will increase register pressue. // No_double_buffer is another option to reduce smem usage, but will slow things down. template<int kHeadDim_, int kBlockM_, int kBlockN_, int kNWarps_, int AtomLayoutMSdP_=1, int AtomLayoutNdKV=2, int AtomLayoutMdQ=2, bool Is_V_in_regs_=false, bool No_double_buffer_=false, typename elem_type=cutlass::half_t, typename Base=Flash_kernel_traits<kHeadDim_, kBlockM_, kBlockN_, kNWarps_, elem_type> > struct Flash_bwd_kernel_traits : public Base { using Element = typename Base::Element; using ElementAccum = typename Base::ElementAccum; using index_t = typename Base::index_t; static constexpr bool Has_cp_async = Base::Has_cp_async; using SmemCopyAtom = typename Base::SmemCopyAtom; using SmemCopyAtomTransposed = typename Base::SmemCopyAtomTransposed; static constexpr bool Is_V_in_regs = Is_V_in_regs_; static constexpr bool No_double_buffer = No_double_buffer_; // The number of threads. static constexpr int kNWarps = kNWarps_; static constexpr int kNThreads = kNWarps * 32; static constexpr int kBlockM = kBlockM_; static constexpr int kBlockN = kBlockN_; static constexpr int kHeadDim = kHeadDim_; static_assert(kHeadDim % 32 == 0); static constexpr int kBlockKSmem = kHeadDim % 64 == 0 ? 64 : 32; static constexpr int kBlockKGmem = kHeadDim % 128 == 0 ? 128 : (kHeadDim % 64 == 0 ? 64 : 32); static constexpr int kSwizzle = kBlockKSmem == 32 ? 
2 : 3; static constexpr int AtomLayoutMSdP = AtomLayoutMSdP_; static_assert(kNWarps % AtomLayoutMSdP == 0); static_assert(kNWarps % AtomLayoutNdKV == 0); static_assert(kNWarps % AtomLayoutMdQ == 0); using TiledMmaSdP = TiledMMA< typename Base::MMA_Atom_Arch, Layout<Shape<Int<AtomLayoutMSdP>, Int<kNWarps / AtomLayoutMSdP>, _1>>, Tile<Int<16 * AtomLayoutMSdP>, Int<16 * kNWarps / AtomLayoutMSdP>, _16>>; using TiledMmadKV = TiledMMA< typename Base::MMA_Atom_Arch, Layout<Shape<Int<AtomLayoutNdKV>, Int<kNWarps / AtomLayoutNdKV>, _1>>, Tile<Int<16 * AtomLayoutNdKV>, Int<16 * kNWarps / AtomLayoutNdKV>, _16>>; using TiledMmadQ = TiledMMA< typename Base::MMA_Atom_Arch, Layout<Shape<Int<AtomLayoutMdQ>, Int<kNWarps / AtomLayoutMdQ>, _1>>, // 2x4x1 or 4x2x1 thread group Tile<Int<16 * AtomLayoutMdQ>, Int<16 * kNWarps / AtomLayoutMdQ>, _16>>; using SmemLayoutAtomQdO = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, Layout<Shape<_8, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutQdO = decltype(tile_to_shape( SmemLayoutAtomQdO{}, make_shape(Int<kBlockM>{}, Int<kHeadDim>{}))); using SmemLayoutAtomKV = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, Layout<Shape<Int<kBlockM / kNWarps>, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutKV = decltype(tile_to_shape( // SmemLayoutAtomQdO{}, SmemLayoutAtomKV{}, make_shape(Int<kBlockN>{}, Int<kHeadDim>{}))); using SmemLayoutKtransposed = decltype( composition(SmemLayoutKV{}, make_layout(Shape<Int<kHeadDim>, Int<kBlockN>>{}, GenRowMajor{}))); using SmemLayoutKtransposedNoSwizzle = decltype(get_nonswizzle_portion(SmemLayoutKtransposed{})); // TODO: generalize to other values of kBlockN // TODO: what should be the Swizzle here? 3 is faster than 1, and 1 is faster than 2 // static constexpr int kPBlockN = kBlockN; // Temporarily disabling this for hdim 256 on sm86 and sm89 // static_assert(kBlockN >= 64); static_assert(kBlockN >= 32); // TD [2023-03-19]: Idk why kPBlockN = 16 and kSwizzlePdS=3 is the fastest. static constexpr int kPBlockN = kBlockN >= 64 ? 64 : 32; static_assert(kPBlockN == 16 || kPBlockN == 32 || kPBlockN == 64); // static constexpr int kSwizzlePdS = kPBlockN == 16 ? 1 : (kPBlockN == 32 ? 
2 : 3); static constexpr int kSwizzlePdS = 3; using SmemLayoutAtomPdS = decltype( composition(Swizzle<kSwizzlePdS, 3, 3>{}, Layout<Shape<Int<kBlockM>, Int<kPBlockN>>, Stride<Int<kPBlockN>, _1>>{})); using SmemLayoutPdS = decltype(tile_to_shape( SmemLayoutAtomPdS{}, make_shape(Int<kBlockM>{}, Int<kBlockN>{}))); using SmemLayoutPdStransposed = decltype( composition(SmemLayoutPdS{}, make_layout(Shape<Int<kBlockN>, Int<kBlockM>>{}, GenRowMajor{}))); using SmemLayoutPdStransposedNoSwizzle = decltype(get_nonswizzle_portion(SmemLayoutPdStransposed{})); using SmemCopyAtomPdS = Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, elem_type>; using SmemLayoutQdOtransposed = decltype( composition(SmemLayoutQdO{}, make_layout(Shape<Int<kHeadDim>, Int<kBlockM>>{}, GenRowMajor{}))); using SmemLayoutQdOtransposedNoSwizzle = decltype(get_nonswizzle_portion(SmemLayoutQdOtransposed{})); using SmemLayoutAtomdKV = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, Layout<Shape<_8, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutdKV = decltype(tile_to_shape( SmemLayoutAtomdKV{}, make_shape(Int<kBlockN>{}, Int<kHeadDim>{}))); using SmemCopyAtomdKV = Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, elem_type>; using SmemLayoutAtomdQ = decltype( composition(Swizzle<kSwizzle, 3, 3>{}, Layout<Shape<_8, Int<kBlockKSmem>>, Stride<Int<kBlockKSmem>, _1>>{})); using SmemLayoutdQ = decltype(tile_to_shape( SmemLayoutAtomdQ{}, make_shape(Int<kBlockM>{}, Int<kHeadDim>{}))); using SmemCopyAtomdQ = Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, elem_type>; // Double buffer for sQ static constexpr int kSmemQdOSize = size(SmemLayoutQdO{}) * (No_double_buffer ? 2 : 3) * sizeof(Element); static constexpr int kSmemKVSize = size(SmemLayoutKV{}) * 2 * sizeof(Element); static constexpr int kSmemdSSize = size(SmemLayoutPdS{}) * sizeof(Element); static constexpr int kSmemPSize = size(SmemLayoutPdS{}) * sizeof(Element); static constexpr int kSmemdQSize = size(SmemLayoutdQ{}) * sizeof(Element); static constexpr int kSmemSize = kSmemQdOSize + (!Is_V_in_regs ? kSmemKVSize + kSmemdSSize + std::max(kSmemPSize, kSmemdQSize) : std::max(kSmemKVSize, kSmemKVSize / 2 + kSmemdSSize + std::max(kSmemPSize, kSmemdQSize))); static constexpr int kSmemSize1colblock = kSmemQdOSize + (!Is_V_in_regs ? kSmemKVSize + kSmemdSSize + kSmemPSize : std::max(kSmemKVSize, kSmemKVSize / 2 + kSmemdSSize + kSmemPSize)); static constexpr int kGmemElemsPerLoad = sizeof(cute::uint128_t) / sizeof(Element); static_assert(kHeadDim % kGmemElemsPerLoad == 0, "kHeadDim must be a multiple of kGmemElemsPerLoad"); // Using kBlockKSmem instead of kHeadDim here to avoid bank conflicts, but doesn't seem // to affect speed in practice. static constexpr int kGmemThreadsPerRow = kBlockKSmem / kGmemElemsPerLoad; static_assert(kNThreads % kGmemThreadsPerRow == 0, "kNThreads must be a multiple of kGmemThreadsPerRow"); using GmemLayoutAtom = Layout<Shape <Int<kNThreads / kGmemThreadsPerRow>, Int<kGmemThreadsPerRow>>, Stride<Int<kGmemThreadsPerRow>, _1>>; // We use CACHEGLOBAL instead of CACHEALWAYS for both Q and K/V, since we won't be reading // from the same address by the same threadblock. This is slightly faster. 
using Gmem_copy_struct = std::conditional_t< Has_cp_async, SM80_CP_ASYNC_CACHEGLOBAL<cute::uint128_t>, AutoVectorizingCopyWithAssumedAlignment<128> >; using GmemTiledCopyQKV = decltype( make_tiled_copy(Copy_Atom<Gmem_copy_struct, elem_type>{}, GmemLayoutAtom{}, Layout<Shape<_1, _8>>{})); // Val layout, 8 vals per read using GmemTiledCopydO = decltype( make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, elem_type>{}, GmemLayoutAtom{}, Layout<Shape < _1, _8>>{})); // Val layout, 8 vals per store using GmemTiledCopydKV = decltype( make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, elem_type>{}, GmemLayoutAtom{}, Layout<Shape < _1, _8>>{})); // Val layout, 8 vals per store using GmemTiledCopydQ = decltype( make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, elem_type>{}, GmemLayoutAtom{}, Layout<Shape < _1, _8>>{})); // Val layout, 8 vals per store using GmemLayoutAtomdQaccum = std::conditional_t< kBlockKSmem == 32, Layout<Shape <_32, _8>, // Thread layout, 8 threads per row Stride< _8, _1>>, Layout<Shape <_16, _16>, // Thread layout, 16 threads per row Stride< _16, _1>> >; using GmemTiledCopydQaccum = decltype( make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, ElementAccum>{}, GmemLayoutAtomdQaccum{}, Layout<Shape < _1, _4>>{})); // Val layout, 4 vals per store using GmemTiledCopydQaccumAtomicAdd = decltype( make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, ElementAccum>{}, Layout<Shape <_8, _32>, // Thread layout, 8 threads per row Stride<_32, _1>>{}, Layout<Shape < _1, _1>>{})); // Val layout, 1 val per store }; ////////////////////////////////////////////////////////////////////////////////////////////////////
candle/candle-flash-attn/kernels/kernel_traits.h/0
{ "file_path": "candle/candle-flash-attn/kernels/kernel_traits.h", "repo_id": "candle", "token_count": 7903 }
#include "binary_op_macros.cuh" #include<stdint.h> #if __CUDA_ARCH__ >= 800 BINARY_OP(__nv_bfloat16, badd_bf16, x + y) BINARY_OP(__nv_bfloat16, bdiv_bf16, x / y) BINARY_OP(__nv_bfloat16, bmul_bf16, x * y) BINARY_OP(__nv_bfloat16, bsub_bf16, x - y) BINARY_OP(__nv_bfloat16, bmaximum_bf16, maxg(x, y)) BINARY_OP(__nv_bfloat16, bminimum_bf16, ming(x, y)) BINARY_OP_OUT(__nv_bfloat16, uint8_t, eq_bf16, x == y) BINARY_OP_OUT(__nv_bfloat16, uint8_t, ne_bf16, x != y) BINARY_OP_OUT(__nv_bfloat16, uint8_t, lt_bf16, x < y) BINARY_OP_OUT(__nv_bfloat16, uint8_t, le_bf16, x <= y) BINARY_OP_OUT(__nv_bfloat16, uint8_t, gt_bf16, x > y) BINARY_OP_OUT(__nv_bfloat16, uint8_t, ge_bf16, x >= y) #endif #if __CUDA_ARCH__ >= 530 BINARY_OP(__half, badd_f16, x + y) BINARY_OP(__half, bdiv_f16, x / y) BINARY_OP(__half, bmul_f16, x * y) BINARY_OP(__half, bsub_f16, x - y) BINARY_OP(__half, bmaximum_f16, maxg(x, y)) BINARY_OP(__half, bminimum_f16, ming(x, y)) BINARY_OP_OUT(__half, uint8_t, eq_f16, x == y) BINARY_OP_OUT(__half, uint8_t, ne_f16, x != y) BINARY_OP_OUT(__half, uint8_t, lt_f16, x < y) BINARY_OP_OUT(__half, uint8_t, le_f16, x <= y) BINARY_OP_OUT(__half, uint8_t, gt_f16, x > y) BINARY_OP_OUT(__half, uint8_t, ge_f16, x >= y) #endif BINARY_OP(float, badd_f32, x + y) BINARY_OP(double, badd_f64, x + y); BINARY_OP(uint8_t, badd_u8, x + y); BINARY_OP(uint32_t, badd_u32, x + y); BINARY_OP(int64_t, badd_i64, x + y); BINARY_OP(float, bdiv_f32, x / y) BINARY_OP(double, bdiv_f64, x / y); BINARY_OP(uint8_t, bdiv_u8, x / y); BINARY_OP(uint32_t, bdiv_u32, x / y); BINARY_OP(int64_t, bdiv_i64, x / y); BINARY_OP(float, bmul_f32, x * y) BINARY_OP(double, bmul_f64, x * y); BINARY_OP(uint8_t, bmul_u8, x * y); BINARY_OP(uint32_t, bmul_u32, x * y); BINARY_OP(int64_t, bmul_i64, x * y); BINARY_OP(float, bsub_f32, x - y) BINARY_OP(double, bsub_f64, x - y); BINARY_OP(uint8_t, bsub_u8, x - y); BINARY_OP(uint32_t, bsub_u32, x - y); BINARY_OP(int64_t, bsub_i64, x - y); BINARY_OP(float, bminimum_f32, ming(x, y)); BINARY_OP(double, bminimum_f64, ming(x, y)); BINARY_OP(uint8_t, bminimum_u8, ming(x, y)); BINARY_OP(uint32_t, bminimum_u32, ming(x, y)); BINARY_OP(int64_t, bminimum_i64, ming(x, y)); BINARY_OP(float, bmaximum_f32, maxg(x, y)); BINARY_OP(double, bmaximum_f64, maxg(x, y)); BINARY_OP(uint8_t, bmaximum_u8, maxg(x, y)); BINARY_OP(uint32_t, bmaximum_u32, maxg(x, y)); BINARY_OP(int64_t, bmaximum_i64, maxg(x, y)); BINARY_OP_OUT(float, uint8_t, eq_f32, x == y) BINARY_OP_OUT(double, uint8_t, eq_f64, x == y) BINARY_OP_OUT(uint8_t, uint8_t, eq_u8, x == y) BINARY_OP_OUT(uint32_t, uint8_t, eq_u32, x == y) BINARY_OP_OUT(int64_t, uint8_t, eq_i64, x == y) BINARY_OP_OUT(float, uint8_t, ne_f32, x != y) BINARY_OP_OUT(double, uint8_t, ne_f64, x != y) BINARY_OP_OUT(uint8_t, uint8_t, ne_u8, x != y) BINARY_OP_OUT(uint32_t, uint8_t, ne_u32, x != y) BINARY_OP_OUT(int64_t, uint8_t, ne_i64, x != y) BINARY_OP_OUT(float, uint8_t, lt_f32, x < y) BINARY_OP_OUT(double, uint8_t, lt_f64, x < y) BINARY_OP_OUT(uint8_t, uint8_t, lt_u8, x < y) BINARY_OP_OUT(uint32_t, uint8_t, lt_u32, x < y) BINARY_OP_OUT(int64_t, uint8_t, lt_i64, x < y) BINARY_OP_OUT(float, uint8_t, le_f32, x <= y) BINARY_OP_OUT(double, uint8_t, le_f64, x <= y) BINARY_OP_OUT(uint8_t, uint8_t, le_u8, x <= y) BINARY_OP_OUT(uint32_t, uint8_t, le_u32, x <= y) BINARY_OP_OUT(int64_t, uint8_t, le_i64, x <= y) BINARY_OP_OUT(float, uint8_t, gt_f32, x > y) BINARY_OP_OUT(double, uint8_t, gt_f64, x > y) BINARY_OP_OUT(uint8_t, uint8_t, gt_u8, x > y) BINARY_OP_OUT(uint32_t, uint8_t, gt_u32, x > y) 
BINARY_OP_OUT(int64_t, uint8_t, gt_i64, x > y) BINARY_OP_OUT(float, uint8_t, ge_f32, x >= y) BINARY_OP_OUT(double, uint8_t, ge_f64, x >= y) BINARY_OP_OUT(uint8_t, uint8_t, ge_u8, x >= y) BINARY_OP_OUT(uint32_t, uint8_t, ge_u32, x >= y) BINARY_OP_OUT(int64_t, uint8_t, ge_i64, x >= y)
candle/candle-kernels/src/binary.cu/0
{ "file_path": "candle/candle-kernels/src/binary.cu", "repo_id": "candle", "token_count": 2144 }
use anyhow::Result; use candle_metal_kernels::GemmDType; /// This example contains some simple benchmarks so that it's easy to run them in perf etc. use clap::{Parser, Subcommand}; use half::f16; fn run_gemm(f32: bool, n: usize) -> Result<()> { const WARMUP_ITERS: usize = 2; const MIN_DUR: f64 = 4.; let device = metal::Device::system_default().unwrap(); let (b, m, n, k) = (1, n, n, n); let kernels = candle_metal_kernels::Kernels::new(); let command_queue = device.new_command_queue(); let options = metal::MTLResourceOptions::StorageModeManaged; let (lhs, rhs) = if f32 { let lhs: Vec<f32> = (0..b * m * k).map(|f| f as f32).collect(); let rhs: Vec<f32> = (0..b * n * k).map(|f| f as f32).collect(); let lhs = device.new_buffer_with_data( lhs.as_ptr() as *const core::ffi::c_void, std::mem::size_of_val(&lhs) as u64, options, ); let rhs = device.new_buffer_with_data( rhs.as_ptr() as *const core::ffi::c_void, std::mem::size_of_val(&rhs) as u64, options, ); (lhs, rhs) } else { let lhs: Vec<f16> = (0..b * m * k).map(|f| f16::from_f32(f as f32)).collect(); let rhs: Vec<f16> = (0..b * n * k).map(|f| f16::from_f32(f as f32)).collect(); let lhs = device.new_buffer_with_data( lhs.as_ptr() as *const core::ffi::c_void, std::mem::size_of_val(&lhs) as u64, options, ); let rhs = device.new_buffer_with_data( rhs.as_ptr() as *const core::ffi::c_void, std::mem::size_of_val(&rhs) as u64, options, ); (lhs, rhs) }; let (dtype, sizeof) = if f32 { (GemmDType::F32, core::mem::size_of::<f32>()) } else { (GemmDType::F16, core::mem::size_of::<f16>()) }; let output = device.new_buffer((b * m * n * sizeof) as u64, options); let mut sum_dt = 0f64; let mut iters = 0usize; for idx in 0.. { let command_buffer = command_queue.new_command_buffer(); let start_time = std::time::Instant::now(); candle_metal_kernels::call_mlx_gemm( &device, command_buffer, &kernels, dtype, (b, m, n, k), &[m * k, k, 1], 0, &lhs, &[n * k, n, 1], 0, &rhs, &output, )?; command_buffer.commit(); command_buffer.wait_until_completed(); let dt = start_time.elapsed().as_secs_f64(); if idx < WARMUP_ITERS { continue; } sum_dt += dt; iters += 1; if sum_dt > MIN_DUR { break; } } let gflops = (2 * n * n * n * iters) as f64 / (1e9 * sum_dt); println!("{dtype:?}, {n:6} gflops {gflops:.0}"); Ok(()) } #[derive(Subcommand, Debug, Clone)] enum Task { Gemm, } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] pub struct Args { /// The benchmark to be run. #[command(subcommand)] task: Task, } fn main() -> Result<()> { let args = Args::parse(); match args.task { Task::Gemm => { for f32 in [false, true] { for n in [512, 1024, 2048, 4096] { run_gemm(f32, n)?; } } } } Ok(()) }
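// ---------------------------------------------------------------------------
// Worked example of the GFLOPS arithmetic in `run_gemm` (illustrative only):
// a square n x n GEMM performs roughly 2*n^3 floating point operations, so for
// n = 1024 a single iteration is 2 * 1024^3 ≈ 2.15e9 FLOPs; completing it in
// 1 ms corresponds to about 2.15e9 / (1e9 * 1e-3) ≈ 2147 GFLOPS.
#[allow(dead_code)]
fn gflops(n: usize, iters: usize, elapsed_secs: f64) -> f64 {
    // Same formula as used at the end of `run_gemm`.
    (2 * n * n * n * iters) as f64 / (1e9 * elapsed_secs)
}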
candle/candle-metal-kernels/examples/metal_benchmarks.rs/0
{ "file_path": "candle/candle-metal-kernels/examples/metal_benchmarks.rs", "repo_id": "candle", "token_count": 1833 }
use crate::utils::{BufferOffset, EncoderProvider}; use crate::{set_params, DType, Kernels, MetalKernelError, Source}; use metal::{Buffer, ComputeCommandEncoderRef, Device, MTLResourceOptions, MTLSize}; #[allow(clippy::too_many_arguments)] pub fn call_arg_sort( device: &Device, ep: impl EncoderProvider, kernels: &Kernels, name: &'static str, nrows: usize, ncols: usize, ncols_pad: usize, src: BufferOffset, dst: &Buffer, ) -> Result<(), crate::MetalKernelError> { let pipeline = kernels.load_pipeline(device, Source::Sort, name)?; let encoder = ep.encoder(); let encoder: &ComputeCommandEncoderRef = encoder.as_ref(); encoder.set_compute_pipeline_state(&pipeline); set_params!(encoder, (&src, dst, ncols as i64, ncols_pad as i64)); let thread_group_count = MTLSize { width: 1, height: nrows as u64, depth: 1, }; let thread_group_size = MTLSize { width: ncols_pad as u64, height: 1, depth: 1, }; encoder.use_resource(src.buffer, metal::MTLResourceUsage::Read); encoder.use_resource(dst, metal::MTLResourceUsage::Write); encoder.set_threadgroup_memory_length(0, (ncols_pad * 4).max(16) as u64); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); Ok(()) } fn mlx_dtype_str(dtype: DType) -> &'static str { match dtype { DType::U8 => "uint8", DType::U32 => "uint32", DType::I64 => "int64", DType::F16 => "float16", DType::BF16 => "bfloat16", DType::F32 => "float32", } } #[allow(clippy::too_many_arguments)] pub fn multi_block_sort( device: &Device, ep: impl EncoderProvider, kernels: &Kernels, dtype: DType, bn: usize, tn: usize, nblocks: usize, nrows: usize, ncols: usize, src: BufferOffset, dst: &Buffer, ) -> Result<(), MetalKernelError> { let dtype_str = mlx_dtype_str(dtype); // Do allocations let el_count = nrows * ncols; let bytes_len = (el_count * dtype.size_in_bytes()) as u64; let mut dev_vals_0 = device.new_buffer(bytes_len, MTLResourceOptions::StorageModePrivate); let mut dev_vals_1 = device.new_buffer(bytes_len, MTLResourceOptions::StorageModePrivate); let mut dev_idxs_0 = device.new_buffer(el_count as u64 * 4, MTLResourceOptions::StorageModePrivate); let mut dev_idxs_1 = device.new_buffer(el_count as u64 * 4, MTLResourceOptions::StorageModePrivate); let mut block_partitions = device.new_buffer( (nrows * (nblocks + 1)) as u64 * 4, MTLResourceOptions::StorageModePrivate, ); // Prepare command encoder let encoder = ep.encoder(); let encoder: &ComputeCommandEncoderRef = encoder.as_ref(); // Do blockwise sort { let name = format!("sort_mbsort_{dtype_str}_uint32_bn{bn}_tn{tn}"); let pipeline = kernels.load_pipeline(device, Source::MlxSort, name)?; encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( &src, &mut dev_vals_0, &mut dev_idxs_0, /* size_sorted_axis */ ncols as i32, /* stride_sorted_axis */ 1i32, /* nc_dim */ 1i32, /* nc_shape */ nrows as i32, /* nc_str */ ncols as i32 ) ); let thread_group_count = MTLSize { width: nblocks as u64, height: nrows as u64, depth: 1, }; let thread_group_size = MTLSize { width: bn as u64, height: 1, depth: 1, }; encoder.dispatch_thread_groups(thread_group_count, thread_group_size); } // Do merges let mut ping = false; let mut merge_tiles = 2; let n_thr_per_group = usize::min(nblocks + 1, 1024); let partition_name = format!("partition_mbsort_{dtype_str}_uint32_bn{bn}_tn{tn}"); let merge_name = format!("merge_mbsort_float32_uint32_bn{bn}_tn{tn}"); while merge_tiles / 2 < nblocks { let (dev_vals_in, dev_vals_out) = if ping { (&mut dev_vals_1, &mut dev_vals_0) } else { (&mut dev_vals_0, &mut dev_vals_1) }; let (dev_idxs_in, dev_idxs_out) = if 
ping { (&mut dev_idxs_1, &mut dev_idxs_0) } else { (&mut dev_idxs_0, &mut dev_idxs_1) }; ping = !ping; // Do partition { let pipeline = kernels.load_pipeline(device, Source::MlxSort, partition_name.clone())?; encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( &mut block_partitions, &mut *dev_vals_in, &mut *dev_idxs_in, /* size_sorted_axis */ ncols as i32, /* merge_tiles */ merge_tiles as i32, /* n_blocks */ nblocks as i32 ) ); let thread_group_count = MTLSize { width: 1, height: nrows as u64, depth: 1, }; let thread_group_size = MTLSize { width: n_thr_per_group as u64, height: 1, depth: 1, }; encoder.dispatch_thread_groups(thread_group_count, thread_group_size); } // Do merge { let pipeline = kernels.load_pipeline(device, Source::MlxSort, merge_name.clone())?; encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( &block_partitions, &*dev_vals_in, &*dev_idxs_in, &*dev_vals_out, &*dev_idxs_out, /* size_sorted_axis */ ncols as i32, /* merge_tiles */ merge_tiles as i32, /* n_blocks */ nblocks as i32 ) ); let thread_group_count = MTLSize { width: nblocks as u64, height: nrows as u64, depth: 1, }; let thread_group_size = MTLSize { width: bn as u64, height: 1, depth: 1, }; encoder.dispatch_thread_groups(thread_group_count, thread_group_size); } merge_tiles *= 2; } let dev_idxs_out = if ping { &mut dev_idxs_1 } else { &mut dev_idxs_0 }; // Copy output with appropriate strides let copy_kernel = match dtype { DType::U8 => crate::copy2d::U8, DType::U32 => crate::copy2d::U32, DType::I64 => crate::copy2d::I64, DType::BF16 => crate::copy2d::BFLOAT, DType::F16 => crate::copy2d::HALF, DType::F32 => crate::copy2d::FLOAT, }; crate::call_copy2d( device, encoder, kernels, copy_kernel, dev_idxs_out, dst, /* d1 */ nrows, /* d2 */ ncols, /* src_s */ ncols, /* dst_s */ ncols, /* src_o_in_bytes */ 0, /*dst_o_in_bytes */ 0, )?; Ok(()) } #[allow(clippy::too_many_arguments)] pub fn block_sort( device: &Device, ep: impl EncoderProvider, kernels: &Kernels, dtype: DType, bn: usize, tn: usize, nrows: usize, ncols: usize, src: BufferOffset, dst: &Buffer, ) -> Result<(), MetalKernelError> { let dtype_str = mlx_dtype_str(dtype); let name = format!("carg_block_sort_{dtype_str}_uint32_bn{bn}_tn{tn}"); let pipeline = kernels.load_pipeline(device, Source::MlxSort, name)?; let encoder = ep.encoder(); let encoder: &ComputeCommandEncoderRef = encoder.as_ref(); encoder.set_compute_pipeline_state(&pipeline); set_params!( encoder, ( &src, dst, ncols as i32, 1i32, 1i32, ncols as i32, ncols as i32 ) ); let thread_group_count = MTLSize { width: 1, height: nrows as u64, depth: 1, }; let thread_group_size = MTLSize { width: bn as u64, height: 1, depth: 1, }; encoder.use_resource(src.buffer, metal::MTLResourceUsage::Read); encoder.use_resource(dst, metal::MTLResourceUsage::Write); encoder.dispatch_thread_groups(thread_group_count, thread_group_size); Ok(()) } #[allow(clippy::too_many_arguments)] pub fn call_mlx_arg_sort( device: &Device, ep: impl EncoderProvider, kernels: &Kernels, dtype: DType, nrows: usize, ncols: usize, src: BufferOffset, dst: &Buffer, ) -> Result<(), MetalKernelError> { let tn = 8; let bn = match ncols.div_ceil(tn) { 257.. if dtype.size_in_bytes() <= 4 => 512, 129.. => 256, 0..129 => 128, }; let n_per_block = bn * tn; let n_blocks = ncols.div_ceil(n_per_block); if n_blocks > 1 { multi_block_sort( device, ep, kernels, dtype, bn, tn, n_blocks, nrows, ncols, src, dst, )? } else { block_sort(device, ep, kernels, dtype, bn, tn, nrows, ncols, src, dst)? } Ok(()) }
candle/candle-metal-kernels/src/sort.rs/0
{ "file_path": "candle/candle-metal-kernels/src/sort.rs", "repo_id": "candle", "token_count": 4875 }
use crate::benchmarks::{BenchDevice, BenchDeviceHandler}; use candle::{DType, Device, Tensor}; use candle_nn::ops::softmax_last_dim; use criterion::Throughput; use criterion::{black_box, criterion_group, Criterion}; use std::time::Instant; fn run(input: &Tensor) { let _ = softmax_last_dim(&input).unwrap(); } const B: usize = 1; const M: usize = 1024; const K: usize = 1024; fn run_softmax_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &str) { let elements = B * M * K; let input = Tensor::rand(-1000.0f32, 1000.0f32, (B, M, K), &device) .unwrap() .to_dtype(dtype) .unwrap(); let flops = elements * dtype.size_in_bytes(); let mut group = c.benchmark_group(device.bench_name(name)); group.throughput(Throughput::Bytes(flops as u64)); group.bench_function("iter", move |b| { b.iter_custom(|iters| { let start = Instant::now(); for _i in 0..iters { run(black_box(&input)); } device.sync().unwrap(); start.elapsed() }) }); group.finish(); } fn criterion_benchmark(c: &mut Criterion) { let device = BenchDeviceHandler::new().unwrap(); for d in device.devices { run_softmax_benchmark(c, &d, DType::F32, "softmax_f32"); run_softmax_benchmark(c, &d, DType::BF16, "softmax_bf16"); run_softmax_benchmark(c, &d, DType::F16, "softmax_f16"); } } criterion_group!(benches, criterion_benchmark);
candle/candle-nn/benches/benchmarks/softmax.rs/0
{ "file_path": "candle/candle-nn/benches/benchmarks/softmax.rs", "repo_id": "candle", "token_count": 662 }
//! Tensor ops. //! use candle::{CpuStorage, DType, Layout, Module, Result, Shape, Tensor, D}; use rayon::prelude::*; /// Applies the softmax function to the input tensor, rescaling the element so that elements on /// a slice of fixed index on dimension `dim` are between 0 and 1 and sum to 1. /// /// ```rust /// use candle::{Tensor, Device, test_utils::to_vec2_round}; /// let a = Tensor::new(&[[0f32, 1., 0., 1.], [-2., 2., 3., -3.]], &Device::Cpu)?; /// let a = candle_nn::ops::softmax(&a, 1)?; /// assert_eq!( /// to_vec2_round(&a, 4)?, /// &[ /// [0.1345, 0.3655, 0.1345, 0.3655], /// [0.0049, 0.2671, 0.7262, 0.0018] /// ]); /// # Ok::<(), candle::Error>(()) /// ``` pub fn softmax<D: candle::shape::Dim>(xs: &Tensor, dim: D) -> Result<Tensor> { let dim = dim.to_index(xs.shape(), "softmax")?; let max = xs.max_keepdim(dim)?; let diff = xs.broadcast_sub(&max)?; let num = diff.exp()?; let den = num.sum_keepdim(dim)?; num.broadcast_div(&den) } pub fn log_softmax<D: candle::shape::Dim>(xs: &Tensor, d: D) -> Result<Tensor> { let d = d.to_index(xs.shape(), "log-softmax")?; let max = xs.max_keepdim(d)?; let diff = xs.broadcast_sub(&max)?; let sum_exp = diff.exp()?.sum_keepdim(d)?; let log_sm = diff.broadcast_sub(&sum_exp.log()?)?; Ok(log_sm) } pub fn silu(xs: &Tensor) -> Result<Tensor> { xs.silu() } pub fn swiglu(xs: &Tensor) -> Result<Tensor> { let xs = xs.chunk(2, D::Minus1)?; &xs[0].silu()? * &xs[1] } struct Sigmoid; impl candle::CustomOp1 for Sigmoid { fn name(&self) -> &'static str { "sigmoid" } fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> { use candle::backend::BackendStorage; fn fwd<T: num_traits::Float>(v: T) -> T { (v.neg().exp() + T::one()).recip() } // FIXME: using `candle::map_dtype` causes compilation errors. let storage = match storage { CpuStorage::BF16(slice) => { CpuStorage::BF16(candle::cpu_backend::unary_map(slice, layout, fwd)) } CpuStorage::F16(slice) => { CpuStorage::F16(candle::cpu_backend::unary_map(slice, layout, fwd)) } CpuStorage::F32(slice) => { CpuStorage::F32(candle::cpu_backend::unary_map(slice, layout, fwd)) } CpuStorage::F64(slice) => { CpuStorage::F64(candle::cpu_backend::unary_map(slice, layout, fwd)) } _ => Err(candle::Error::UnsupportedDTypeForOp( storage.dtype(), self.name(), ))?, }; Ok((storage, layout.shape().clone())) } #[cfg(feature = "cuda")] fn cuda_fwd( &self, storage: &candle::CudaStorage, layout: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::backend::BackendStorage; use candle::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig, ValidAsZeroBits, }; use candle::cuda_backend::SlicePtrOrNull; use candle::cuda_backend::{kernel_name, kernels, Map1, WrapErr}; use candle::{CudaDevice, WithDType}; struct S; impl Map1 for S { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>> { let shape = layout.shape(); let dims = shape.dims(); let el_count = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(el_count as u32); let ds = SlicePtrOrNull::params_from_layout(dev, layout)?; let src = &src.slice(layout.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>("usigmoid"), kernels::UNARY)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(el_count) }.w()?; let params = (el_count, dims.len(), &ds, src, &out); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } let dev = storage.device(); let slice = S.map(&storage.slice, dev, layout)?; let dst = candle::CudaStorage { slice, device: dev.clone(), }; Ok((dst, layout.shape().clone())) } #[cfg(feature = "metal")] fn metal_fwd( &self, storage: &candle::MetalStorage, layout: &Layout, ) -> Result<(candle::MetalStorage, Shape)> { use candle::backend::BackendStorage; use candle::MetalError; let device = storage.device(); let dtype = storage.dtype(); let shape = layout.shape(); let el_count = shape.elem_count(); let buffer = device.new_buffer(el_count, dtype, "sigmoid")?; let command_buffer = device.command_buffer()?; command_buffer.set_label("sigmoid"); let src = candle_metal_kernels::BufferOffset { buffer: storage.buffer(), offset_in_bytes: layout.start_offset() * storage.dtype().size_in_bytes(), }; match (el_count % 2, dtype, layout.is_contiguous()) { (0, DType::BF16 | DType::F16, true) => { use candle_metal_kernels::unary::contiguous_tiled; let kernel_name = match dtype { DType::F16 => contiguous_tiled::sigmoid::HALF, DType::F32 => contiguous_tiled::sigmoid::FLOAT, DType::BF16 => contiguous_tiled::sigmoid::BFLOAT, dtype => { candle::bail!( "Metal contiguous_tiled unary sigmoid {dtype:?} not implemented" ) } }; candle_metal_kernels::call_unary_contiguous_tiled( device.metal_device(), &command_buffer, device.kernels(), kernel_name, el_count, src, &buffer, ) .map_err(MetalError::from)?; } (_, _, true) => { use candle_metal_kernels::unary::contiguous; let kernel_name = match dtype { DType::F16 => contiguous::sigmoid::HALF, DType::F32 => contiguous::sigmoid::FLOAT, DType::BF16 => contiguous::sigmoid::BFLOAT, dtype => { candle::bail!("Metal contiguous unary sigmoid {dtype:?} not implemented") } }; candle_metal_kernels::call_unary_contiguous( device.metal_device(), &command_buffer, device.kernels(), kernel_name, el_count, src, &buffer, ) .map_err(MetalError::from)?; } (_, _, false) => { use candle_metal_kernels::unary::strided; let kernel_name = match dtype { DType::F16 => strided::sigmoid::HALF, DType::F32 => strided::sigmoid::FLOAT, DType::BF16 => strided::sigmoid::BFLOAT, dtype => { candle::bail!("Metal strided unary sigmoid {dtype:?} not implemented") } }; let dst = candle_metal_kernels::BufferOffset::zero_offset(&buffer); candle_metal_kernels::call_unary_strided( device.metal_device(), &command_buffer, device.kernels(), kernel_name, layout.dims(), src, layout.stride(), dst, ) .map_err(MetalError::from)?; } } let new_storage = candle::MetalStorage::new(buffer, device.clone(), el_count, dtype); Ok((new_storage, layout.shape().clone())) } fn bwd(&self, _arg: &Tensor, res: &Tensor, grad_res: &Tensor) -> Result<Option<Tensor>> { // d/dx sigmoid(x) = (1 - sigmoid(x)) * sigmoid(x) let d_dx_sigmoid = res.ones_like()?.sub(res)?.mul(res)?; Ok(Some(grad_res.mul(&d_dx_sigmoid)?)) } } pub fn sigmoid(xs: &Tensor) -> Result<Tensor> { xs.apply_op1(Sigmoid) } pub fn hard_sigmoid(xs: &Tensor) -> Result<Tensor> { // TODO: Should we have a specialized op for this? ((xs + 3.0)? / 6.0)?.clamp(0f32, 1f32) } pub fn leaky_relu(xs: &Tensor, negative_slope: f64) -> Result<Tensor> { let zeros = xs.zeros_like()?; xs.maximum(&zeros)? + xs.minimum(&zeros)? * negative_slope } pub fn dropout(xs: &Tensor, drop_p: f32) -> Result<Tensor> { // This implementation is inefficient as it stores the full mask for the backward pass. // Instead we could just store the seed and have a specialized kernel that would both // generate the random mask and apply it. 
// Another easier optimization would be to be able to generate boolean mask using just a bit of // entropy per element rather than generating a full float per element. if !(0. ..1.).contains(&drop_p) { candle::bail!("dropout probability has to be in [0, 1), got {drop_p}") } let rand = Tensor::rand(0f32, 1f32, xs.shape(), xs.device())?; let scale = 1.0 / (1.0 - drop_p as f64); let drop_p = Tensor::new(drop_p, xs.device())?.broadcast_as(xs.shape())?; let mask = (rand.ge(&drop_p)?.to_dtype(xs.dtype())? * scale)?; xs * mask } #[derive(Clone, Debug)] pub struct Dropout { drop_p: f32, } impl Dropout { pub fn new(drop_p: f32) -> Dropout { Self { drop_p } } pub fn forward(&self, xs: &Tensor, train: bool) -> Result<Tensor> { if train { dropout(xs, self.drop_p) } else { Ok(xs.clone()) } } } impl candle::ModuleT for Dropout { fn forward_t(&self, xs: &Tensor, train: bool) -> Result<Tensor> { self.forward(xs, train) } } struct SoftmaxLastDim; impl candle::CustomOp1 for SoftmaxLastDim { fn name(&self) -> &'static str { "softmax-last-dim" } fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> { fn softmax<T: candle::WithDType + num_traits::Float>( src: &[T], layout: &Layout, ) -> Result<(CpuStorage, Shape)> { let src = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => &src[o1..o2], }; let el_count = layout.shape().elem_count(); let dims = layout.shape().dims(); let dim_m1 = dims[dims.len() - 1]; let mut dst = vec![T::zero(); el_count]; src.par_chunks(dim_m1) .zip(dst.par_chunks_mut(dim_m1)) .for_each(|(src, dst)| { let mut max = T::neg_infinity(); unsafe { T::vec_reduce_max(src.as_ptr(), &mut max, dim_m1) }; for (s, d) in src.iter().zip(dst.iter_mut()) { *d = (*s - max).exp(); } let mut sum_exp = T::zero(); unsafe { T::vec_reduce_sum(dst.as_ptr(), &mut sum_exp, dim_m1) }; for d in dst.iter_mut() { *d /= sum_exp } }); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, Shape::from_dims(dims))) } match storage { CpuStorage::BF16(slice) => softmax::<half::bf16>(slice, layout), CpuStorage::F16(slice) => softmax::<half::f16>(slice, layout), CpuStorage::F32(slice) => softmax::<f32>(slice, layout), CpuStorage::F64(slice) => softmax::<f64>(slice, layout), _ => candle::bail!("unsupported dtype for softmax {:?}", storage), } } #[cfg(feature = "cuda")] fn cuda_fwd( &self, storage: &candle::CudaStorage, layout: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig, }; use candle::cuda_backend::{kernel_name, kernels, Map1, WrapErr}; use candle::{CudaDevice, WithDType}; struct S; impl Map1 for S { fn f<T: DeviceRepr + WithDType>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>> { let src = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => src.slice(o1..o2), }; let el = layout.shape().elem_count(); let dims = layout.shape().dims(); let dim_m1 = dims[dims.len() - 1]; let (n_rows, n_cols) = (el / dim_m1, dim_m1); let cfg = LaunchConfig { grid_dim: (n_rows as u32, 1, 1), block_dim: (1, 32, 1), shared_mem_bytes: 0, }; let func = dev.get_or_load_func(&kernel_name::<T>("softmax"), kernels::REDUCE)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(el) }.w()?; let params = (&src, &dst, n_cols as i32); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(dst) } } use candle::backend::BackendStorage; let dev = storage.device(); let slice = S.map(&storage.slice, dev, layout)?; let dst = candle::cuda_backend::CudaStorage { slice, device: dev.clone(), }; Ok((dst, layout.shape().clone())) } #[cfg(feature = "metal")] fn metal_fwd( &self, storage: &candle::MetalStorage, layout: &Layout, ) -> Result<(candle::MetalStorage, Shape)> { use candle::backend::BackendStorage; let device = storage.device(); let command_buffer = device.command_buffer()?; let kernels = device.kernels(); let name = match storage.dtype() { DType::F32 => "softmax_f32", DType::F16 => "softmax_f16", DType::BF16 => "softmax_bf16", dtype => candle::bail!("softmax-last-dim is not implemented for {dtype:?}"), }; let n = layout.stride().len(); if !(layout.is_contiguous() && layout.stride()[n - 1] == 1) { candle::bail!("Non contiguous softmax-last-dim is not implemented"); } let last_dim = layout.dims()[layout.shape().rank() - 1]; let elem_count = layout.shape().elem_count(); let output = device.new_buffer(elem_count, storage.dtype(), "softmax")?; candle_metal_kernels::call_last_softmax( device.metal_device(), &command_buffer, kernels, name, elem_count, last_dim, storage.buffer(), layout.start_offset() * storage.dtype().size_in_bytes(), &output, ) .map_err(candle::Error::wrap)?; let newstorage = candle::MetalStorage::new(output, device.clone(), elem_count, storage.dtype()); Ok((newstorage, layout.shape().clone())) } } pub fn softmax_last_dim(xs: &Tensor) -> Result<Tensor> { xs.apply_op1_no_bwd(&SoftmaxLastDim) } #[derive(Debug, Clone)] struct RmsNorm { eps: f32, } impl candle::CustomOp2 for RmsNorm { fn name(&self) -> &'static str { "rms-norm" } fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, ) -> Result<(CpuStorage, Shape)> { use candle::backend::BackendStorage; let eps = self.eps; fn inner< T: candle::WithDType + num_traits::Float + num_traits::AsPrimitive<f32> + num_traits::FromPrimitive, >( src: &[T], layout: &Layout, alpha: &[T], alpha_layout: &Layout, eps: f32, ) -> Result<(CpuStorage, Shape)> { let src = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => &src[o1..o2], }; let alpha = match alpha_layout.contiguous_offsets() { None => candle::bail!("alpha has to be contiguous"), Some((o1, o2)) => &alpha[o1..o2], }; let el_count = layout.shape().elem_count(); let dims = layout.shape().dims(); let dim_m1 = dims[dims.len() - 1]; let mut dst = vec![T::zero(); el_count]; src.par_chunks(dim_m1) .zip(dst.par_chunks_mut(dim_m1)) .for_each(|(src, dst)| { let sum2 = src .iter() .map(|&v| { let v = v.as_(); v * v }) .sum::<f32>(); let m = (sum2 / dim_m1 as f32 + eps).sqrt(); let m = T::from_f32(m).unwrap_or_else(T::nan); for ((d, s), alpha) in dst.iter_mut().zip(src.iter()).zip(alpha) { *d = *s / m * *alpha } }); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, Shape::from_dims(dims))) } use CpuStorage as C; match (s1, s2) { (C::BF16(s1), C::BF16(s2)) => inner::<half::bf16>(s1, l1, s2, l2, eps), (C::F16(s1), C::F16(s2)) => inner::<half::f16>(s1, l1, s2, l2, eps), (C::F32(s1), C::F32(s2)) => inner::<f32>(s1, l1, s2, l2, eps), _ => candle::bail!("unsupported dtype for rmsnorm {:?}", s1.dtype()), } } #[cfg(feature = "cuda")] fn cuda_fwd( &self, s1: &candle::CudaStorage, l1: &Layout, s2: &candle::CudaStorage, l2: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchAsync, 
LaunchConfig, }; use candle::cuda_backend::{kernel_name, kernels, Map2, WrapErr}; use candle::{CudaDevice, WithDType}; struct S { eps: f32, } impl Map2 for S { fn f<T: DeviceRepr + WithDType>( &self, src: &CudaSlice<T>, layout: &Layout, alpha: &CudaSlice<T>, alpha_layout: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { let src = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => src.slice(o1..o2), }; let alpha = match alpha_layout.contiguous_offsets() { None => candle::bail!("alpha has to be contiguous"), Some((o1, o2)) => alpha.slice(o1..o2), }; let el = layout.shape().elem_count(); let dims = layout.shape().dims(); let dim_m1 = dims[dims.len() - 1]; let (n_rows, n_cols) = (el / dim_m1, dim_m1); let block_size = if n_cols < 1024 { 32 } else { 1024 }; let cfg = LaunchConfig { grid_dim: (n_rows as u32, 1, 1), block_dim: (block_size, 1, 1), shared_mem_bytes: 0, }; let func = dev.get_or_load_func(&kernel_name::<T>("rmsnorm"), kernels::REDUCE)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(el) }.w()?; let params = ( &src, &dst, &alpha, n_cols as i32, block_size as i32, self.eps, ); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(dst) } } use candle::backend::BackendStorage; let dev = s1.device(); let slice = S { eps: self.eps }.map(&s1.slice, l1, &s2.slice, l2, dev)?; let dst = candle::cuda_backend::CudaStorage { slice, device: dev.clone(), }; Ok((dst, l1.shape().clone())) } #[cfg(feature = "metal")] fn metal_fwd( &self, s1: &candle::MetalStorage, l1: &Layout, s2: &candle::MetalStorage, l2: &Layout, ) -> Result<(candle::MetalStorage, Shape)> { use candle::backend::BackendStorage; let device = s1.device(); let command_buffer = device.command_buffer()?; let kernels = device.kernels(); let name = match (s1.dtype(), s2.dtype()) { (DType::F32, DType::F32) => "rmsnorm_f32", (DType::F16, DType::F16) => "rmsnorm_f16", (DType::BF16, DType::BF16) => "rmsnorm_bf16", (dt1, dt2) => candle::bail!("rmsnorm is not implemented for {dt1:?} {dt2:?}"), }; if !(l1.is_contiguous() && l2.is_contiguous()) { candle::bail!("Non contiguous rmsnorm is not implemented"); } let last_dim = l1.dims()[l1.shape().rank() - 1]; let elem_count = l1.shape().elem_count(); let output = device.new_buffer(elem_count, s1.dtype(), "rmsnorm")?; candle_metal_kernels::call_rms_norm( device.metal_device(), &command_buffer, kernels, name, elem_count, last_dim, self.eps, s1.buffer(), l1.start_offset() * s1.dtype().size_in_bytes(), s2.buffer(), l2.start_offset() * s2.dtype().size_in_bytes(), &output, ) .map_err(candle::Error::wrap)?; let newstorage = candle::MetalStorage::new(output, device.clone(), elem_count, s1.dtype()); Ok((newstorage, l1.shape().clone())) } } pub fn rms_norm_slow(x: &Tensor, alpha: &Tensor, eps: f32) -> Result<Tensor> { let x_dtype = x.dtype(); let internal_dtype = match x_dtype { DType::F16 | DType::BF16 => DType::F32, d => d, }; let hidden_size = x.dim(D::Minus1)?; let x = x.to_dtype(internal_dtype)?; let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? 
/ hidden_size as f64)?; let x_normed = x.broadcast_div(&(norm_x + eps as f64)?.sqrt()?)?; x_normed.to_dtype(x_dtype)?.broadcast_mul(alpha) } pub fn rms_norm(xs: &Tensor, alpha: &Tensor, eps: f32) -> Result<Tensor> { let hidden_size_xs = xs.dim(D::Minus1)?; let hidden_size_alpha = alpha.dims1()?; if hidden_size_xs != hidden_size_alpha { candle::bail!( "shape mismatch in rms-norm {:?} {:?}", xs.shape(), alpha.shape() ) } xs.apply_op2_no_bwd(alpha, &RmsNorm { eps }) } #[derive(Debug, Clone)] struct LayerNorm { eps: f32, } impl candle::CustomOp3 for LayerNorm { fn name(&self) -> &'static str { "layer-norm" } fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, s3: &CpuStorage, l3: &Layout, ) -> Result<(CpuStorage, Shape)> { use candle::backend::BackendStorage; let eps = self.eps; fn inner< T: candle::WithDType + num_traits::Float + num_traits::AsPrimitive<f32> + num_traits::FromPrimitive, >( src: &[T], layout: &Layout, alpha: &[T], alpha_layout: &Layout, beta: &[T], beta_layout: &Layout, eps: f32, ) -> Result<(CpuStorage, Shape)> { let src = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => &src[o1..o2], }; let alpha = match alpha_layout.contiguous_offsets() { None => candle::bail!("alpha has to be contiguous"), Some((o1, o2)) => &alpha[o1..o2], }; let beta = match beta_layout.contiguous_offsets() { None => candle::bail!("beta has to be contiguous"), Some((o1, o2)) => &beta[o1..o2], }; let el_count = layout.shape().elem_count(); let dims = layout.shape().dims(); let dim_m1 = dims[dims.len() - 1]; let mut dst = vec![T::zero(); el_count]; src.par_chunks(dim_m1) .zip(dst.par_chunks_mut(dim_m1)) .for_each(|(src, dst)| { let mut sum = 0f32; let mut sum2 = 0f32; for v in src { let v = v.as_(); sum += v; sum2 += v * v; } let mean = sum / dim_m1 as f32; let var = sum2 / dim_m1 as f32 - mean * mean; let inv_std = (var + eps).sqrt().recip(); for ((d, s), (alpha, beta)) in dst.iter_mut().zip(src.iter()).zip(alpha.iter().zip(beta)) { let alpha = alpha.as_(); let beta = beta.as_(); let d_ = (s.as_() - mean) * inv_std * alpha + beta; *d = T::from_f32(d_).unwrap_or_else(T::nan); } }); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, Shape::from_dims(dims))) } use CpuStorage as C; match (s1, s2, s3) { (C::BF16(s1), C::BF16(s2), C::BF16(s3)) => { inner::<half::bf16>(s1, l1, s2, l2, s3, l3, eps) } (C::F16(s1), C::F16(s2), C::F16(s3)) => inner::<half::f16>(s1, l1, s2, l2, s3, l3, eps), (C::F32(s1), C::F32(s2), C::F32(s3)) => inner::<f32>(s1, l1, s2, l2, s3, l3, eps), _ => candle::bail!("unsupported dtype for rmsnorm {:?}", s1.dtype()), } } #[cfg(feature = "cuda")] fn cuda_fwd( &self, s1: &candle::CudaStorage, l1: &Layout, s2: &candle::CudaStorage, l2: &Layout, s3: &candle::CudaStorage, l3: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig, }; use candle::cuda_backend::{kernel_name, kernels, Map3, WrapErr}; use candle::{CudaDevice, WithDType}; struct S { eps: f32, } impl Map3 for S { fn f<T: DeviceRepr + WithDType>( &self, src: &CudaSlice<T>, layout: &Layout, alpha: &CudaSlice<T>, alpha_layout: &Layout, beta: &CudaSlice<T>, beta_layout: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { let src = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => src.slice(o1..o2), }; let alpha = match alpha_layout.contiguous_offsets() { None => candle::bail!("alpha has 
to be contiguous"), Some((o1, o2)) => alpha.slice(o1..o2), }; let beta = match beta_layout.contiguous_offsets() { None => candle::bail!("beta has to be contiguous"), Some((o1, o2)) => beta.slice(o1..o2), }; let el = layout.shape().elem_count(); let dims = layout.shape().dims(); let dim_m1 = dims[dims.len() - 1]; let (n_rows, n_cols) = (el / dim_m1, dim_m1); let block_size = if n_cols < 1024 { 32 } else { 1024 }; let cfg = LaunchConfig { grid_dim: (n_rows as u32, 1, 1), block_dim: (block_size, 1, 1), shared_mem_bytes: 0, }; let func = dev.get_or_load_func(&kernel_name::<T>("layernorm"), kernels::REDUCE)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(el) }.w()?; let params = ( &src, &dst, &alpha, &beta, n_cols as i32, block_size as i32, self.eps, ); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(dst) } } use candle::backend::BackendStorage; let dev = s1.device(); let slice = S { eps: self.eps }.map(&s1.slice, l1, &s2.slice, l2, &s3.slice, l3, dev)?; let dst = candle::cuda_backend::CudaStorage { slice, device: dev.clone(), }; Ok((dst, l1.shape().clone())) } #[cfg(feature = "metal")] fn metal_fwd( &self, s1: &candle::MetalStorage, l1: &Layout, s2: &candle::MetalStorage, l2: &Layout, s3: &candle::MetalStorage, l3: &Layout, ) -> Result<(candle::MetalStorage, Shape)> { use candle::backend::BackendStorage; let device = s1.device(); let command_buffer = device.command_buffer()?; let kernels = device.kernels(); let name = match (s1.dtype(), s2.dtype(), s3.dtype()) { (DType::F32, DType::F32, DType::F32) => "layernorm_f32", (DType::F16, DType::F16, DType::F16) => "layernorm_f16", (DType::BF16, DType::BF16, DType::BF16) => "layernorm_bf16", (dt1, dt2, dt3) => { candle::bail!("layernorm is not implemented for {dt1:?} {dt2:?} {dt3:?}") } }; if !(l1.is_contiguous() && l2.is_contiguous() && l3.is_contiguous()) { candle::bail!("Non contiguous layernorm is not implemented"); } let last_dim = l1.dims()[l1.shape().rank() - 1]; let elem_count = l1.shape().elem_count(); let output = device.new_buffer(elem_count, s1.dtype(), "layernorm")?; candle_metal_kernels::call_layer_norm( device.metal_device(), &command_buffer, kernels, name, elem_count, last_dim, self.eps, s1.buffer(), l1.start_offset() * s1.dtype().size_in_bytes(), s2.buffer(), l2.start_offset() * s2.dtype().size_in_bytes(), s3.buffer(), l3.start_offset() * s3.dtype().size_in_bytes(), &output, ) .map_err(candle::Error::wrap)?; let newstorage = candle::MetalStorage::new(output, device.clone(), elem_count, s1.dtype()); Ok((newstorage, l1.shape().clone())) } } pub fn layer_norm_slow(x: &Tensor, alpha: &Tensor, beta: &Tensor, eps: f32) -> Result<Tensor> { let x_dtype = x.dtype(); let internal_dtype = match x_dtype { DType::F16 | DType::BF16 => DType::F32, d => d, }; let hidden_size = x.dim(D::Minus1)?; let x = x.to_dtype(internal_dtype)?; let x = { let mean_x = (x.sum_keepdim(D::Minus1)? / hidden_size as f64)?; x.broadcast_sub(&mean_x)? }; let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?; let x_normed = x.broadcast_div(&(norm_x + eps as f64)?.sqrt()?)?; x_normed .to_dtype(x_dtype)? .broadcast_mul(alpha)? 
.broadcast_add(beta) } pub fn layer_norm(xs: &Tensor, alpha: &Tensor, beta: &Tensor, eps: f32) -> Result<Tensor> { let hidden_size_xs = xs.dim(D::Minus1)?; let hidden_size_alpha = alpha.dims1()?; let hidden_size_beta = beta.dims1()?; if hidden_size_xs != hidden_size_alpha || hidden_size_xs != hidden_size_beta { candle::bail!( "shape mismatch in layer-norm src: {:?} alpha: {:?} beta: {:?}", xs.shape(), alpha.shape(), beta.shape() ) } xs.apply_op3_no_bwd(alpha, beta, &LayerNorm { eps }) } // https://pytorch.org/docs/stable/generated/torch.nn.PixelShuffle.html pub fn pixel_shuffle(xs: &Tensor, upscale_factor: usize) -> Result<Tensor> { let (b_size, c, h, w) = xs.dims4()?; let out_c = c / upscale_factor / upscale_factor; xs.reshape((b_size, out_c, upscale_factor, upscale_factor, h, w))? .permute((0, 1, 4, 2, 5, 3))? .reshape((b_size, out_c, h * upscale_factor, w * upscale_factor)) } pub fn pixel_unshuffle(xs: &Tensor, downscale_factor: usize) -> Result<Tensor> { let (b_size, c, h, w) = xs.dims4()?; let out_c = c * downscale_factor * downscale_factor; xs.reshape(( b_size, c, h / downscale_factor, downscale_factor, w / downscale_factor, downscale_factor, ))? .permute((0, 1, 3, 5, 2, 4))? .reshape((b_size, out_c, h / downscale_factor, w / downscale_factor)) } // https://pytorch.org/docs/stable/generated/torch.nn.ReplicationPad2d.html pub fn replication_pad2d(xs: &Tensor, pad: usize) -> Result<Tensor> { match pad { 0 => Ok(xs.clone()), 1 => { let (_b_size, _c, h, w) = xs.dims4()?; let (first, last) = (xs.narrow(3, 0, 1)?, xs.narrow(3, w - 1, 1)?); let xs = Tensor::cat(&[&first, xs, &last], 3)?; let (first, last) = (xs.narrow(2, 0, 1)?, xs.narrow(2, h - 1, 1)?); Tensor::cat(&[&first, &xs, &last], 2) } n => candle::bail!("replication-pad with a size of {n} is not supported"), } } #[derive(Clone, Debug)] pub struct Identity; impl Identity { pub fn new() -> Identity { Self } } impl Default for Identity { fn default() -> Self { Self } } impl Module for Identity { fn forward(&self, xs: &Tensor) -> Result<Tensor> { Ok(xs.clone()) } } #[allow(dead_code)] struct Sdpa { scale: f32, softcapping: f32, } impl candle::CustomOp3 for Sdpa { fn name(&self) -> &'static str { "metal-sdpa" } fn cpu_fwd( &self, _s1: &CpuStorage, _l1: &Layout, _s2: &CpuStorage, _l2: &Layout, _s3: &CpuStorage, _l3: &Layout, ) -> Result<(CpuStorage, Shape)> { candle::bail!("SDPA has no cpu impl") } #[cfg(feature = "metal")] fn metal_fwd( &self, q: &candle::MetalStorage, q_l: &Layout, k: &candle::MetalStorage, k_l: &Layout, v: &candle::MetalStorage, v_l: &Layout, ) -> Result<(candle::MetalStorage, Shape)> { use candle::backend::BackendStorage; use candle_metal_kernels::SdpaDType; let device = q.device(); let out_dims = vec![q_l.dim(0)?, q_l.dim(1)?, q_l.dim(2)?, v_l.dim(3)?]; let elem_count: usize = out_dims.iter().product(); let output = device.new_buffer(elem_count, q.dtype(), "sdpa_o")?; // q,k must have matching emb dim if q_l.dim(D::Minus1)? != k_l.dim(D::Minus1)? { candle::bail!("`q` and `k` last dims must match"); } // k,v must have matching n kv heads if v_l.dim(D::Minus(3))? != k_l.dim(D::Minus(3))? { candle::bail!("`k` and `v` head dims must match"); } // n_heads % n_kv_heads == 0; n_heads >= 1, n_kv_heads >= 1. if q_l.dim(D::Minus(3))? % k_l.dim(D::Minus(3))? 
!= 0 { candle::bail!("query `n_heads` must be a multiple of `n_kv_heads`"); } let k_head = k_l.dim(D::Minus1)?; let q_head = q_l.dim(D::Minus1)?; let q_seq = q_l.dim(2)?; let mut implementation_supports_use_case = q_head == k_head; let supported_head_dim = q_head == 32 || q_head == 64 || q_head == 96 || q_head == 128 || q_head == 256; const SDPA_FULL_THRESHOLD: usize = 2; let supports_sdpa_full = q_seq >= SDPA_FULL_THRESHOLD && supported_head_dim && q_head == k_head; let supports_sdpa_vector = q_seq == 1 && supported_head_dim; implementation_supports_use_case &= supports_sdpa_full || supports_sdpa_vector; if !supported_head_dim { candle::bail!( "Meta SDPA does not support q head dim {q_head}: q dims {:?}, k dims {:?}, v dims {:?}.", q_l.dims(), k_l.dims(), v_l.dims() ); } if !implementation_supports_use_case { candle::bail!( "Meta SDPA does not support q dims {:?}, k dims {:?}, v dims {:?}.", q_l.dims(), k_l.dims(), v_l.dims() ); } for t in [k.dtype(), v.dtype()] { if q.dtype() != t { candle::bail!("all q, k, v dtypes must match."); } } let itype = match q.dtype() { DType::BF16 => SdpaDType::BF16, DType::F16 => SdpaDType::F16, DType::F32 => SdpaDType::F32, other => candle::bail!("unsupported sdpa type {other:?}"), }; let command_buffer = q.device().command_buffer()?; if supports_sdpa_vector { // Route to the 2 pass fused attention if the k seqlen is large. // https://github.com/ml-explore/mlx/pull/1597 const TWO_PASS_K_THRESHOLD: usize = 1024; if k_l.dim(2)? >= TWO_PASS_K_THRESHOLD { let mut intermediate_shape = [ &out_dims[0..out_dims.len() - 2], &[candle_metal_kernels::SDPA_2PASS_BLOCKS], &[out_dims[out_dims.len() - 1]], ] .concat(); let intermediate = device.new_buffer( intermediate_shape.iter().product::<usize>(), DType::F32, "sdpa_2pass_intermediate", )?; let _ = intermediate_shape.pop().unwrap(); let sums = device.new_buffer( intermediate_shape.iter().product::<usize>(), DType::F32, "sdpa_2pass_sums", )?; let maxs = device.new_buffer( intermediate_shape.iter().product::<usize>(), DType::F32, "sdpa_2pass_maxs", )?; command_buffer.set_label("vector_attention"); candle_metal_kernels::call_sdpa_vector_2pass( q.device().device(), &command_buffer, q.device().kernels(), q_l.start_offset(), q_l.dims(), q.buffer(), k_l.start_offset(), k_l.dims(), k_l.stride(), k.buffer(), v_l.start_offset(), v_l.stride(), v.buffer(), &output, &intermediate, &sums, &maxs, self.scale, self.softcapping, itype, ) .map_err(candle::Error::wrap)?; } else { command_buffer.set_label("vector_attention"); candle_metal_kernels::call_sdpa_vector( q.device().device(), &command_buffer, q.device().kernels(), q_l.start_offset(), q_l.dims(), q.buffer(), k_l.start_offset(), k_l.dims(), k_l.stride(), k.buffer(), v_l.start_offset(), v_l.stride(), v.buffer(), &output, self.scale, self.softcapping, itype, ) .map_err(candle::Error::wrap)?; } } else if supports_sdpa_full { if q_l.dim(2)? != k_l.dim(2)? 
{ candle::bail!( "query and key sequence length must be equal if using full metal sdpa" ) } command_buffer.set_label("full_attention"); candle_metal_kernels::call_sdpa_full( q.device().device(), &command_buffer, q.device().kernels(), q_l.start_offset(), q_l.dims(), q.buffer(), k_l.start_offset(), k.buffer(), v_l.start_offset(), v.buffer(), &output, self.scale, self.softcapping, itype, ) .map_err(candle::Error::wrap)?; } else { candle::bail!("must be vector or full sdpa kernel"); } let newstorage = candle::MetalStorage::new(output, device.clone(), elem_count, q.dtype()); Ok((newstorage, Shape::from_dims(&out_dims))) } } /// Scaled dot product attention with a fused kernel. /// /// Computes softmax(qk^T*scale)v. /// /// **Inputs shapes:** /// - `q`: (bs, qhead, seq, hidden) /// - `k`: (bs, kv_head, kv_seq, hidden) /// - `v`: (bs, kv_head, kv_seq, v_hidden) /// - `scale` is applied before softmax. /// - If `softcapping` != 1.0: /// - Computation is: softmax(tanh(qk^T*scale/cap)*cap)v /// /// **Output shape:** (bs, qhead, seq, v_hidden) /// /// **Supported head dims:** 32, 64, 96, 128, 256. /// /// ## On Metal: /// - If `seq` == 1: /// - Use a vectorized kernel /// - Supports `seq` != `kv_seq` (cross attn. support) /// - Supports GQA when `qhead` is a multiple of `kv_head` /// - Otherwise: /// - Use an alternate kernel /// - Requires `seq` == `kv_seq` /// - GQA is not supported (requires `qhead` == `kv_head`) pub fn sdpa(q: &Tensor, k: &Tensor, v: &Tensor, scale: f32, softcapping: f32) -> Result<Tensor> { q.apply_op3_no_bwd(k, v, &Sdpa { scale, softcapping }) }
candle/candle-nn/src/ops.rs/0
{ "file_path": "candle/candle-nn/src/ops.rs", "repo_id": "candle", "token_count": 23996 }
#[cfg(feature = "metal")] mod metal_sdpa_tests { use candle::{DType, Device, Result, Shape, Tensor}; use rand::SeedableRng; use rand_distr::Distribution; use std::ops::{Div, Mul}; fn randn<S: Into<Shape>>( rng: &mut rand::rngs::StdRng, shape: S, dev: &Device, ) -> Result<Tensor> { let shape = shape.into(); let elem_count = shape.elem_count(); let normal = rand_distr::Normal::new(0.0, 1.0).unwrap(); let vs: Vec<f32> = (0..elem_count).map(|_| normal.sample(rng)).collect(); Tensor::from_vec(vs, &shape, dev) } #[test] fn sdpa_full() -> Result<()> { // Force seqlen = 100 const BS: usize = 4; const R: usize = 4; const L: usize = 4; const DK: usize = 64; const H: usize = 3; let scale: f64 = f64::from(DK as u32).sqrt().recip(); let device = Device::new_metal(0)?; let mut rng = rand::rngs::StdRng::seed_from_u64(42); let q = randn(&mut rng, (BS, H, R, DK), &device)?; let k = randn(&mut rng, (BS, H, L, DK), &device)?; let v = randn(&mut rng, (BS, H, L, DK), &device)?; let ground_truth = { let att = (q.clone() * scale)?.matmul(&k.clone().t()?)?; let att = candle_nn::ops::softmax_last_dim(&att.to_dtype(DType::F32)?)? .to_dtype(q.dtype())?; att.matmul(&v.clone())? }; let sdpa_output = candle_nn::ops::sdpa(&q, &k, &v, scale as f32, 1.)?; assert_eq!(ground_truth.shape(), sdpa_output.shape()); let error: f32 = ((&ground_truth - &sdpa_output)?.abs()? / &ground_truth.abs()?)? .sum_all()? .to_scalar()?; assert!(error <= 0.0004, "{}", error); Ok(()) } #[test] fn sdpa_vector() -> Result<()> { // Allow vectorized, seqlen = 1 const BS: usize = 4; const R: usize = 1; const L: usize = 1; const DK: usize = 64; const H: usize = 3; let scale: f64 = f64::from(DK as u32).sqrt().recip(); let device = Device::new_metal(0)?; let mut rng = rand::rngs::StdRng::seed_from_u64(4242); let q = randn(&mut rng, (BS, H, R, DK), &device)?; let k = randn(&mut rng, (BS, H, L, DK), &device)?; let v = randn(&mut rng, (BS, H, L, DK), &device)?; let ground_truth = { let att = (q.clone() * scale)?.matmul(&k.clone().t()?)?; let att = candle_nn::ops::softmax_last_dim(&att.to_dtype(DType::F32)?)? .to_dtype(q.dtype())?; att.matmul(&v.clone())? }; let sdpa_output = candle_nn::ops::sdpa(&q, &k, &v, scale as f32, 1.)?; assert_eq!(ground_truth.shape(), sdpa_output.shape()); let error: f32 = ((&ground_truth - &sdpa_output)?.abs()? / &ground_truth.abs()?)? .sum_all()? .to_scalar()?; assert!(error <= 0.000, "{}", error); Ok(()) } #[test] fn sdpa_full_softcapping() -> Result<()> { // Allow vectorized, seqlen = 1 const BS: usize = 4; const R: usize = 4; const L: usize = 4; const DK: usize = 64; const H: usize = 3; const SOFTCAP: f64 = 50.; let scale: f64 = f64::from(DK as u32).sqrt().recip(); let device = Device::new_metal(0)?; let mut rng = rand::rngs::StdRng::seed_from_u64(424242); let q = randn(&mut rng, (BS, H, R, DK), &device)?; let k = randn(&mut rng, (BS, H, L, DK), &device)?; let v = randn(&mut rng, (BS, H, L, DK), &device)?; let ground_truth = { let att = (q.clone() * scale)?.matmul(&k.clone().t()?)?; let att = candle_nn::ops::softmax_last_dim( &att.to_dtype(DType::F32)? .div(SOFTCAP)? .tanh()? .mul(SOFTCAP)?, )? .to_dtype(q.dtype())?; att.matmul(&v.clone())? }; let sdpa_output = candle_nn::ops::sdpa(&q, &k, &v, scale as f32, SOFTCAP as f32)?; assert_eq!(ground_truth.shape(), sdpa_output.shape()); let error: f32 = ((&ground_truth - &sdpa_output)?.abs()? / &ground_truth.abs()?)? .sum_all()? 
.to_scalar()?; assert!(error <= 0.0005, "{}", error); Ok(()) } #[test] fn sdpa_vector_softcapping() -> Result<()> { // Allow vectorized, seqlen = 1 const BS: usize = 4; const R: usize = 1; const L: usize = 1; const DK: usize = 64; const H: usize = 3; const SOFTCAP: f64 = 50.; let scale: f64 = f64::from(DK as u32).sqrt().recip(); let device = Device::new_metal(0)?; let mut rng = rand::rngs::StdRng::seed_from_u64(42424242); let q = randn(&mut rng, (BS, H, R, DK), &device)?; let k = randn(&mut rng, (BS, H, L, DK), &device)?; let v = randn(&mut rng, (BS, H, L, DK), &device)?; let ground_truth = { let att = (q.clone() * scale)?.matmul(&k.clone().t()?)?; let att = candle_nn::ops::softmax_last_dim( &att.to_dtype(DType::F32)? .div(SOFTCAP)? .tanh()? .mul(SOFTCAP)?, )? .to_dtype(q.dtype())?; att.matmul(&v.clone())? }; let sdpa_output = candle_nn::ops::sdpa(&q, &k, &v, scale as f32, SOFTCAP as f32)?; assert_eq!(ground_truth.shape(), sdpa_output.shape()); let error: f32 = ((&ground_truth - &sdpa_output)?.abs()? / &ground_truth.abs()?)? .sum_all()? .to_scalar()?; assert!(error <= 0.0001, "{}", error); Ok(()) } #[test] fn sdpa_vector_cross() -> Result<()> { // Allow vectorized, seqlen = 1. Simulat cross attention case where R != L, R = 1 const BS: usize = 4; const R: usize = 1; const L: usize = 24; const DK: usize = 64; const H: usize = 3; let scale: f64 = f64::from(DK as u32).sqrt().recip(); let device = Device::new_metal(0)?; let mut rng = rand::rngs::StdRng::seed_from_u64(4242424242); let q = randn(&mut rng, (BS, H, R, DK), &device)?; let k = randn(&mut rng, (BS, H, L, DK), &device)?; let v = randn(&mut rng, (BS, H, L, DK), &device)?; let ground_truth = { let att = (q.clone() * scale)?.matmul(&k.clone().t()?)?; let att = candle_nn::ops::softmax_last_dim(&att.to_dtype(DType::F32)?)? .to_dtype(q.dtype())?; att.matmul(&v.clone())? }; let sdpa_output = candle_nn::ops::sdpa(&q, &k, &v, scale as f32, 1.)?; assert_eq!(ground_truth.shape(), sdpa_output.shape()); let error: f32 = ((&ground_truth - &sdpa_output)?.abs()? / &ground_truth.abs()?)? .sum_all()? .to_scalar()?; assert!(error <= 0.0013, "{}", error); Ok(()) } }
candle/candle-nn/tests/sdpa.rs/0
{ "file_path": "candle/candle-nn/tests/sdpa.rs", "repo_id": "candle", "token_count": 3762 }
# Generated content DO NOT EDIT from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence from os import PathLike from candle.typing import _ArrayLike, Device, Scalar, Index, Shape class bf16(DType): pass @staticmethod def cat(tensors: List[Tensor], dim: int) -> Tensor: """ Concatenate the tensors across one axis. """ pass class f16(DType): pass class f32(DType): pass class f64(DType): pass class i64(DType): pass @staticmethod def ones(*shape: Shape, dtype: Optional[DType] = None, device: Optional[Device] = None) -> Tensor: """ Creates a new tensor filled with ones. """ pass @staticmethod def rand(*shape: Shape, device: Optional[Device] = None) -> Tensor: """ Creates a new tensor with random values. """ pass @staticmethod def randn(*shape: Shape, device: Optional[Device] = None) -> Tensor: """ Creates a new tensor with random values from a normal distribution. """ pass @staticmethod def stack(tensors: List[Tensor], dim: int) -> Tensor: """ Stack the tensors along a new axis. """ pass @staticmethod def tensor(data: _ArrayLike) -> Tensor: """ Creates a new tensor from a Python value. The value can be a scalar or array-like object. """ pass class u32(DType): pass class u8(DType): pass @staticmethod def zeros(*shape: Shape, dtype: Optional[DType] = None, device: Optional[Device] = None) -> Tensor: """ Creates a new tensor filled with zeros. """ pass class DType: """ A `candle` dtype. """ class QTensor: """ A quantized tensor. """ def dequantize(self) -> Tensor: """ Dequantizes the tensor. """ pass @property def ggml_dtype(self) -> str: """ Gets the tensors quantized dtype. """ pass def matmul_t(self, lhs: Tensor) -> Tensor: """ Performs a quantized matrix multiplication, with the quantized tensor as the right hand side. """ pass @property def rank(self) -> int: """ Gets the rank of the tensor. """ pass @property def shape(self) -> Tuple[int]: """ Gets the shape of the tensor. """ pass class Tensor: """ A `candle` tensor. """ def __init__(self, data: _ArrayLike): pass def __add__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Add a scalar to a tensor or two tensors together. """ pass def __eq__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __ge__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __getitem__(self, index: Union[Index, Tensor, Sequence[Index]]) -> "Tensor": """ Return a slice of a tensor. """ pass def __gt__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __le__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __lt__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __mul__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Multiply a tensor by a scalar or one tensor by another. """ pass def __ne__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __radd__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Add a scalar to a tensor or two tensors together. """ pass def __richcmp__(self, rhs: Union[Tensor, Scalar], op) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __rmul__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Multiply a tensor by a scalar or one tensor by another. 
""" pass def __sub__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Subtract a scalar from a tensor or one tensor from another. """ pass def __truediv__(self, rhs: Union[Tensor, Scalar]) -> "Tensor": """ Divide a tensor by a scalar or one tensor by another. """ pass def abs(self) -> Tensor: """ Performs the `abs` operation on the tensor. """ pass def argmax_keepdim(self, dim: int) -> Tensor: """ Returns the indices of the maximum value(s) across the selected dimension. """ pass def argmin_keepdim(self, dim: int) -> Tensor: """ Returns the indices of the minimum value(s) across the selected dimension. """ pass def broadcast_add(self, rhs: Tensor) -> Tensor: """ Adds the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. """ pass def broadcast_as(self, *shape: Shape) -> Tensor: """ Broadcasts the tensor to the given shape. """ pass def broadcast_div(self, rhs: Tensor) -> Tensor: """ Divides the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. """ pass def broadcast_left(self, *shape: Shape) -> Tensor: """ Broadcasts the tensor to the given shape, adding new dimensions on the left. """ pass def broadcast_mul(self, rhs: Tensor) -> Tensor: """ Multiplies the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. """ pass def broadcast_sub(self, rhs: Tensor) -> Tensor: """ Subtracts the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. """ pass def contiguous(self) -> Tensor: """ Makes the tensor contiguous in memory. """ pass def copy(self) -> Tensor: """ Returns a copy of the tensor. """ pass def cos(self) -> Tensor: """ Performs the `cos` operation on the tensor. """ pass def detach(self) -> Tensor: """ Detach the tensor from the computation graph. """ pass @property def device(self) -> Device: """ Gets the tensor's device. """ pass @property def dtype(self) -> DType: """ Gets the tensor's dtype. """ pass def exp(self) -> Tensor: """ Performs the `exp` operation on the tensor. """ pass def flatten_all(self) -> Tensor: """ Flattens the tensor into a 1D tensor. """ pass def flatten_from(self, dim: int) -> Tensor: """ Flattens the tensor on the dimension indexes from `dim` (inclusive) to the last dimension. """ pass def flatten_to(self, dim: int) -> Tensor: """ Flattens the tensor on the dimension indexes from `0` to `dim` (inclusive). """ pass def gather(self, index, dim): """ Gathers values along an axis specified by dim. """ pass def get(self, index: int) -> Tensor: """ Gets the value at the specified index. """ pass def index_select(self, rhs: Tensor, dim: int) -> Tensor: """ Select values for the input tensor at the target indexes across the specified dimension. The `indexes` is argument is an int tensor with a single dimension. The output has the same number of dimension as the `self` input. The target dimension of the output has length the length of `indexes` and the values are taken from `self` using the index from `indexes`. Other dimensions have the same number of elements as the input tensor. """ pass def is_contiguous(self) -> bool: """ Returns true if the tensor is contiguous in C order. """ pass def is_fortran_contiguous(self) -> bool: """ Returns true if the tensor is contiguous in Fortran order. """ pass def log(self) -> Tensor: """ Performs the `log` operation on the tensor. 
""" pass def matmul(self, rhs: Tensor) -> Tensor: """ Performs a matrix multiplication between the two tensors. """ pass def max_keepdim(self, dim: int) -> Tensor: """ Gathers the maximum value across the selected dimension. """ pass def mean_all(self) -> Tensor: """ Returns the mean of the tensor. """ pass def min_keepdim(self, dim: int) -> Tensor: """ Gathers the minimum value across the selected dimension. """ pass def narrow(self, dim: int, start: int, len: int) -> Tensor: """ Returns a new tensor that is a narrowed version of the input, the dimension `dim` ranges from `start` to `start + len`. """ pass @property def nelement(self) -> int: """ Gets the tensor's element count. """ pass def powf(self, p: float) -> Tensor: """ Performs the `pow` operation on the tensor with the given exponent. """ pass def quantize(self, quantized_dtype: str) -> QTensor: """ Quantize the tensor. """ pass @property def rank(self) -> int: """ Gets the tensor's rank. """ pass def recip(self) -> Tensor: """ Get the `recip` of the tensor. """ pass def reshape(self, *shape: Shape) -> Tensor: """ Reshapes the tensor to the given shape. """ pass @property def shape(self) -> Tuple[int]: """ Gets the tensor's shape. """ pass def sin(self) -> Tensor: """ Performs the `sin` operation on the tensor. """ pass def sqr(self) -> Tensor: """ Squares the tensor. """ pass def sqrt(self) -> Tensor: """ Calculates the square root of the tensor. """ pass def squeeze(self, dim: int) -> Tensor: """ Creates a new tensor with the specified dimension removed if its size was one. """ pass @property def stride(self) -> Tuple[int]: """ Gets the tensor's strides. """ pass def sum_all(self) -> Tensor: """ Returns the sum of the tensor. """ pass def sum_keepdim(self, dim: Union[int, List[int]]) -> Tensor: """ Returns the sum of all elements in the input tensor. The sum is performed over all the input dimensions. """ pass def t(self) -> Tensor: """ Transposes the tensor. """ pass def to(self, *args, **kwargs) -> Tensor: """ Performs Tensor dtype and/or device conversion. """ pass def to_device(self, device: Union[str, Device]) -> Tensor: """ Move the tensor to a new device. """ pass def to_dtype(self, dtype: Union[str, DType]) -> Tensor: """ Convert the tensor to a new dtype. """ pass def to_torch(self) -> torch.Tensor: """ Converts candle's tensor to pytorch's tensor """ pass def transpose(self, dim1: int, dim2: int) -> Tensor: """ Returns a tensor that is a transposed version of the input, the given dimensions are swapped. """ pass def unsqueeze(self, dim: int) -> Tensor: """ Creates a new tensor with a dimension of size one inserted at the specified position. """ pass def values(self) -> _ArrayLike: """ Gets the tensor's data as a Python scalar or array-like object. """ pass def where_cond(self, on_true: Tensor, on_false: Tensor) -> Tensor: """ Returns a tensor with the same shape as the input tensor, the values are taken from `on_true` if the input tensor value is not zero, and `on_false` at the positions where the input tensor is equal to zero. """ pass
candle/candle-pyo3/py_src/candle/__init__.pyi/0
{ "file_path": "candle/candle-pyo3/py_src/candle/__init__.pyi", "repo_id": "candle", "token_count": 5844 }
# Generated content DO NOT EDIT
from .. import utils

cuda_is_available = utils.cuda_is_available
get_num_threads = utils.get_num_threads
has_accelerate = utils.has_accelerate
has_mkl = utils.has_mkl
load_ggml = utils.load_ggml
load_gguf = utils.load_gguf
load_safetensors = utils.load_safetensors
save_gguf = utils.save_gguf
save_safetensors = utils.save_safetensors
candle/candle-pyo3/py_src/candle/utils/__init__.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/utils/__init__.py", "repo_id": "candle", "token_count": 150 }
import candle from candle import Tensor from candle.utils import cuda_is_available from candle.testing import assert_equal import pytest def test_tensor_can_be_constructed(): t = Tensor(42.0) assert t.values() == 42.0 def test_tensor_can_be_constructed_from_list(): t = Tensor([3.0, 1, 4, 1, 5, 9, 2, 6]) assert t.values() == [3.0, 1, 4, 1, 5, 9, 2, 6] def test_tensor_can_be_constructed_from_list_of_lists(): t = Tensor([[3.0, 1, 4, 1], [5, 9, 2, 6]]) assert t.values() == [[3.0, 1, 4, 1], [5, 9, 2, 6]] def test_tensor_can_be_quantized(): t = candle.randn((16, 256)) for format in [ "q4_0", "q4_1", "q5_0", "q5_1", "q8_0", "q2k", "q3k", "q4k", "q5k", "q8k", ]: for formatted_format in [format.upper(), format.lower()]: quant_t = t.quantize(formatted_format) assert quant_t.ggml_dtype.lower() == format.lower() assert quant_t.shape == t.shape def test_tensor_can_be_indexed(): t = Tensor([[3.0, 1, 4, 1], [5, 9, 2, 6]]) assert t[0].values() == [3.0, 1.0, 4.0, 1.0] assert t[1].values() == [5.0, 9.0, 2.0, 6.0] assert t[-1].values() == [5.0, 9.0, 2.0, 6.0] assert t[-2].values() == [3.0, 1.0, 4.0, 1.0] def test_tensor_can_be_sliced(): t = Tensor([3.0, 1, 4, 10, 5, 9, 2, 6]) assert t[0:4].values() == [3.0, 1.0, 4.0, 10.0] assert t[4:8].values() == [5.0, 9.0, 2.0, 6.0] assert t[-4:].values() == [5.0, 9.0, 2.0, 6.0] assert t[:-4].values() == [3.0, 1.0, 4.0, 10.0] assert t[-4:-2].values() == [5.0, 9.0] assert t[...].values() == t.values() def test_tensor_can_be_sliced_2d(): t = Tensor([[3.0, 1, 4, 1], [5, 9, 2, 6]]) assert t[:, 0].values() == [3.0, 5] assert t[:, 1].values() == [1.0, 9.0] assert t[0, 0].values() == 3.0 assert t[:, -1].values() == [1.0, 6.0] assert t[:, -4].values() == [3.0, 5] assert t[..., 0].values() == [3.0, 5] def test_tensor_can_be_scliced_3d(): t = Tensor([[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]]]) assert t[:, :, 0].values() == [[1, 5], [9, 13]] assert t[:, :, 0:2].values() == [[[1, 2], [5, 6]], [[9, 10], [13, 14]]] assert t[:, 0, 0].values() == [1, 9] assert t[..., 0].values() == [[1, 5], [9, 13]] assert t[..., 0:2].values() == [[[1, 2], [5, 6]], [[9, 10], [13, 14]]] def assert_bool(t: Tensor, expected: bool): assert t.shape == () assert str(t.dtype) == str(candle.u8) assert bool(t.values()) == expected def test_tensor_supports_equality_operations_with_scalars(): t = Tensor(42.0) assert_bool(t == 42.0, True) assert_bool(t == 43.0, False) assert_bool(t != 42.0, False) assert_bool(t != 43.0, True) assert_bool(t > 41.0, True) assert_bool(t > 42.0, False) assert_bool(t >= 41.0, True) assert_bool(t >= 42.0, True) assert_bool(t < 43.0, True) assert_bool(t < 42.0, False) assert_bool(t <= 43.0, True) assert_bool(t <= 42.0, True) def test_tensor_supports_equality_operations_with_tensors(): t = Tensor(42.0) same = Tensor(42.0) other = Tensor(43.0) assert_bool(t == same, True) assert_bool(t == other, False) assert_bool(t != same, False) assert_bool(t != other, True) assert_bool(t > same, False) assert_bool(t > other, False) assert_bool(t >= same, True) assert_bool(t >= other, False) assert_bool(t < same, False) assert_bool(t < other, True) assert_bool(t <= same, True) assert_bool(t <= other, True) def test_tensor_equality_operations_can_broadcast(): # Create a decoder attention mask as a test case # e.g. 
# [[1,0,0] # [1,1,0] # [1,1,1]] mask_cond = candle.Tensor([0, 1, 2]) mask = mask_cond < (mask_cond + 1).reshape((3, 1)) assert mask.shape == (3, 3) assert_equal(mask, Tensor([[1, 0, 0], [1, 1, 0], [1, 1, 1]]).to_dtype(candle.u8)) def test_tensor_can_be_hashed(): t = Tensor(42.0) other = Tensor(42.0) # Hash should represent a unique tensor assert hash(t) != hash(other) assert hash(t) == hash(t) def test_tensor_can_be_expanded_with_none(): t = candle.rand((12, 12)) b = t[None] assert b.shape == (1, 12, 12) c = t[:, None, None, :] assert c.shape == (12, 1, 1, 12) d = t[None, :, None, :] assert d.shape == (1, 12, 1, 12) e = t[None, None, :, :] assert e.shape == (1, 1, 12, 12) f = t[:, :, None] assert f.shape == (12, 12, 1) def test_tensor_can_be_index_via_tensor(): t = candle.Tensor([[1, 2, 1, 2], [3, 4, 3, 4], [5, 6, 5, 6]]) indexed = t[candle.Tensor([0, 2])] assert indexed.shape == (2, 4) assert indexed.values() == [[1, 2, 1, 2], [5, 6, 5, 6]] indexed = t[:, candle.Tensor([0, 2])] assert indexed.shape == (3, 2) assert indexed.values() == [[1, 1], [3, 3], [5, 5]] def test_tensor_can_be_index_via_list(): t = candle.Tensor([[1, 2, 1, 2], [3, 4, 3, 4], [5, 6, 5, 6]]) indexed = t[[0, 2]] assert indexed.shape == (2, 4) assert indexed.values() == [[1, 2, 1, 2], [5, 6, 5, 6]] indexed = t[:, [0, 2]] assert indexed.shape == (3, 2) assert indexed.values() == [[1, 1], [3, 3], [5, 5]] def test_tensor_can_be_cast_via_to(): t = Tensor(42.0) assert str(t.dtype) == str(candle.f32) t_new_args = t.to(candle.f64) assert str(t_new_args.dtype) == str(candle.f64) t_new_kwargs = t.to(dtype=candle.f64) assert str(t_new_kwargs.dtype) == str(candle.f64) pytest.raises(TypeError, lambda: t.to("not a dtype")) pytest.raises(TypeError, lambda: t.to(dtype="not a dtype")) pytest.raises(TypeError, lambda: t.to(candle.f64, "not a dtype")) pytest.raises(TypeError, lambda: t.to()) pytest.raises(ValueError, lambda: t.to(candle.f16, dtype=candle.f64)) pytest.raises(ValueError, lambda: t.to(candle.f16, candle.f16)) other = Tensor(42.0).to(candle.f64) t_new_other_args = t.to(other) assert str(t_new_other_args.dtype) == str(candle.f64) t_new_other_kwargs = t.to(other=other) assert str(t_new_other_kwargs.dtype) == str(candle.f64) @pytest.mark.skipif(not cuda_is_available(), reason="CUDA is not available") def test_tensor_can_be_moved_via_to(): t = Tensor(42.0) assert t.device == "cpu" t_new_args = t.to("cuda") assert t_new_args.device == "cuda" t_new_kwargs = t.to(device="cuda") assert t_new_kwargs.device == "cuda" pytest.raises(TypeError, lambda: t.to("not a device")) pytest.raises(TypeError, lambda: t.to(device="not a device")) pytest.raises(TypeError, lambda: t.to("cuda", "not a device")) pytest.raises(TypeError, lambda: t.to()) pytest.raises(ValueError, lambda: t.to("cuda", device="cpu")) pytest.raises(ValueError, lambda: t.to("cuda", "cuda")) other = Tensor(42.0).to("cuda") t_new_other_args = t.to(other) assert t_new_other_args.device == "cuda" t_new_other_kwargs = t.to(other=other) assert t_new_other_kwargs.device == "cuda" @pytest.mark.skipif(not cuda_is_available(), reason="CUDA is not available") def test_tensor_can_be_moved_and_cast_via_to(): t = Tensor(42.0) assert t.device == "cpu" assert str(t.dtype) == str(candle.f32) t_new_args = t.to("cuda", candle.f64) assert t_new_args.device == "cuda" assert str(t_new_args.dtype) == str(candle.f64) t_new_kwargs = t.to(device="cuda", dtype=candle.f64) assert t_new_kwargs.device == "cuda" assert str(t_new_kwargs.dtype) == str(candle.f64) other = Tensor(42.0).to("cuda").to(candle.f64) 
t_new_other_args = t.to(other) assert t_new_other_args.device == "cuda" assert str(t_new_other_args.dtype) == str(candle.f64) t_new_other_kwargs = t.to(other=other) assert t_new_other_kwargs.device == "cuda" assert str(t_new_other_kwargs.dtype) == str(candle.f64) def test_tensor_can_be_added(): t = Tensor(42.0) result = t + t assert result.values() == 84.0 result = t + 2.0 assert result.values() == 44.0 a = candle.rand((3, 1, 4)) b = candle.rand((2, 1)) c_native = a.broadcast_add(b) c = a + b assert c.shape == (3, 2, 4) assert c.values() == c_native.values() with pytest.raises(ValueError): d = candle.rand((3, 4, 5)) e = candle.rand((4, 6)) f = d + e def test_tensor_can_be_subtracted(): t = Tensor(42.0) result = t - t assert result.values() == 0 result = t - 2.0 assert result.values() == 40.0 a = candle.rand((3, 1, 4)) b = candle.rand((2, 1)) c_native = a.broadcast_sub(b) c = a - b assert c.shape == (3, 2, 4) assert c.values() == c_native.values() with pytest.raises(ValueError): d = candle.rand((3, 4, 5)) e = candle.rand((4, 6)) f = d - e def test_tensor_can_be_multiplied(): t = Tensor(42.0) result = t * t assert result.values() == 1764.0 result = t * 2.0 assert result.values() == 84.0 a = candle.rand((3, 1, 4)) b = candle.rand((2, 1)) c_native = a.broadcast_mul(b) c = a * b assert c.shape == (3, 2, 4) assert c.values() == c_native.values() with pytest.raises(ValueError): d = candle.rand((3, 4, 5)) e = candle.rand((4, 6)) f = d * e def test_tensor_can_be_divided(): t = Tensor(42.0) result = t / t assert result.values() == 1.0 result = t / 2.0 assert result.values() == 21.0 a = candle.rand((3, 1, 4)) b = candle.rand((2, 1)) c_native = a.broadcast_div(b) c = a / b assert c.shape == (3, 2, 4) assert c.values() == c_native.values() with pytest.raises(ValueError): d = candle.rand((3, 4, 5)) e = candle.rand((4, 6)) f = d / e
candle/candle-pyo3/tests/native/test_tensor.py/0
{ "file_path": "candle/candle-pyo3/tests/native/test_tensor.py", "repo_id": "candle", "token_count": 4688 }
//! Contrastive Language-Image Pre-Training //! //! Contrastive Language-Image Pre-Training (CLIP) is an architecture trained on //! pairs of images with related texts. //! //! - 💻 [GH Link](https://github.com/openai/CLIP) //! - 💻 Transformers Python [reference implementation](https://github.com/huggingface/transformers/tree/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip) //! - 🤗 [HF Model](https://huggingface.co/openai/clip-vit-large-patch14-336) //! use self::{ text_model::{Activation, ClipTextTransformer}, vision_model::ClipVisionTransformer, }; use candle::{Result, Tensor, D}; pub mod text_model; pub mod vision_model; #[derive(Clone, Debug)] pub struct ClipModel { text_model: ClipTextTransformer, vision_model: ClipVisionTransformer, visual_projection: candle_nn::Linear, text_projection: candle_nn::Linear, logit_scale: Tensor, } #[derive(Clone, Debug)] pub enum EncoderConfig { Text(text_model::ClipTextConfig), Vision(vision_model::ClipVisionConfig), } impl EncoderConfig { pub fn embed_dim(&self) -> usize { match self { Self::Text(c) => c.embed_dim, Self::Vision(c) => c.embed_dim, } } pub fn num_attention_heads(&self) -> usize { match self { Self::Text(c) => c.num_attention_heads, Self::Vision(c) => c.num_attention_heads, } } pub fn intermediate_size(&self) -> usize { match self { Self::Text(c) => c.intermediate_size, Self::Vision(c) => c.intermediate_size, } } pub fn num_hidden_layers(&self) -> usize { match self { Self::Text(c) => c.num_hidden_layers, Self::Vision(c) => c.num_hidden_layers, } } pub fn activation(&self) -> Activation { match self { Self::Text(_c) => Activation::QuickGelu, Self::Vision(c) => c.activation, } } } #[derive(Clone, Debug)] pub struct ClipConfig { pub text_config: text_model::ClipTextConfig, pub vision_config: vision_model::ClipVisionConfig, pub logit_scale_init_value: f32, pub image_size: usize, } impl ClipConfig { // base image size is 224, model size is 600Mb pub fn vit_base_patch32() -> Self { let text_config = text_model::ClipTextConfig::vit_base_patch32(); let vision_config = vision_model::ClipVisionConfig::vit_base_patch32(); Self { text_config, vision_config, logit_scale_init_value: 2.6592, image_size: 224, } } } impl ClipModel { pub fn new(vs: candle_nn::VarBuilder, c: &ClipConfig) -> Result<Self> { let text_model = ClipTextTransformer::new(vs.pp("text_model"), &c.text_config)?; let vision_model = ClipVisionTransformer::new(vs.pp("vision_model"), &c.vision_config)?; let visual_projection = candle_nn::linear_no_bias( c.vision_config.embed_dim, c.vision_config.projection_dim, vs.pp("visual_projection"), )?; let text_projection = candle_nn::linear_no_bias( c.text_config.embed_dim, c.text_config.projection_dim, vs.pp("text_projection"), )?; // originally nn.Parameter let logit_scale = if vs.contains_tensor("logit_scale") { vs.get(&[], "logit_scale")? } else { Tensor::new(&[c.logit_scale_init_value], vs.device())? }; Ok(Self { text_model, vision_model, visual_projection, text_projection, logit_scale, }) } pub fn get_text_features(&self, input_ids: &Tensor) -> Result<Tensor> { input_ids .apply(&self.text_model)? .apply(&self.text_projection) } pub fn get_image_features(&self, pixel_values: &Tensor) -> Result<Tensor> { pixel_values .apply(&self.vision_model)? 
.apply(&self.visual_projection) } pub fn forward(&self, pixel_values: &Tensor, input_ids: &Tensor) -> Result<(Tensor, Tensor)> { let image_features = self.get_image_features(pixel_values)?; let text_features = self.get_text_features(input_ids)?; let image_features_normalized = div_l2_norm(&image_features)?; let text_features_normalized = div_l2_norm(&text_features)?; let logits_per_text = text_features_normalized.matmul(&image_features_normalized.t()?)?; let logit_scale = self.logit_scale.exp()?; let logits_per_text = logits_per_text.broadcast_mul(&logit_scale)?; let logits_per_image = logits_per_text.t()?; Ok((logits_per_text, logits_per_image)) } } pub fn div_l2_norm(v: &Tensor) -> Result<Tensor> { let l2_norm = v.sqr()?.sum_keepdim(D::Minus1)?.sqrt()?; v.broadcast_div(&l2_norm) }
candle/candle-transformers/src/models/clip/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/clip/mod.rs", "repo_id": "candle", "token_count": 2243 }
//! EVA-2 inference implementation. //! //! EVA-02 is a computer vision model that can be used as an ImageNet classifier. //! The model returns the probability for an image to belong to each of the 1000 //! ImageNet categories. //! //! - [Paper](https://arxiv.org/abs/2303.11331). EVA-02: A Visual Representation for Neon Genesis //! - [Code](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/eva2.py) //! //! # Example //! //! ```bash //! cargo run \ //! --example eva2 \ //! --release -- \ //! --image candle-examples/examples/yolo-v8/assets/bike.jpg //! //! > mountain bike, all-terrain bike, off-roader: 37.09% //! > maillot : 8.30% //! > alp : 2.13% //! > bicycle-built-for-two, tandem bicycle, tandem: 0.84% //! > crash helmet : 0.73% //! ``` //! //! <div align=center> //! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/yolo-v8/assets/bike.jpg" alt="" width=640> //! </div> //! use candle::{IndexOp, Result, Tensor, D}; use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder}; const IMG_SIZE: usize = 448; const PATCH_SIZE: usize = 14; const NUM_CLASSES: usize = 1000; fn linear(vb: VarBuilder, in_dim: usize, out_dim: usize, bias: bool) -> Result<Linear> { if bias { candle_nn::linear(in_dim, out_dim, vb) } else { candle_nn::linear_no_bias(in_dim, out_dim, vb) } } #[derive(Debug)] struct Attention { q: Linear, k: Linear, v: Linear, proj: Linear, rot_pos_embed: Tensor, num_heads: usize, scale: f64, } impl Attention { fn new( vb: VarBuilder, dim: usize, num_heads: usize, qkv_bias: bool, proj_bias: bool, rot_pos_embed: &Tensor, ) -> Result<Self> { let q = linear(vb.pp("q_proj"), dim, dim, qkv_bias)?; let k = linear(vb.pp("k_proj"), dim, dim, false)?; // no bias for Key let v = linear(vb.pp("v_proj"), dim, dim, qkv_bias)?; let proj = linear(vb.pp("proj"), dim, dim, proj_bias)?; let rot_pos_embed = rot_pos_embed.clone(); let scale = 1. / ((dim / num_heads) as f64).sqrt(); Ok(Self { q, k, v, proj, rot_pos_embed, num_heads, scale, }) } } impl Attention { // See: https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/pos_embed_sincos.py#L210 fn apply_rot_embed_cat(x: &Tensor, emb: &Tensor) -> Result<Tensor> { let cos_emb = emb.i((0.., 64..128))?; //.transpose(0, 1)?; let sin_emb = emb.i((0.., 0..64))?; //.transpose(0, 1)?; let index_even: [u32; 32] = (0u32..=63) .step_by(2) .collect::<Vec<_>>() .try_into() .expect("wrong size iterator"); let index_odd: [u32; 32] = (1u32..=63) .step_by(2) .collect::<Vec<_>>() .try_into() .expect("wrong size iterator"); let t_index_even = Tensor::new(&index_even, x.device())?; let t_index_odd = Tensor::new(&index_odd, x.device())?; let x_c = x.contiguous()?; let rot_x_even = x_c.index_select(&t_index_even, D::Minus1)?; let rot_x_odd_minus = (-1.0 * x_c.index_select(&t_index_odd, D::Minus1)?)?; let rot_x = Tensor::stack(&[&rot_x_odd_minus, &rot_x_even], D::Minus1)?.reshape(x.shape())?; x.broadcast_mul(&cos_emb)? + rot_x.broadcast_mul(&sin_emb)? } } impl Module for Attention { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (b, n, c) = xs.dims3()?; let qkv = Tensor::cat( &[ &self.q.forward(xs)?, &self.k.forward(xs)?, &self.v.forward(xs)?, ], 2, )? .reshape((b, n, 3, self.num_heads, c / self.num_heads))? .transpose(1, 2)? // 02134 .transpose(0, 1)? 
// 20134 .transpose(2, 3)?; // 20314 let q = qkv.i(0)?; let k = qkv.i(1)?.contiguous()?; let v = qkv.i(2)?.contiguous()?; let npt = 1; // num_prefix_tokens = 1 for CLS token let q = Tensor::cat( &[ &q.i((0.., 0.., ..npt, 0..))?, &Self::apply_rot_embed_cat(&q.i((0.., 0.., npt.., 0..))?, &self.rot_pos_embed)?, ], 2, )?; let k = Tensor::cat( &[ &k.i((0.., 0.., ..npt, 0..))?, &Self::apply_rot_embed_cat(&k.i((0.., 0.., npt.., 0..))?, &self.rot_pos_embed)?, ], 2, )?; let q = (q * self.scale)?; let attn = &q.matmul(&k.t()?)?; let attn = candle_nn::ops::softmax(attn, D::Minus1)?; let attn = attn.matmul(&v)?.transpose(1, 2)?.reshape((b, n, c))?; self.proj.forward(&attn) } } #[derive(Debug)] struct Mlp { fc1_g: Linear, fc1_x: Linear, norm: LayerNorm, fc2: Linear, } impl Mlp { fn new(vb: VarBuilder, in_features: usize, hidden_features: usize, bias: bool) -> Result<Self> { let out_features = in_features; let fc1_g = linear(vb.pp("fc1_g"), in_features, hidden_features, bias)?; let fc1_x = linear(vb.pp("fc1_x"), in_features, hidden_features, bias)?; let norm = layer_norm(hidden_features, 1e-6, vb.pp("norm"))?; let fc2 = linear(vb.pp("fc2"), hidden_features, out_features, bias)?; Ok(Self { fc1_g, fc1_x, norm, fc2, }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs_g = self.fc1_g.forward(xs)?.silu()?; let xs = self.fc1_x.forward(xs)?; let xs = self.norm.forward(&(xs_g.mul(&xs)?))?; self.fc2.forward(&xs) } } #[derive(Debug)] struct Block { norm1: LayerNorm, attn: Attention, norm2: LayerNorm, mlp: Mlp, } impl Block { fn new(vb: VarBuilder, dim: usize, num_heads: usize, rot_pos_embed: &Tensor) -> Result<Self> { let norm1 = layer_norm(dim, 1e-6, vb.pp("norm1"))?; let attn = Attention::new(vb.pp("attn"), dim, num_heads, true, true, rot_pos_embed)?; let norm2 = layer_norm(dim, 1e-6, vb.pp("norm2"))?; let hidden_dim = dim * 4 * 2 / 3; // 768 * 4 * 2 / 3 = 3072 * 2 / 3 = 2048 let mlp = Mlp::new(vb.pp("mlp"), dim, hidden_dim, true)?; Ok(Self { norm1, attn, norm2, mlp, }) } } impl Module for Block { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let residual = xs; let xs = &self.attn.forward(&self.norm1.forward(xs)?)?; let xs = (xs + residual)?; let residual = &xs; let xs = &self.mlp.forward(&self.norm2.forward(&xs)?)?; xs + residual } } #[derive(Debug)] struct PatchEmbed { proj: candle_nn::Conv2d, patch_size: (usize, usize), num_patches: usize, } impl PatchEmbed { fn new( vb: VarBuilder, img_size: usize, patch_size: usize, in_chans: usize, embed_dim: usize, ) -> Result<Self> { let config = candle_nn::Conv2dConfig { stride: patch_size, ..Default::default() }; let proj = candle_nn::conv2d(in_chans, embed_dim, patch_size, config, vb.pp("proj"))?; let num_patches = (img_size / patch_size) * (img_size / patch_size); Ok(Self { proj, patch_size: (patch_size, patch_size), num_patches, }) } } impl Module for PatchEmbed { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (_b, _c, h, w) = xs.dims4()?; let (patch_h, patch_w) = self.patch_size; if (h % patch_h) != 0 { candle::bail!("image height {h} is not a multiple of patch height {patch_h}") } if (w % patch_w) != 0 { candle::bail!("image width {w} is not a multiple of patch width {patch_w}") } let xs = self.proj.forward(xs)?; let (b, c, h, w) = xs.dims4()?; // flatten embeddings. 
xs.reshape((b, c, h * w))?.transpose(1, 2) } } #[derive(Debug)] pub struct EVA2VisionTransformer { patch_embed: PatchEmbed, cls_token: Tensor, pos_embed: Tensor, blocks: Vec<Block>, norm: LayerNorm, head: Linear, } impl EVA2VisionTransformer { pub fn new(vb: VarBuilder, depth: usize, embed_dim: usize, num_heads: usize) -> Result<Self> { let patch_embed = PatchEmbed::new(vb.pp("patch_embed"), IMG_SIZE, PATCH_SIZE, 3, embed_dim)?; let cls_token = vb.get((1, 1, embed_dim), "cls_token")?; let pos_embed = vb.get((1, patch_embed.num_patches + 1, embed_dim), "pos_embed")?; let rot_pos_embed = vb.get((patch_embed.num_patches, 128), "rot_pos_embed")?; let head = linear(vb.pp("head"), embed_dim, NUM_CLASSES, true)?; let norm = layer_norm(embed_dim, 1e-6, vb.pp("norm"))?; let vb_b = vb.pp("blocks"); let blocks = (0..depth) .map(|i| Block::new(vb_b.pp(i.to_string()), embed_dim, num_heads, &rot_pos_embed)) .collect::<Result<Vec<_>>>()?; Ok(Self { patch_embed, cls_token, pos_embed, blocks, norm, head, }) } fn interpolate_pos_encoding( &self, xs: &Tensor, w: usize, h: usize, num_prefix_tokens: usize, ) -> Result<Tensor> { let npatch = xs.dim(1)? - 1; let n = self.pos_embed.dim(1)? - 1; let sqrt_n = (n as f64).sqrt(); if npatch == n && w == h { return Ok(self.pos_embed.clone()); } // Interpolate only local tokens, i.e. those after the CLS token let prefix_tokens_pos_embed = self.pos_embed.i((0.., ..num_prefix_tokens, 0..))?.clone(); let patch_pos_embed = &self.pos_embed.i((0.., num_prefix_tokens.., 0..))?; let dim = xs.dim(D::Minus1)?; let (w0, h0) = ((w / PATCH_SIZE) as f64 + 0.1, (h / PATCH_SIZE) as f64 + 0.1); let patch_pos_embed = patch_pos_embed .reshape((1, sqrt_n as usize, sqrt_n as usize, dim))? .transpose(2, 3)? .transpose(1, 2)?; // This uses bicubic interpolation in the original implementation. let patch_pos_embed = patch_pos_embed.upsample_nearest2d(h0 as usize, w0 as usize)?; let el_count = patch_pos_embed.shape().elem_count(); let patch_pos_embed = patch_pos_embed .transpose(1, 2)? .transpose(2, 3)? .reshape((1, el_count / dim, dim))?; Tensor::cat(&[&prefix_tokens_pos_embed, &patch_pos_embed], 1) } fn prepare_tokens_with_mask(&self, xs: &Tensor) -> Result<Tensor> { let (_b, _nc, w, h) = xs.dims4()?; if (w != IMG_SIZE) || (h != IMG_SIZE) { panic!("Error: The input tensor should have the shape: Bx3x518x518."); } let xs = self.patch_embed.forward(xs)?; let xs = Tensor::cat(&[&self.cls_token, &xs], 1)?; let xs = (&xs + &self.interpolate_pos_encoding(&xs, w, h, 1)?)?; Ok(xs) } fn get_intermediate_layers_not_chunked( &self, xs: &Tensor, blocks_to_take: &[usize], ) -> Result<Vec<Tensor>> { let mut xs = self.prepare_tokens_with_mask(xs)?; let mut output = Vec::new(); for (i, blk) in self.blocks.iter().enumerate() { xs = blk.forward(&xs)?; if blocks_to_take.contains(&i) { output.push(xs.clone()); } } if output.len() != blocks_to_take.len() { candle::bail!( "only {} / {} blocks found", output.len(), blocks_to_take.len() ); } Ok(output) } pub fn get_intermediate_layers( &self, xs: &Tensor, blocks_to_take: &[usize], reshape: bool, return_class_token: bool, norm: bool, ) -> Result<Tensor> { let outputs = self.get_intermediate_layers_not_chunked(xs, blocks_to_take)?; let outputs = if norm { outputs .iter() .map(|out| self.norm.forward(out)) .collect::<Result<Vec<_>>>()? 
} else { outputs }; let class_tokens = outputs .iter() .map(|out| out.i((.., 0))) .collect::<Result<Vec<_>>>()?; let outputs = outputs .iter() .map(|out| out.i((.., 1..))) .collect::<Result<Vec<_>>>()?; let outputs = if reshape { let (b, _c, w, h) = xs.dims4()?; let patch_size = self.patch_embed.patch_size.0; let num_channels = outputs[0].elem_count() / (b * (w / patch_size) * (h / patch_size)); outputs .iter() .map(|out| { out.reshape((b, w / patch_size, h / patch_size, num_channels))? .transpose(2, 3)? .transpose(1, 2) }) .collect::<Result<Vec<_>>>()? } else { outputs }; let outputs = if return_class_token { outputs .iter() .zip(class_tokens.iter()) .map(|(out, class_token)| Tensor::cat(&[out, class_token], D::Minus1)) .collect::<Result<Vec<_>>>()? } else { outputs }; Tensor::stack(&outputs[..], 0) } } impl Module for EVA2VisionTransformer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = self.prepare_tokens_with_mask(xs)?; for blk in self.blocks.iter() { xs = blk.forward(&xs)? } let xs_moy_local_tokens = xs.i((.., 1..))?.mean(1)?; let xs_norm = self.norm.forward(&xs_moy_local_tokens)?; self.head.forward(&xs_norm) } } pub fn vit_base(vb: VarBuilder) -> Result<EVA2VisionTransformer> { EVA2VisionTransformer::new(vb, 12, 768, 12) } pub fn vit_large(vb: VarBuilder) -> Result<EVA2VisionTransformer> { EVA2VisionTransformer::new(vb, 24, 1024, 16) }
candle/candle-transformers/src/models/eva2.rs/0
{ "file_path": "candle/candle-transformers/src/models/eva2.rs", "repo_id": "candle", "token_count": 7638 }
//! Llama2 inference implementation. //! //! See ["LLaMA 2: Open Foundation and Fine-Tuned Chat Models"](https://arxiv.org/abs/2307.09288) //! //! - ⚡ [Interactive Wasm Example](https://huggingface.co/spaces/lmz/candle-llama2) //! - 💻 llama2.c [GH Link](https://github.com/karpathy/llama2.c) //! use candle::{DType, Device, IndexOp, Result, Tensor, D}; use candle_nn::linear_no_bias as linear; use candle_nn::{embedding, rms_norm, Embedding, Linear, Module, RmsNorm, VarBuilder}; use std::collections::HashMap; #[derive(Debug, Clone)] pub struct Config { pub dim: usize, // transformer dimension pub hidden_dim: usize, // for ffn layers pub n_layers: usize, // number of layers pub n_heads: usize, // number of query heads pub n_kv_heads: usize, // number of key/value heads (can be < query heads because of multiquery) pub vocab_size: usize, // vocabulary size, usually 256 (byte-level) pub seq_len: usize, // max sequence length pub norm_eps: f64, } impl Config { pub fn tiny_260k() -> Self { Self { dim: 64, hidden_dim: 768, n_layers: 5, n_heads: 8, n_kv_heads: 4, vocab_size: 32000, seq_len: 512, norm_eps: 1e-5, } } pub fn tiny_15m() -> Self { Self { dim: 288, hidden_dim: 768, n_layers: 6, n_heads: 6, n_kv_heads: 6, vocab_size: 32000, seq_len: 256, norm_eps: 1e-5, } } pub fn tiny_42m() -> Self { Self { dim: 512, hidden_dim: 768, n_layers: 8, n_heads: 8, n_kv_heads: 8, vocab_size: 32000, seq_len: 1024, norm_eps: 1e-5, } } pub fn tiny_110m() -> Self { Self { dim: 768, hidden_dim: 768, n_layers: 12, n_heads: 12, n_kv_heads: 12, vocab_size: 32000, seq_len: 1024, norm_eps: 1e-5, } } } #[derive(Debug, Clone)] pub struct Cache { masks: HashMap<usize, Tensor>, pub use_kv_cache: bool, pub kvs: Vec<Option<(Tensor, Tensor)>>, pub cos: Tensor, pub sin: Tensor, device: Device, } impl Cache { pub fn new(use_kv_cache: bool, cfg: &Config, vb: VarBuilder) -> Result<Self> { let n_elem = cfg.dim / cfg.n_heads; let theta: Vec<_> = (0..n_elem) .step_by(2) .map(|i| 1f32 / 10000f32.powf(i as f32 / n_elem as f32)) .collect(); let theta = Tensor::new(theta.as_slice(), vb.device())?; let idx_theta = Tensor::arange(0, cfg.seq_len as u32, vb.device())? .to_dtype(DType::F32)? .reshape((cfg.seq_len, 1))? .matmul(&theta.reshape((1, theta.elem_count()))?)?; let precomputed_cos = idx_theta.cos()?; let precomputed_sin = idx_theta.sin()?; let freq_cis_real = vb .get((cfg.seq_len, cfg.head_size() / 2), "freq_cis_real") .unwrap_or(precomputed_cos); let freq_cis_imag = vb .get((cfg.seq_len, cfg.head_size() / 2), "freq_cis_imag") .unwrap_or(precomputed_sin); let cos = freq_cis_real.reshape((cfg.seq_len, cfg.head_size() / 2, 1))?; let sin = freq_cis_imag.reshape((cfg.seq_len, cfg.head_size() / 2, 1))?; Ok(Self { masks: HashMap::new(), use_kv_cache, kvs: vec![None; cfg.n_layers], cos, sin, device: vb.device().clone(), }) } pub fn mask(&mut self, t: usize) -> Result<Tensor> { if let Some(mask) = self.masks.get(&t) { Ok(mask.clone()) } else { let mask: Vec<_> = (0..t) .flat_map(|i| (0..t).map(move |j| u8::from(j > i))) .collect(); let mask = Tensor::from_slice(&mask, (t, t), &self.device)?; self.masks.insert(t, mask.clone()); Ok(mask) } } } fn silu(xs: &Tensor) -> Result<Tensor> { xs / (xs.neg()?.exp()? + 1.0)? 
} #[derive(Debug, Clone)] struct CausalSelfAttention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, n_head: usize, n_key_value_head: usize, head_dim: usize, } impl CausalSelfAttention { fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize, cache: &Cache) -> Result<Tensor> { let (b_sz, seq_len, h, n_embd) = x.dims4()?; let cos = cache.cos.i(index_pos..index_pos + seq_len)?; let sin = cache.sin.i(index_pos..index_pos + seq_len)?; let cos = cos.unsqueeze(1)?; let sin = sin.unsqueeze(1)?; let cos = cos.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?; let sin = sin.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?; let x = x.reshape((b_sz, seq_len, h, n_embd / 2, 2))?; let x0 = x.narrow(D::Minus1, 0, 1)?; let x1 = x.narrow(D::Minus1, 1, 1)?; let dst0 = (x0.broadcast_mul(&cos)? - x1.broadcast_mul(&sin)?)?; let dst1 = (x0.broadcast_mul(&sin)? + x1.broadcast_mul(&cos)?)?; let rope = Tensor::cat(&[&dst0, &dst1], D::Minus1)?.reshape((b_sz, seq_len, h, n_embd))?; Ok(rope) } fn forward( &self, x: &Tensor, index_pos: usize, block_idx: usize, cache: &mut Cache, ) -> Result<Tensor> { let (b_sz, seq_len, n_embd) = x.dims3()?; let q = self.q_proj.forward(x)?; let k = self.k_proj.forward(x)?; let v = self.v_proj.forward(x)?; let q = q.reshape((b_sz, seq_len, self.n_head, self.head_dim))?; let k = k.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?; let mut v = v.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?; let q = self.apply_rotary_emb(&q, index_pos, cache)?; let mut k = self.apply_rotary_emb(&k, index_pos, cache)?; if cache.use_kv_cache { if let Some((cache_k, cache_v)) = &cache.kvs[block_idx] { k = Tensor::cat(&[cache_k, &k], 1)?.contiguous()?; v = Tensor::cat(&[cache_v, &v], 1)?.contiguous()?; } cache.kvs[block_idx] = Some((k.clone(), v.clone())) } let k = self.repeat_kv(k)?; let v = self.repeat_kv(v)?; let q = q.transpose(1, 2)?.contiguous()?; let k = k.transpose(1, 2)?.contiguous()?; let v = v.transpose(1, 2)?.contiguous()?; let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?; let att = if seq_len <= 1 { att } else { let mask = cache.mask(seq_len)?.broadcast_as(att.shape())?; masked_fill(&att, &mask, f32::NEG_INFINITY)? }; let att = candle_nn::ops::softmax(&att, D::Minus1)?; // Convert to contiguous as matmul doesn't support strided vs for now. let y = att.matmul(&v.contiguous()?)?; let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?; let y = self.o_proj.forward(&y)?; Ok(y) } fn repeat_kv(&self, x: Tensor) -> Result<Tensor> { let n_rep = self.n_head / self.n_key_value_head; if n_rep == 1 { Ok(x) } else { let (b_sz, seq_len, n_kv_head, head_dim) = x.dims4()?; let x = x .unsqueeze(3)? .expand((b_sz, seq_len, n_kv_head, n_rep, head_dim))? 
.reshape((b_sz, seq_len, n_kv_head * n_rep, head_dim))?; Ok(x) } } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let size_in = cfg.dim; let size_q = (cfg.dim / cfg.n_heads) * cfg.n_heads; let size_kv = (cfg.dim / cfg.n_heads) * cfg.n_kv_heads; let q_proj = linear(size_in, size_q, vb.pp("q_proj"))?; let k_proj = linear(size_in, size_kv, vb.pp("k_proj"))?; let v_proj = linear(size_in, size_kv, vb.pp("v_proj"))?; let o_proj = linear(size_q, size_in, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, n_head: cfg.n_heads, n_key_value_head: cfg.n_kv_heads, head_dim: cfg.dim / cfg.n_heads, }) } } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } #[derive(Debug, Clone)] struct Mlp { c_fc1: Linear, c_fc2: Linear, c_proj: Linear, } impl Mlp { fn new(c_fc1: Linear, c_fc2: Linear, c_proj: Linear) -> Self { Self { c_fc1, c_fc2, c_proj, } } fn forward(&self, x: &Tensor) -> Result<Tensor> { let x = (silu(&self.c_fc1.forward(x)?)? * self.c_fc2.forward(x)?)?; self.c_proj.forward(&x) } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let h_size = cfg.dim; let i_size = cfg.hidden_dim; let c_fc1 = linear(h_size, i_size, vb.pp("gate_proj"))?; let c_fc2 = linear(h_size, i_size, vb.pp("up_proj"))?; let c_proj = linear(i_size, h_size, vb.pp("down_proj"))?; Ok(Self::new(c_fc1, c_fc2, c_proj)) } } #[derive(Debug, Clone)] struct Block { rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp, } impl Block { fn new(rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp) -> Self { Self { rms_1, attn, rms_2, mlp, } } fn forward( &self, x: &Tensor, index_pos: usize, block_idx: usize, cache: &mut Cache, ) -> Result<Tensor> { let residual = x; let x = self.rms_1.forward(x)?; let x = (self.attn.forward(&x, index_pos, block_idx, cache)? + residual)?; let residual = &x; let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? + residual)?; Ok(x) } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let attn = CausalSelfAttention::load(vb.pp("self_attn"), cfg)?; let mlp = Mlp::load(vb.pp("mlp"), cfg)?; let input_layernorm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("post_attention_layernorm"))?; Ok(Self::new( input_layernorm, attn, post_attention_layernorm, mlp, )) } } #[derive(Debug, Clone)] pub struct Llama { wte: Embedding, blocks: Vec<Block>, ln_f: RmsNorm, lm_head: Linear, pub config: Config, } impl Llama { pub fn forward(&self, x: &Tensor, index_pos: usize, cache: &mut Cache) -> Result<Tensor> { let (_b_sz, _seq_len) = x.dims2()?; let mut x = self.wte.forward(x)?; for (block_idx, block) in self.blocks.iter().enumerate() { x = block.forward(&x, index_pos, block_idx, cache)?; } let x = self.ln_f.forward(&x)?; let logits = self.lm_head.forward(&x)?; logits.to_dtype(DType::F32) } pub fn load(vb: VarBuilder, cfg: Config) -> Result<Self> { let wte = embedding(cfg.vocab_size, cfg.dim, vb.pp("model.embed_tokens"))?; let lm_head = linear(cfg.dim, cfg.vocab_size, vb.pp("lm_head"))?; let ln_f = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("model.norm"))?; let blocks: Vec<_> = (0..cfg.n_layers) .map(|i| Block::load(vb.pp(format!("model.layers.{i}")), &cfg).unwrap()) .collect(); Ok(Self { wte, blocks, ln_f, lm_head, config: cfg, }) } }
candle/candle-transformers/src/models/llama2_c.rs/0
{ "file_path": "candle/candle-transformers/src/models/llama2_c.rs", "repo_id": "candle", "token_count": 6603 }
//! Mixtral Model, a sparse mixture of expert model based on the Mistral architecture //! //! See Mixtral model details at: //! - [Hugging Face](https://huggingface.co/docs/transformers/model_doc/mixtral) //! - [Mixtral-8x7B Blog Post](https://mistral.ai/news/mixtral-of-experts/) //! //! The model uses a mixture of experts architecture with: //! - 8 experts per layer //! - Top 2 expert routing //! - Sliding window attention //! - RoPE embeddings //! //! References: //! - [Hugging Face Implementation](https://github.com/huggingface/transformers/blob/main/src/transformers/models/mixtral/modeling_mixtral.py) //! - [Mixtral Blog Post](https://mistral.ai/news/mixtral-of-experts/) //! use crate::models::with_tracing::{linear_no_bias, Linear, RmsNorm}; /// Mixtral Model /// https://github.com/huggingface/transformers/blob/main/src/transformers/models/mixtral/modeling_mixtral.py /// https://mistral.ai/news/mixtral-of-experts/ use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::{Activation, VarBuilder}; use serde::Deserialize; use std::sync::Arc; /// https://github.com/huggingface/transformers/blob/1a585c1222a56bcaecc070966d558d4a9d862e83/src/transformers/models/mixtral/configuration_mixtral.py#L113 #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { pub(crate) vocab_size: usize, pub(crate) hidden_size: usize, pub(crate) intermediate_size: usize, pub(crate) num_hidden_layers: usize, pub(crate) num_attention_heads: usize, pub(crate) num_key_value_heads: usize, pub(crate) hidden_act: Activation, pub(crate) max_position_embeddings: usize, pub(crate) rms_norm_eps: f64, pub(crate) rope_theta: f64, pub(crate) sliding_window: usize, pub(crate) num_experts_per_tok: usize, pub(crate) num_local_experts: usize, pub(crate) use_flash_attn: bool, } impl Config { /// https://huggingface.co/mistralai/Mixtral-8x7B-v0.1/blob/main/config.json pub fn v0_1_8x7b(use_flash_attn: bool) -> Self { Self { vocab_size: 32000, hidden_size: 4096, intermediate_size: 14336, num_hidden_layers: 32, num_attention_heads: 32, num_key_value_heads: 8, hidden_act: Activation::Silu, max_position_embeddings: 32768, rms_norm_eps: 1e-5, rope_theta: 1e6, sliding_window: 4096, num_experts_per_tok: 2, num_local_experts: 8, use_flash_attn, } } } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } fn rotate_half(xs: &Tensor) -> Result<Tensor> { let last_dim = xs.dim(D::Minus1)?; let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?; let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?; Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1) } impl RotaryEmbedding { fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg.hidden_size / cfg.num_attention_heads; let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / (cfg.rope_theta as f32).powf(i as f32 / dim as f32)) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? 
.reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim) let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim) let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?; let k_embed = (k.broadcast_mul(&cos)? + rotate_half(k)?.broadcast_mul(&sin))?; Ok((q_embed, k_embed)) } } #[cfg(feature = "flash-attn")] fn flash_attn( q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal) } #[cfg(not(feature = "flash-attn"))] fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> { unimplemented!("compile with '--features flash-attn'") } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, use_flash_attn: bool, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let head_dim = hidden_sz / num_heads; let q_proj = linear_no_bias(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?; let k_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?; let v_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?; let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups, head_dim, hidden_size: hidden_sz, rotary_emb, kv_cache: None, use_flash_attn: cfg.use_flash_attn, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? 
.transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?; let attn_output = if self.use_flash_attn { // flash-attn expects (b_sz, seq_len, nheads, head_dim) let q = query_states.transpose(1, 2)?; let k = key_states.transpose(1, 2)?; let v = value_states.transpose(1, 2)?; let softmax_scale = 1f32 / (self.head_dim as f32).sqrt(); flash_attn(&q, &k, &v, softmax_scale, q_len > 1)?.transpose(1, 2)? } else { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? .apply(&self.o_proj) } } #[derive(Debug, Clone)] struct BlockSparseTop2MLP { w1: Linear, w2: Linear, w3: Linear, act_fn: Activation, } impl BlockSparseTop2MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let w1 = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("w1"))?; let w2 = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("w2"))?; let w3 = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("w3"))?; Ok(Self { w1, w2, w3, act_fn: cfg.hidden_act, }) } } impl Module for BlockSparseTop2MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let lhs = xs.apply(&self.w1)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.w3)?; (lhs * rhs)?.apply(&self.w2) } } #[derive(Debug, Clone)] struct SparseMoeBlock { gate: Linear, experts: Vec<BlockSparseTop2MLP>, num_experts_per_tok: usize, } impl SparseMoeBlock { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let gate = linear_no_bias(cfg.hidden_size, cfg.num_local_experts, vb.pp("gate"))?; let mut experts = Vec::with_capacity(cfg.num_local_experts); let vb = vb.pp("experts"); for idx in 0..cfg.num_local_experts { let expert = BlockSparseTop2MLP::new(cfg, vb.pp(idx))?; experts.push(expert) } Ok(SparseMoeBlock { gate, experts, num_experts_per_tok: cfg.num_experts_per_tok, }) } } impl Module for SparseMoeBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (b_size, seq_len, hidden_dim) = xs.dims3()?; let xs = xs.reshape(((), hidden_dim))?; let router_logits = xs.apply(&self.gate)?; let routing_weights = candle_nn::ops::softmax_last_dim(&router_logits)?; // In order to extract topk, we extract the data from the tensor and manipulate it // directly. Maybe we will want to use some custom ops instead at some point. let routing_weights = routing_weights.to_dtype(DType::F32)?.to_vec2::<f32>()?; // routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1) // top_x contains the row indexes to evaluate for each expert. 
let mut top_x = vec![vec![]; self.experts.len()]; let mut selected_rws = vec![vec![]; self.experts.len()]; for (row_idx, rw) in routing_weights.iter().enumerate() { let mut dst = (0..rw.len() as u32).collect::<Vec<u32>>(); dst.sort_by(|&i, &j| rw[j as usize].total_cmp(&rw[i as usize])); let mut sum_routing_weights = 0f32; for &expert_idx in dst.iter().take(self.num_experts_per_tok) { let expert_idx = expert_idx as usize; let routing_weight = rw[expert_idx]; sum_routing_weights += routing_weight; top_x[expert_idx].push(row_idx as u32); } for &expert_idx in dst.iter().take(self.num_experts_per_tok) { let expert_idx = expert_idx as usize; let routing_weight = rw[expert_idx]; selected_rws[expert_idx].push(routing_weight / sum_routing_weights) } } // routing_weights /= routing_weights.sum(dim=-1, keepdim=True) // expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0) let mut ys = xs.zeros_like()?; for (expert_idx, expert_layer) in self.experts.iter().enumerate() { let top_x = &top_x[expert_idx]; if top_x.is_empty() { continue; } let top_x = Tensor::new(top_x.as_slice(), xs.device())?; let selected_rws = Tensor::new(selected_rws[expert_idx].as_slice(), xs.device())?.reshape(((), 1))?; // Index the correct hidden states and compute the expert hidden state for // the current expert. We need to make sure to multiply the output hidden // states by `routing_weights` on the corresponding tokens (top-1 and top-2) let current_state = xs.index_select(&top_x, 0)?.reshape(((), hidden_dim))?; // current_hidden_states = expert_layer(current_state, routing_weights[top_x_list, idx_list, None]) let current_hidden_states = expert_layer.forward(&current_state)?; let current_hidden_states = current_hidden_states.broadcast_mul(&selected_rws)?; ys = ys.index_add(&top_x, &current_hidden_states, 0)?; } let ys = ys.reshape((b_size, seq_len, hidden_dim))?; Ok(ys) } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, block_sparse_moe: SparseMoeBlock, input_layernorm: RmsNorm, post_attention_layernorm: RmsNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let block_sparse_moe = SparseMoeBlock::new(cfg, vb.pp("block_sparse_moe"))?; let input_layernorm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, block_sparse_moe, input_layernorm, post_attention_layernorm, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs .apply(&self.post_attention_layernorm)? 
.apply(&self.block_sparse_moe)?; residual + xs } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, lm_head: Linear, sliding_window: usize, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; Ok(Self { embed_tokens, layers, norm, lm_head, sliding_window: cfg.sliding_window, device: vb.device().clone(), dtype: vb.dtype(), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { // Sliding window mask? let mask: Vec<_> = (0..tgt_len) .flat_map(|i| { (0..tgt_len).map(move |j| { if i < j || j + self.sliding_window < i { f32::NEG_INFINITY } else { 0. } }) }) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(self.dtype) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } }
candle/candle-transformers/src/models/mixtral.rs/0
{ "file_path": "candle/candle-transformers/src/models/mixtral.rs", "repo_id": "candle", "token_count": 9110 }
//! Mistral model implementation with quantization support. //! //! Mistral is a large language model optimized for efficiency. //! This implementation provides quantization for reduced memory and compute. //! //! Key characteristics: //! - Sliding window attention mechanism //! - Grouped query attention (GQA) //! - RMSNorm for layer normalization //! - Rotary positional embeddings (RoPE) //! - Support for 8-bit quantization //! //! References: //! - [Mistral Paper](https://arxiv.org/abs/2310.06825) //! - [Model Card](https://huggingface.co/mistralai/Mistral-7B-v0.1) //! use crate::quantized_nn::{linear_no_bias, Embedding, Linear, RmsNorm}; pub use crate::quantized_var_builder::VarBuilder; use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::Activation; use std::sync::Arc; pub use crate::models::mistral::Config; #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(cfg: &Config, dev: &Device) -> Result<Self> { let rope_theta = cfg.rope_theta as f32; let dim = cfg.hidden_size / cfg.num_attention_heads; let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / rope_theta.powf(i as f32 / dim as f32)) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(DType::F32)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let q_embed = candle_nn::rotary_emb::rope(q, &cos, &sin)?; let k_embed = candle_nn::rotary_emb::rope(k, &cos, &sin)?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: Activation, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?; let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?; let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_act, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.up_proj)?; (lhs * rhs)?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let head_dim = hidden_sz / num_heads; let q_proj = linear_no_bias(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?; let k_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?; let v_proj = 
linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?; let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups, head_dim, hidden_size: hidden_sz, rotary_emb, kv_cache: None, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)? .contiguous()?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)? .contiguous()?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?; let attn_output = { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? 
.apply(&self.o_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, input_layernorm: RmsNorm, post_attention_layernorm: RmsNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let input_layernorm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?; residual + xs } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, lm_head: Linear, sliding_window: Option<usize>, device: Device, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = Embedding::new(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; Ok(Self { embed_tokens, layers, norm, lm_head, sliding_window: cfg.sliding_window, device: vb.device().clone(), }) } fn prepare_decoder_attention_mask( &self, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { let sliding_window = self.sliding_window.unwrap_or(tgt_len + 1); let mask: Vec<_> = (0..tgt_len) .flat_map(|i| { (0..tgt_len).map(move |j| { if i < j || j + sliding_window < i { f32::NEG_INFINITY } else { 0. } }) }) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((1, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(DType::F32) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (_b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .contiguous()? .apply(&self.norm)? .apply(&self.lm_head) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } }
candle/candle-transformers/src/models/quantized_mistral.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_mistral.rs", "repo_id": "candle", "token_count": 5831 }
//! # ResNet Implementation //! //! Implementation of ResNet architectures as described in the paper: //! //! ## Reference //! //! [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) //! He et al. (2015) //! //! This paper introduced ResNet, a deep neural network architecture that utilizes //! skip connections ("residual connections") to enable training of very deep networks. use candle::{Result, D}; use candle_nn::{batch_norm, Conv2d, Func, VarBuilder}; fn conv2d( c_in: usize, c_out: usize, ksize: usize, padding: usize, stride: usize, vb: VarBuilder, ) -> Result<Conv2d> { let conv2d_cfg = candle_nn::Conv2dConfig { stride, padding, ..Default::default() }; candle_nn::conv2d_no_bias(c_in, c_out, ksize, conv2d_cfg, vb) } fn downsample(c_in: usize, c_out: usize, stride: usize, vb: VarBuilder) -> Result<Func> { if stride != 1 || c_in != c_out { let conv = conv2d(c_in, c_out, 1, 0, stride, vb.pp(0))?; let bn = batch_norm(c_out, 1e-5, vb.pp(1))?; Ok(Func::new(move |xs| xs.apply(&conv)?.apply_t(&bn, false))) } else { Ok(Func::new(|xs| Ok(xs.clone()))) } } fn basic_block(c_in: usize, c_out: usize, stride: usize, vb: VarBuilder) -> Result<Func> { let conv1 = conv2d(c_in, c_out, 3, 1, stride, vb.pp("conv1"))?; let bn1 = batch_norm(c_out, 1e-5, vb.pp("bn1"))?; let conv2 = conv2d(c_out, c_out, 3, 1, 1, vb.pp("conv2"))?; let bn2 = batch_norm(c_out, 1e-5, vb.pp("bn2"))?; let downsample = downsample(c_in, c_out, stride, vb.pp("downsample"))?; Ok(Func::new(move |xs| { let ys = xs .apply(&conv1)? .apply_t(&bn1, false)? .relu()? .apply(&conv2)? .apply_t(&bn2, false)?; (xs.apply(&downsample)? + ys)?.relu() })) } fn basic_layer( c_in: usize, c_out: usize, stride: usize, cnt: usize, vb: VarBuilder, ) -> Result<Func> { let mut layers = Vec::with_capacity(cnt); for index in 0..cnt { let l_in = if index == 0 { c_in } else { c_out }; let stride = if index == 0 { stride } else { 1 }; layers.push(basic_block(l_in, c_out, stride, vb.pp(index))?) } Ok(Func::new(move |xs| { let mut xs = xs.clone(); for layer in layers.iter() { xs = xs.apply(layer)? } Ok(xs) })) } fn resnet( nclasses: Option<usize>, c1: usize, c2: usize, c3: usize, c4: usize, vb: VarBuilder, ) -> Result<Func> { let conv1 = conv2d(3, 64, 7, 3, 2, vb.pp("conv1"))?; let bn1 = batch_norm(64, 1e-5, vb.pp("bn1"))?; let layer1 = basic_layer(64, 64, 1, c1, vb.pp("layer1"))?; let layer2 = basic_layer(64, 128, 2, c2, vb.pp("layer2"))?; let layer3 = basic_layer(128, 256, 2, c3, vb.pp("layer3"))?; let layer4 = basic_layer(256, 512, 2, c4, vb.pp("layer4"))?; let fc = match nclasses { None => None, Some(nclasses) => { let linear = candle_nn::linear(512, nclasses, vb.pp("fc"))?; Some(linear) } }; Ok(Func::new(move |xs| { let xs = xs .apply(&conv1)? .apply_t(&bn1, false)? .relu()? .pad_with_same(D::Minus1, 1, 1)? .pad_with_same(D::Minus2, 1, 1)? .max_pool2d_with_stride(3, 2)? .apply(&layer1)? .apply(&layer2)? .apply(&layer3)? .apply(&layer4)? .mean(D::Minus1)? .mean(D::Minus1)?; match &fc { None => Ok(xs), Some(fc) => xs.apply(fc), } })) } /// Creates a ResNet-18 model. pub fn resnet18(num_classes: usize, vb: VarBuilder) -> Result<Func> { resnet(Some(num_classes), 2, 2, 2, 2, vb) } pub fn resnet18_no_final_layer(vb: VarBuilder) -> Result<Func> { resnet(None, 2, 2, 2, 2, vb) } /// Creates a ResNet-34 model. 
pub fn resnet34(num_classes: usize, vb: VarBuilder) -> Result<Func> { resnet(Some(num_classes), 3, 4, 6, 3, vb) } pub fn resnet34_no_final_layer(vb: VarBuilder) -> Result<Func> { resnet(None, 3, 4, 6, 3, vb) } // Bottleneck versions for ResNet 50, 101, and 152. fn bottleneck_block( c_in: usize, c_out: usize, stride: usize, e: usize, vb: VarBuilder, ) -> Result<Func> { let e_dim = e * c_out; let conv1 = conv2d(c_in, c_out, 1, 0, 1, vb.pp("conv1"))?; let bn1 = batch_norm(c_out, 1e-5, vb.pp("bn1"))?; let conv2 = conv2d(c_out, c_out, 3, 1, stride, vb.pp("conv2"))?; let bn2 = batch_norm(c_out, 1e-5, vb.pp("bn2"))?; let conv3 = conv2d(c_out, e_dim, 1, 0, 1, vb.pp("conv3"))?; let bn3 = batch_norm(e_dim, 1e-5, vb.pp("bn3"))?; let downsample = downsample(c_in, e_dim, stride, vb.pp("downsample"))?; Ok(Func::new(move |xs| { let ys = xs .apply(&conv1)? .apply_t(&bn1, false)? .relu()? .apply(&conv2)? .apply_t(&bn2, false)? .relu()? .apply(&conv3)? .apply_t(&bn3, false)?; (xs.apply(&downsample)? + ys)?.relu() })) } fn bottleneck_layer( c_in: usize, c_out: usize, stride: usize, cnt: usize, vb: VarBuilder, ) -> Result<Func> { let mut layers = Vec::with_capacity(cnt); for index in 0..cnt { let l_in = if index == 0 { c_in } else { 4 * c_out }; let stride = if index == 0 { stride } else { 1 }; layers.push(bottleneck_block(l_in, c_out, stride, 4, vb.pp(index))?) } Ok(Func::new(move |xs| { let mut xs = xs.clone(); for layer in layers.iter() { xs = xs.apply(layer)? } Ok(xs) })) } fn bottleneck_resnet( nclasses: Option<usize>, c1: usize, c2: usize, c3: usize, c4: usize, vb: VarBuilder, ) -> Result<Func> { let conv1 = conv2d(3, 64, 7, 3, 2, vb.pp("conv1"))?; let bn1 = batch_norm(64, 1e-5, vb.pp("bn1"))?; let layer1 = bottleneck_layer(64, 64, 1, c1, vb.pp("layer1"))?; let layer2 = bottleneck_layer(4 * 64, 128, 2, c2, vb.pp("layer2"))?; let layer3 = bottleneck_layer(4 * 128, 256, 2, c3, vb.pp("layer3"))?; let layer4 = bottleneck_layer(4 * 256, 512, 2, c4, vb.pp("layer4"))?; let fc = match nclasses { None => None, Some(nclasses) => { let linear = candle_nn::linear(4 * 512, nclasses, vb.pp("fc"))?; Some(linear) } }; Ok(Func::new(move |xs| { let xs = xs .apply(&conv1)? .apply_t(&bn1, false)? .relu()? .pad_with_same(D::Minus1, 1, 1)? .pad_with_same(D::Minus2, 1, 1)? .max_pool2d_with_stride(3, 2)? .apply(&layer1)? .apply(&layer2)? .apply(&layer3)? .apply(&layer4)? .mean(D::Minus1)? .mean(D::Minus1)?; match &fc { None => Ok(xs), Some(fc) => xs.apply(fc), } })) } pub fn resnet50(num_classes: usize, vb: VarBuilder) -> Result<Func> { bottleneck_resnet(Some(num_classes), 3, 4, 6, 3, vb) } pub fn resnet50_no_final_layer(vb: VarBuilder) -> Result<Func> { bottleneck_resnet(None, 3, 4, 6, 3, vb) } pub fn resnet101(num_classes: usize, vb: VarBuilder) -> Result<Func> { bottleneck_resnet(Some(num_classes), 3, 4, 23, 3, vb) } pub fn resnet101_no_final_layer(vb: VarBuilder) -> Result<Func> { bottleneck_resnet(None, 3, 4, 23, 3, vb) } pub fn resnet152(num_classes: usize, vb: VarBuilder) -> Result<Func> { bottleneck_resnet(Some(num_classes), 3, 8, 36, 3, vb) } pub fn resnet152_no_final_layer(vb: VarBuilder) -> Result<Func> { bottleneck_resnet(None, 3, 8, 36, 3, vb) }
candle/candle-transformers/src/models/resnet.rs/0
{ "file_path": "candle/candle-transformers/src/models/resnet.rs", "repo_id": "candle", "token_count": 4023 }
use candle::{Result, Tensor, D}; use candle_nn as nn; use candle_nn::Module; #[derive(Debug)] pub struct TimestepEmbedding { linear_1: nn::Linear, linear_2: nn::Linear, } impl TimestepEmbedding { // act_fn: "silu" pub fn new(vs: nn::VarBuilder, channel: usize, time_embed_dim: usize) -> Result<Self> { let linear_1 = nn::linear(channel, time_embed_dim, vs.pp("linear_1"))?; let linear_2 = nn::linear(time_embed_dim, time_embed_dim, vs.pp("linear_2"))?; Ok(Self { linear_1, linear_2 }) } } impl Module for TimestepEmbedding { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = nn::ops::silu(&self.linear_1.forward(xs)?)?; self.linear_2.forward(&xs) } } #[derive(Debug)] pub struct Timesteps { num_channels: usize, flip_sin_to_cos: bool, downscale_freq_shift: f64, } impl Timesteps { pub fn new(num_channels: usize, flip_sin_to_cos: bool, downscale_freq_shift: f64) -> Self { Self { num_channels, flip_sin_to_cos, downscale_freq_shift, } } } impl Module for Timesteps { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let half_dim = (self.num_channels / 2) as u32; let exponent = (Tensor::arange(0, half_dim, xs.device())?.to_dtype(candle::DType::F32)? * -f64::ln(10000.))?; let exponent = (exponent / (half_dim as f64 - self.downscale_freq_shift))?; let emb = exponent.exp()?.to_dtype(xs.dtype())?; // emb = timesteps[:, None].float() * emb[None, :] let emb = xs.unsqueeze(D::Minus1)?.broadcast_mul(&emb.unsqueeze(0)?)?; let (cos, sin) = (emb.cos()?, emb.sin()?); let emb = if self.flip_sin_to_cos { Tensor::cat(&[&cos, &sin], D::Minus1)? } else { Tensor::cat(&[&sin, &cos], D::Minus1)? }; if self.num_channels % 2 == 1 { emb.pad_with_zeros(D::Minus2, 0, 1) } else { Ok(emb) } } }
candle/candle-transformers/src/models/stable_diffusion/embeddings.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/embeddings.rs", "repo_id": "candle", "token_count": 1008 }
//! Vision Transformer (ViT) implementation. //! //! Vision Transformer applies transformer architecture to image classification //! by splitting images into patches and processing them as a sequence. //! //! Key characteristics: //! - Image patches as sequence tokens //! - Self-attention between patches //! - Position embeddings //! - CLS token for classification //! - Layer normalization //! //! References: //! - [ViT Paper](https://arxiv.org/abs/2010.11929) //! - [Model Card](https://huggingface.co/google/vit-base-patch16-224) //! use crate::models::with_tracing::{conv2d, linear, linear_no_bias, Conv2d, Linear}; use candle::{IndexOp, Module, Result, Tensor, D}; use candle_nn::{layer_norm, LayerNorm, VarBuilder}; // https://github.com/huggingface/transformers/blob/main/src/transformers/models/vit/configuration_vit.py #[derive(Debug, Clone, serde::Deserialize)] pub struct Config { pub hidden_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub intermediate_size: usize, pub hidden_act: candle_nn::Activation, pub layer_norm_eps: f64, pub image_size: usize, pub patch_size: usize, pub num_channels: usize, pub qkv_bias: bool, } impl Config { // https://huggingface.co/google/vit-base-patch16-224/blob/main/config.json pub fn vit_base_patch16_224() -> Self { Self { hidden_size: 768, num_hidden_layers: 12, num_attention_heads: 12, intermediate_size: 3072, hidden_act: candle_nn::Activation::Gelu, layer_norm_eps: 1e-12, image_size: 224, patch_size: 16, num_channels: 3, qkv_bias: true, } } pub fn microsoft_trocr_base_handwritten() -> Self { Self { hidden_size: 768, num_hidden_layers: 12, num_attention_heads: 12, intermediate_size: 3072, hidden_act: candle_nn::Activation::Gelu, layer_norm_eps: 1e-12, image_size: 384, patch_size: 16, num_channels: 3, qkv_bias: false, } } } #[derive(Debug, Clone)] struct PatchEmbeddings { num_patches: usize, projection: Conv2d, } impl PatchEmbeddings { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let image_size = cfg.image_size; let patch_size = cfg.patch_size; let num_patches = (image_size / patch_size) * (image_size / patch_size); let conv_cfg = candle_nn::Conv2dConfig { stride: patch_size, ..Default::default() }; let projection = conv2d( cfg.num_channels, cfg.hidden_size, patch_size, conv_cfg, vb.pp("projection"), )?; Ok(Self { num_patches, projection, }) } } impl Module for PatchEmbeddings { fn forward(&self, pixel_values: &Tensor) -> Result<Tensor> { let (_b_size, _num_channels, _height, _width) = pixel_values.dims4()?; self.projection .forward(pixel_values)? .flatten_from(2)? .transpose(1, 2) } } #[derive(Debug, Clone)] pub struct Embeddings { cls_token: Tensor, mask_token: Option<Tensor>, patch_embeddings: PatchEmbeddings, position_embeddings: Tensor, hidden_size: usize, } impl Embeddings { pub fn new(cfg: &Config, use_mask_token: bool, vb: VarBuilder) -> Result<Self> { let hidden_size = cfg.hidden_size; let cls_token = vb.get((1, 1, hidden_size), "cls_token")?; let mask_token = if use_mask_token { Some(vb.get((1, 1, hidden_size), "mask_token")?) 
} else { None }; let patch_embeddings = PatchEmbeddings::new(cfg, vb.pp("patch_embeddings"))?; let num_patches = patch_embeddings.num_patches; let position_embeddings = vb.get((1, num_patches + 1, hidden_size), "position_embeddings")?; Ok(Self { cls_token, mask_token, patch_embeddings, position_embeddings, hidden_size, }) } fn interpolate_pos_encoding( &self, _embeddings: &Tensor, _height: usize, _width: usize, ) -> Result<Tensor> { todo!() } pub fn forward( &self, pixel_values: &Tensor, bool_masked_pos: Option<&Tensor>, interpolate_pos_encoding: bool, ) -> Result<Tensor> { let (b_size, _num_channels, height, width) = pixel_values.dims4()?; let embeddings = self.patch_embeddings.forward(pixel_values)?; let embeddings = match (bool_masked_pos, &self.mask_token) { (None, _) => embeddings, (Some(_), None) => candle::bail!("bool_masked_pos set without mask_token"), (Some(bool_masked_pos), Some(mask_tokens)) => { let seq_len = embeddings.dim(1)?; let mask_tokens = mask_tokens.broadcast_as((b_size, seq_len, self.hidden_size))?; let mask = bool_masked_pos .unsqueeze(D::Minus1)? .to_dtype(mask_tokens.dtype())?; ((mask_tokens * &mask)? - (embeddings * (mask - 1.)?)?)? } }; let cls_tokens = self.cls_token.broadcast_as((b_size, 1, self.hidden_size))?; let embeddings = Tensor::cat(&[&cls_tokens, &embeddings], 1)?; if interpolate_pos_encoding { let pos = self.interpolate_pos_encoding(&embeddings, height, width)?; embeddings.broadcast_add(&pos) } else { embeddings.broadcast_add(&self.position_embeddings) } } } #[derive(Debug, Clone)] struct SelfAttention { query: Linear, key: Linear, value: Linear, num_attention_heads: usize, attention_head_size: usize, } impl SelfAttention { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let attention_head_size = cfg.hidden_size / cfg.num_attention_heads; let num_attention_heads = cfg.num_attention_heads; let all_head_size = num_attention_heads * attention_head_size; let linear = |name| { if cfg.qkv_bias { linear(cfg.hidden_size, all_head_size, vb.pp(name)) } else { linear_no_bias(cfg.hidden_size, all_head_size, vb.pp(name)) } }; let query = linear("query")?; let key = linear("key")?; let value = linear("value")?; Ok(Self { query, key, value, num_attention_heads, attention_head_size, }) } fn transpose_for_scores(&self, xs: &Tensor) -> Result<Tensor> { let (b_size, seq_len, _) = xs.dims3()?; xs.reshape(( b_size, seq_len, self.num_attention_heads, self.attention_head_size, ))? .permute((0, 2, 1, 3)) } } impl Module for SelfAttention { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let query = self.query.forward(xs)?; let key = self.key.forward(xs)?; let value = self.value.forward(xs)?; let query = self.transpose_for_scores(&query)?.contiguous()?; let key = self.transpose_for_scores(&key)?.contiguous()?; let value = self.transpose_for_scores(&value)?.contiguous()?; let attention_scores = (query.matmul(&key.t()?)? / f64::sqrt(self.attention_head_size as f64))?; let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?; attention_probs .matmul(&value)? .permute((0, 2, 1, 3))? .contiguous()? 
.flatten_from(D::Minus2) } } #[derive(Debug, Clone)] struct SelfOutput { dense: Linear, } impl SelfOutput { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?; Ok(Self { dense }) } } impl Module for SelfOutput { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.dense) } } #[derive(Debug, Clone)] struct Attention { attention: SelfAttention, output: SelfOutput, } impl Attention { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let attention = SelfAttention::new(cfg, vb.pp("attention"))?; let output = SelfOutput::new(cfg, vb.pp("output"))?; Ok(Self { attention, output }) } } impl Module for Attention { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.attention)?.apply(&self.output) } } #[derive(Debug, Clone)] struct Intermediate { dense: Linear, intermediate_act_fn: candle_nn::Activation, } impl Intermediate { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dense = linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("dense"))?; Ok(Self { dense, intermediate_act_fn: cfg.hidden_act, }) } } impl Module for Intermediate { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.dense)?.apply(&self.intermediate_act_fn) } } #[derive(Debug, Clone)] struct Output { dense: Linear, } impl Output { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dense = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("dense"))?; Ok(Self { dense }) } fn forward(&self, xs: &Tensor, input_tensor: &Tensor) -> Result<Tensor> { xs.apply(&self.dense)? + input_tensor } } #[derive(Debug, Clone)] struct Layer { attention: Attention, intermediate: Intermediate, output: Output, layernorm_before: LayerNorm, layernorm_after: LayerNorm, } impl Layer { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let attention = Attention::new(cfg, vb.pp("attention"))?; let intermediate = Intermediate::new(cfg, vb.pp("intermediate"))?; let output = Output::new(cfg, vb.pp("output"))?; let h_sz = cfg.hidden_size; let layernorm_before = layer_norm(h_sz, cfg.layer_norm_eps, vb.pp("layernorm_before"))?; let layernorm_after = layer_norm(h_sz, cfg.layer_norm_eps, vb.pp("layernorm_after"))?; Ok(Self { attention, intermediate, output, layernorm_after, layernorm_before, }) } } impl Module for Layer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = (xs.apply(&self.layernorm_before)?.apply(&self.attention)? + xs)?; let ys = xs.apply(&self.layernorm_after)?.apply(&self.intermediate)?; self.output.forward(&ys, &xs) } } #[derive(Debug, Clone)] pub struct Encoder { layers: Vec<Layer>, } impl Encoder { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb = vb.pp("layer"); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); for i in 0..cfg.num_hidden_layers { let layer = Layer::new(cfg, vb.pp(i))?; layers.push(layer) } Ok(Self { layers }) } } impl Module for Encoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = xs.clone(); for layer in self.layers.iter() { xs = xs.apply(layer)? 
} Ok(xs) } } #[derive(Debug, Clone)] pub struct Model { embeddings: Embeddings, encoder: Encoder, layernorm: LayerNorm, // no need for pooling layer for image classification classifier: Linear, } impl Model { pub fn new(cfg: &Config, num_labels: usize, vb: VarBuilder) -> Result<Self> { let vb_v = vb.pp("vit"); let embeddings = Embeddings::new(cfg, false, vb_v.pp("embeddings"))?; let encoder = Encoder::new(cfg, vb_v.pp("encoder"))?; let layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb_v.pp("layernorm"))?; let classifier = linear(cfg.hidden_size, num_labels, vb.pp("classifier"))?; Ok(Self { embeddings, encoder, layernorm, classifier, }) } pub fn forward(&self, xs: &Tensor) -> Result<Tensor> { let embedding_output = self.embeddings.forward(xs, None, false)?; let encoder_outputs = self.encoder.forward(&embedding_output)?; encoder_outputs .i((.., 0, ..))? .apply(&self.layernorm)? .apply(&self.classifier) } }
candle/candle-transformers/src/models/vit.rs/0
{ "file_path": "candle/candle-transformers/src/models/vit.rs", "repo_id": "candle", "token_count": 6028 }
import init, { Model } from "./build/m.js"; async function fetchArrayBuffer(url) { const cacheName = "phi-mixformer-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } async function concatenateArrayBuffers(urls) { const arrayBuffers = await Promise.all(urls.map(url => fetchArrayBuffer(url))); let totalLength = arrayBuffers.reduce((acc, arrayBuffer) => acc + arrayBuffer.byteLength, 0); let concatenatedBuffer = new Uint8Array(totalLength); let offset = 0; arrayBuffers.forEach(buffer => { concatenatedBuffer.set(new Uint8Array(buffer), offset); offset += buffer.byteLength; }); return concatenatedBuffer; } class Phi { static instance = {}; static async getInstance( weightsURL, modelID, tokenizerURL, configURL, quantized ) { // load individual modelID only once if (!this.instance[modelID]) { await init(); self.postMessage({ status: "loading", message: "Loading Model" }); const [weightsArrayU8, tokenizerArrayU8, configArrayU8] = await Promise.all([ weightsURL instanceof Array ? concatenateArrayBuffers(weightsURL) : fetchArrayBuffer(weightsURL), fetchArrayBuffer(tokenizerURL), fetchArrayBuffer(configURL), ]); this.instance[modelID] = new Model( weightsArrayU8, tokenizerArrayU8, configArrayU8, quantized ); } return this.instance[modelID]; } } let controller = null; self.addEventListener("message", (event) => { if (event.data.command === "start") { controller = new AbortController(); generate(event.data); } else if (event.data.command === "abort") { controller.abort(); } }); async function generate(data) { const { weightsURL, modelID, tokenizerURL, configURL, quantized, prompt, temp, top_p, repeatPenalty, seed, maxSeqLen, } = data; try { self.postMessage({ status: "loading", message: "Starting Phi" }); const model = await Phi.getInstance( weightsURL, modelID, tokenizerURL, configURL, quantized ); self.postMessage({ status: "loading", message: "Initializing model" }); const firstToken = model.init_with_prompt( prompt, temp, top_p, repeatPenalty, 64, BigInt(seed) ); const seq_len = 2048; let sentence = firstToken; let maxTokens = maxSeqLen ? maxSeqLen : seq_len - prompt.length - 1; let startTime = performance.now(); let tokensCount = 0; while (tokensCount < maxTokens) { await new Promise(async (resolve) => { if (controller && controller.signal.aborted) { self.postMessage({ status: "aborted", message: "Aborted", output: prompt + sentence, }); return; } const token = await model.next_token(); if (token === "<|endoftext|>") { self.postMessage({ status: "complete", message: "complete", output: prompt + sentence, }); return; } const tokensSec = ((tokensCount + 1) / (performance.now() - startTime)) * 1000; sentence += token; self.postMessage({ status: "generating", message: "Generating token", token: token, sentence: sentence, totalTime: performance.now() - startTime, tokensSec, prompt: prompt, }); setTimeout(resolve, 0); }); tokensCount++; } self.postMessage({ status: "complete", message: "complete", output: prompt + sentence, }); } catch (e) { self.postMessage({ error: e }); } }
candle/candle-wasm-examples/phi/phiWorker.js/0
{ "file_path": "candle/candle-wasm-examples/phi/phiWorker.js", "repo_id": "candle", "token_count": 1667 }
use candle::{Device, Tensor}; use candle_transformers::generation::LogitsProcessor; pub use candle_transformers::models::quantized_t5::{ Config, T5EncoderModel, T5ForConditionalGeneration, VarBuilder, }; use candle_wasm_example_t5::console_log; use tokenizers::Tokenizer; use wasm_bindgen::prelude::*; const DEVICE: Device = Device::Cpu; #[wasm_bindgen] pub struct ModelEncoder { model: T5EncoderModel, tokenizer: Tokenizer, } #[wasm_bindgen] pub struct ModelConditionalGeneration { model: T5ForConditionalGeneration, tokenizer: Tokenizer, config: Config, } #[wasm_bindgen] impl ModelConditionalGeneration { #[wasm_bindgen(constructor)] pub fn load( weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>, ) -> Result<ModelConditionalGeneration, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let vb = VarBuilder::from_gguf_buffer(&weights, &DEVICE)?; let mut config: Config = serde_json::from_slice(&config)?; let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let model = T5ForConditionalGeneration::load(vb, &config)?; config.use_cache = false; Ok(Self { model, tokenizer, config, }) } pub fn decode(&mut self, input: JsValue) -> Result<JsValue, JsError> { let input: ConditionalGenerationParams = serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?; let device = &DEVICE; self.model.clear_kv_cache(); let mut output_token_ids = [self.config.pad_token_id as u32].to_vec(); let prompt = input.prompt; let repeat_penalty = input.repeat_penalty; let repeat_last_n = input.repeat_last_n; let seed = input.seed; let max_length = usize::clamp(input.max_length.unwrap_or(512), 0, 512); let temperature = if input.temperature <= 0. { None } else { Some(input.temperature) }; let top_p = if input.top_p <= 0. || input.top_p >= 1. { None } else { Some(input.top_p) }; let mut logits_processor = LogitsProcessor::new(seed, temperature, top_p); let tokens = self .tokenizer .encode(prompt, true) .map_err(|m| JsError::new(&m.to_string()))? .get_ids() .to_vec(); let input_token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; let encoder_output = self.model.encode(&input_token_ids)?; let mut decoded = String::new(); for index in 0.. { if output_token_ids.len() > max_length { break; } let decoder_token_ids = if index == 0 { Tensor::new(output_token_ids.as_slice(), device)?.unsqueeze(0)? } else { let last_token = *output_token_ids.last().unwrap(); Tensor::new(&[last_token], device)?.unsqueeze(0)? }; let logits = self .model .decode(&decoder_token_ids, &encoder_output)? .squeeze(0)?; let logits = if repeat_penalty == 1. { logits } else { let start_at = output_token_ids.len().saturating_sub(repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, repeat_penalty, &output_token_ids[start_at..], )? }; let next_token_id = logits_processor.sample(&logits)?; if next_token_id as usize == self.config.eos_token_id { break; } output_token_ids.push(next_token_id); if let Some(text) = self.tokenizer.id_to_token(next_token_id) { let text = text.replace('▁', " ").replace("<0x0A>", "\n"); decoded += &text; } } Ok(serde_wasm_bindgen::to_value( &ConditionalGenerationOutput { generation: decoded, }, )?) 
} } #[wasm_bindgen] impl ModelEncoder { #[wasm_bindgen(constructor)] pub fn load( weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>, ) -> Result<ModelEncoder, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let vb = VarBuilder::from_gguf_buffer(&weights, &DEVICE)?; let mut config: Config = serde_json::from_slice(&config)?; config.use_cache = false; let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let model = T5EncoderModel::load(vb, &config)?; Ok(Self { model, tokenizer }) } pub fn decode(&mut self, input: JsValue) -> Result<JsValue, JsError> { let device = &DEVICE; let input: DecoderParams = serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?; self.model.clear_kv_cache(); let sentences = input.sentences; let normalize_embeddings = input.normalize_embeddings; let n_sentences = sentences.len(); let mut all_embeddings = Vec::with_capacity(n_sentences); for sentence in sentences { let tokens = self .tokenizer .encode(sentence, true) .map_err(|m| JsError::new(&m.to_string()))? .get_ids() .to_vec(); let token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; let embeddings = self.model.forward(&token_ids)?; console_log!("generated embeddings {:?}", embeddings.shape()); // Apply some avg-pooling by taking the mean embedding value for all tokens (including padding) let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?; let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?; let embeddings = if normalize_embeddings { embeddings.broadcast_div(&embeddings.sqr()?.sum_keepdim(1)?.sqrt()?)? } else { embeddings }; console_log!("{:?}", embeddings.shape()); all_embeddings.push(embeddings.squeeze(0)?.to_vec1::<f32>()?); } Ok(serde_wasm_bindgen::to_value(&DecoderOutput { embeddings: all_embeddings, })?) } } #[derive(serde::Serialize, serde::Deserialize)] struct ConditionalGenerationOutput { generation: String, } #[derive(serde::Serialize, serde::Deserialize)] struct DecoderOutput { embeddings: Vec<Vec<f32>>, } #[derive(serde::Serialize, serde::Deserialize)] pub struct DecoderParams { sentences: Vec<String>, normalize_embeddings: bool, } #[derive(serde::Serialize, serde::Deserialize)] pub struct ConditionalGenerationParams { prompt: String, temperature: f64, seed: u64, top_p: f64, repeat_penalty: f32, repeat_last_n: usize, max_length: Option<usize>, } fn main() { console_error_panic_hook::set_once(); }
candle/candle-wasm-examples/t5/src/bin/m-quantized.rs/0
{ "file_path": "candle/candle-wasm-examples/t5/src/bin/m-quantized.rs", "repo_id": "candle", "token_count": 3555 }
//load the candle yolo wasm module import init, { Model, ModelPose } from "./build/m.js"; async function fetchArrayBuffer(url) { const cacheName = "yolo-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } class Yolo { static instance = {}; // Retrieve the YOLO model. When called for the first time, // this will load the model and save it for future use. static async getInstance(modelID, modelURL, modelSize) { // load individual modelID only once if (!this.instance[modelID]) { await init(); self.postMessage({ status: `loading model ${modelID}:${modelSize}` }); const weightsArrayU8 = await fetchArrayBuffer(modelURL); if (/pose/.test(modelID)) { // if pose model, use ModelPose this.instance[modelID] = new ModelPose(weightsArrayU8, modelSize); } else { this.instance[modelID] = new Model(weightsArrayU8, modelSize); } } else { self.postMessage({ status: "model already loaded" }); } return this.instance[modelID]; } } self.addEventListener("message", async (event) => { const { imageURL, modelID, modelURL, modelSize, confidence, iou_threshold } = event.data; try { self.postMessage({ status: "detecting" }); const yolo = await Yolo.getInstance(modelID, modelURL, modelSize); self.postMessage({ status: "loading image" }); const imgRes = await fetch(imageURL); const imgData = await imgRes.arrayBuffer(); const imageArrayU8 = new Uint8Array(imgData); self.postMessage({ status: `running inference ${modelID}:${modelSize}` }); const bboxes = yolo.run(imageArrayU8, confidence, iou_threshold); // Send the output back to the main thread as JSON self.postMessage({ status: "complete", output: JSON.parse(bboxes), }); } catch (e) { self.postMessage({ error: e }); } });
candle/candle-wasm-examples/yolo/yoloWorker.js/0
{ "file_path": "candle/candle-wasm-examples/yolo/yoloWorker.js", "repo_id": "candle", "token_count": 756 }
# Prompt templates These are the templates used to format the conversation history for different models used in HuggingChat. Set them in your `.env.local` [like so](https://github.com/huggingface/chat-ui#chatprompttemplate). ## Llama 2 ```env <s>[INST] <<SYS>>\n{{preprompt}}\n<</SYS>>\n\n{{#each messages}}{{#ifUser}}{{content}} [/INST] {{/ifUser}}{{#ifAssistant}}{{content}} </s><s>[INST] {{/ifAssistant}}{{/each}} ``` ## CodeLlama ```env <s>[INST] <<SYS>>\n{{preprompt}}\n<</SYS>>\n\n{{#each messages}}{{#ifUser}}{{content}} [/INST] {{/ifUser}}{{#ifAssistant}}{{content}} </s><s>[INST] {{/ifAssistant}}{{/each}} ``` ## Falcon ```env System: {{preprompt}}\nUser:{{#each messages}}{{#ifUser}}{{content}}\nFalcon:{{/ifUser}}{{#ifAssistant}}{{content}}\nUser:{{/ifAssistant}}{{/each}} ``` ## Mistral ```env <s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}} {{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s> {{/ifAssistant}}{{/each}} ``` ## Zephyr ```env <|system|>\n{{preprompt}}</s>\n{{#each messages}}{{#ifUser}}<|user|>\n{{content}}</s>\n<|assistant|>\n{{/ifUser}}{{#ifAssistant}}{{content}}</s>\n{{/ifAssistant}}{{/each}} ``` ## IDEFICS ```env {{#each messages}}{{#ifUser}}User: {{content}}{{/ifUser}}<end_of_utterance>\nAssistant: {{#ifAssistant}}{{content}}\n{{/ifAssistant}}{{/each}} ``` ## OpenChat ```env <s>{{#each messages}}{{#ifUser}}GPT4 User: {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}}{{content}}<|end_of_turn|>GPT4 Assistant: {{/ifUser}}{{#ifAssistant}}{{content}}<|end_of_turn|>{{/ifAssistant}}{{/each}} ``` ## Mixtral ```env <s> {{#each messages}}{{#ifUser}}[INST]{{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}} {{content}} [/INST]{{/ifUser}}{{#ifAssistant}} {{content}}</s> {{/ifAssistant}}{{/each}} ``` ## ChatML ```env {{#if @root.preprompt}}<|im_start|>system\n{{@root.preprompt}}<|im_end|>\n{{/if}}{{#each messages}}{{#ifUser}}<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n{{/ifUser}}{{#ifAssistant}}{{content}}<|im_end|>\n{{/ifAssistant}}{{/each}} ``` ## CodeLlama 70B ```env <s>{{#if @root.preprompt}}Source: system\n\n {{@root.preprompt}} <step> {{/if}}{{#each messages}}{{#ifUser}}Source: user\n\n {{content}} <step> {{/ifUser}}{{#ifAssistant}}Source: assistant\n\n {{content}} <step> {{/ifAssistant}}{{/each}}Source: assistant\nDestination: user\n\n `` ``` ## Gemma ```env {{#each messages}}{{#ifUser}}<start_of_turn>user\n{{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}}{{content}}<end_of_turn>\n<start_of_turn>model\n{{/ifUser}}{{#ifAssistant}}{{content}}<end_of_turn>\n{{/ifAssistant}}{{/each}} ```
chat-ui/PROMPTS.md/0
{ "file_path": "chat-ui/PROMPTS.md", "repo_id": "chat-ui", "token_count": 1133 }
# OpenID The login feature is disabled by default and users are attributed a unique ID based on their browser. But if you want to use OpenID to authenticate your users, you can add the following to your `.env.local` file: ```ini OPENID_CONFIG=`{ PROVIDER_URL: "<your OIDC issuer>", CLIENT_ID: "<your OIDC client ID>", CLIENT_SECRET: "<your OIDC client secret>", SCOPES: "openid profile", TOLERANCE: // optional RESOURCE: // optional }` ``` Redirect URI: `/login/callback`
chat-ui/docs/source/configuration/open-id.md/0
{ "file_path": "chat-ui/docs/source/configuration/open-id.md", "repo_id": "chat-ui", "token_count": 160 }
import { vi, afterAll } from "vitest"; import dotenv from "dotenv"; import { resolve } from "path"; import fs from "fs"; import { MongoMemoryServer } from "mongodb-memory-server"; let mongoServer: MongoMemoryServer; // Load the .env file const envPath = resolve(__dirname, "../.env"); dotenv.config({ path: envPath }); // Read the .env file content const envContent = fs.readFileSync(envPath, "utf-8"); // Parse the .env content const envVars = dotenv.parse(envContent); // Separate public and private variables const publicEnv = {}; const privateEnv = {}; for (const [key, value] of Object.entries(envVars)) { if (key.startsWith("PUBLIC_")) { publicEnv[key] = value; } else { privateEnv[key] = value; } } vi.mock("$env/dynamic/public", () => ({ env: publicEnv, })); vi.mock("$env/dynamic/private", async () => { mongoServer = await MongoMemoryServer.create(); return { env: { ...privateEnv, MONGODB_URL: mongoServer.getUri(), }, }; }); afterAll(async () => { if (mongoServer) { await mongoServer.stop(); } });
chat-ui/scripts/setupTest.ts/0
{ "file_path": "chat-ui/scripts/setupTest.ts", "repo_id": "chat-ui", "token_count": 390 }
<script lang="ts"> interface Props { isCollapsed: boolean; onClick: () => void; classNames: string; } let { isCollapsed, classNames, onClick }: Props = $props(); </script> <button onclick={onClick} class="{classNames} group flex h-16 w-6 flex-col items-center justify-center -space-y-1 outline-none *:h-3 *:w-1 *:rounded-full *:hover:bg-gray-300 dark:*:hover:bg-gray-600 max-md:hidden {!isCollapsed ? '*:bg-gray-200/70 dark:*:bg-gray-800' : '*:bg-gray-200 dark:*:bg-gray-700'}" name="sidebar-toggle" aria-label="Toggle sidebar navigation" > <div class={!isCollapsed ? "group-hover:rotate-[20deg]" : "group-hover:-rotate-[20deg]"}></div> <div class={!isCollapsed ? "group-hover:-rotate-[20deg]" : "group-hover:rotate-[20deg]"}></div> </button>
chat-ui/src/lib/components/ExpandNavigation.svelte/0
{ "file_path": "chat-ui/src/lib/components/ExpandNavigation.svelte", "repo_id": "chat-ui", "token_count": 304 }
<script lang="ts"> import CarbonStopFilledAlt from "~icons/carbon/stop-filled-alt"; interface Props { classNames?: string; onClick?: () => void; } let { classNames = "", onClick }: Props = $props(); </script> <button type="button" onclick={onClick} class="btn flex h-8 rounded-lg border bg-white px-3 py-1 shadow-sm transition-all hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-700 dark:hover:bg-gray-600 {classNames}" > <CarbonStopFilledAlt class="-ml-1 mr-1 h-[1.25rem] w-[1.1875rem] text-gray-300" /> Stop generating </button>
chat-ui/src/lib/components/StopGeneratingBtn.svelte/0
{ "file_path": "chat-ui/src/lib/components/StopGeneratingBtn.svelte", "repo_id": "chat-ui", "token_count": 211 }
<script lang="ts"> import { run, createBubbler } from "svelte/legacy"; const bubble = createBubbler(); import type { Message, MessageFile } from "$lib/types/Message"; import { createEventDispatcher, onDestroy, tick } from "svelte"; import CarbonExport from "~icons/carbon/export"; import CarbonCheckmark from "~icons/carbon/checkmark"; import CarbonCaretDown from "~icons/carbon/caret-down"; import EosIconsLoading from "~icons/eos-icons/loading"; import ChatInput from "./ChatInput.svelte"; import StopGeneratingBtn from "../StopGeneratingBtn.svelte"; import type { Model } from "$lib/types/Model"; import { page } from "$app/state"; import FileDropzone from "./FileDropzone.svelte"; import RetryBtn from "../RetryBtn.svelte"; import file2base64 from "$lib/utils/file2base64"; import type { Assistant } from "$lib/types/Assistant"; import { base } from "$app/paths"; import ContinueBtn from "../ContinueBtn.svelte"; import AssistantIntroduction from "./AssistantIntroduction.svelte"; import ChatMessage from "./ChatMessage.svelte"; import ScrollToBottomBtn from "../ScrollToBottomBtn.svelte"; import ScrollToPreviousBtn from "../ScrollToPreviousBtn.svelte"; import { browser } from "$app/environment"; import { snapScrollToBottom } from "$lib/actions/snapScrollToBottom"; import SystemPromptModal from "../SystemPromptModal.svelte"; import ChatIntroduction from "./ChatIntroduction.svelte"; import UploadedFile from "./UploadedFile.svelte"; import { useSettingsStore } from "$lib/stores/settings"; import ModelSwitch from "./ModelSwitch.svelte"; import { fly } from "svelte/transition"; import { cubicInOut } from "svelte/easing"; import type { ToolFront } from "$lib/types/Tool"; import { loginModalOpen } from "$lib/stores/loginModal"; interface Props { messages?: Message[]; messagesAlternatives?: Message["id"][][]; loading?: boolean; pending?: boolean; shared?: boolean; currentModel: Model; models: Model[]; assistant?: Assistant | undefined; preprompt?: string | undefined; files?: File[]; } let { messages = [], messagesAlternatives = [], loading = false, pending = false, shared = false, currentModel, models, assistant = undefined, preprompt = undefined, files = $bindable([]), }: Props = $props(); let isReadOnly = $derived(!models.some((model) => model.id === currentModel.id)); let message: string = $state(""); let timeout: ReturnType<typeof setTimeout>; let isSharedRecently = $state(false); let editMsdgId: Message["id"] | null = $state(null); let pastedLongContent = $state(false); run(() => { page.params.id && (isSharedRecently = false); }); const dispatch = createEventDispatcher<{ message: string; share: void; stop: void; retry: { id: Message["id"]; content?: string }; continue: { id: Message["id"] }; }>(); const handleSubmit = () => { if (loading) return; dispatch("message", message); message = ""; }; let lastTarget: EventTarget | null = null; let onDrag = $state(false); const onDragEnter = (e: DragEvent) => { lastTarget = e.target; onDrag = true; }; const onDragLeave = (e: DragEvent) => { if (e.target === lastTarget) { onDrag = false; } }; const onPaste = (e: ClipboardEvent) => { const textContent = e.clipboardData?.getData("text"); if (!$settings.directPaste && textContent && textContent.length >= 3984) { e.preventDefault(); pastedLongContent = true; setTimeout(() => { pastedLongContent = false; }, 1000); const pastedFile = new File([textContent], "Pasted Content", { type: "application/vnd.chatui.clipboard", }); files = [...files, pastedFile]; } if (!e.clipboardData) { return; } // paste of files const 
pastedFiles = Array.from(e.clipboardData.files); if (pastedFiles.length !== 0) { e.preventDefault(); // filter based on activeMimeTypes, including wildcards const filteredFiles = pastedFiles.filter((file) => { return activeMimeTypes.some((mimeType: string) => { const [type, subtype] = mimeType.split("/"); const [fileType, fileSubtype] = file.type.split("/"); return type === fileType && (subtype === "*" || fileSubtype === subtype); }); }); files = [...files, ...filteredFiles]; } }; let lastMessage = $derived(browser && (messages.at(-1) as Message)); let lastIsError = $derived( lastMessage && !loading && (lastMessage.from === "user" || lastMessage.updates?.findIndex((u) => u.type === "status" && u.status === "error") !== -1) ); let sources = $derived( files?.map<Promise<MessageFile>>((file) => file2base64(file).then((value) => ({ type: "base64", value, mime: file.type, name: file.name, })) ) ); function onShare() { if (!confirm("Are you sure you want to share this conversation? This cannot be undone.")) { return; } dispatch("share"); isSharedRecently = true; if (timeout) { clearTimeout(timeout); } timeout = setTimeout(() => { isSharedRecently = false; }, 2000); } onDestroy(() => { if (timeout) { clearTimeout(timeout); } }); let chatContainer: HTMLElement | undefined = $state(); async function scrollToBottom() { await tick(); if (!chatContainer) return; chatContainer.scrollTop = chatContainer.scrollHeight; } // If last message is from user, scroll to bottom run(() => { if (lastMessage && lastMessage.from === "user") { scrollToBottom(); } }); const settings = useSettingsStore(); let mimeTypesFromActiveTools = $derived( page.data.tools .filter((tool: ToolFront) => { if (assistant) { return assistant.tools?.includes(tool._id); } if (currentModel.tools) { return $settings?.tools?.includes(tool._id) ?? tool.isOnByDefault; } return false; }) .flatMap((tool: ToolFront) => tool.mimeTypes ?? []) ); let activeMimeTypes = $derived( Array.from( new Set([ ...mimeTypesFromActiveTools, // fetch mime types from active tools either from tool settings or active assistant ...(currentModel.tools && !assistant ? ["application/pdf"] : []), // if its a tool model, we can always enable document parser so we always accept pdfs ...(currentModel.multimodal ? (currentModel.multimodalAcceptedMimetypes ?? ["image/*"]) : []), // if its a multimodal model, we always accept images ]) ) ); let isFileUploadEnabled = $derived(activeMimeTypes.length > 0); </script> <svelte:window ondragenter={onDragEnter} ondragleave={onDragLeave} ondragover={(e) => { e.preventDefault(); bubble("dragover"); }} ondrop={(e) => { e.preventDefault(); onDrag = false; }} /> <div class="relative min-h-0 min-w-0"> <div class="scrollbar-custom h-full overflow-y-auto" use:snapScrollToBottom={messages.length ? 
[...messages] : false} bind:this={chatContainer} > <div class="mx-auto flex h-full max-w-3xl flex-col gap-6 px-5 pt-6 sm:gap-8 xl:max-w-4xl xl:pt-10" > {#if assistant && !!messages.length} <a class="mx-auto flex items-center gap-1.5 rounded-full border border-gray-100 bg-gray-50 py-1 pl-1 pr-3 text-sm text-gray-800 hover:bg-gray-100 dark:border-gray-800 dark:bg-gray-800 dark:text-gray-200 dark:hover:bg-gray-700" href="{base}/settings/assistants/{assistant._id}" > {#if assistant.avatar} <img src="{base}/settings/assistants/{assistant._id.toString()}/avatar.jpg?hash=${assistant.avatar}" alt="Avatar" class="size-5 rounded-full object-cover" /> {:else} <div class="flex size-6 items-center justify-center rounded-full bg-gray-300 font-bold uppercase text-gray-500" > {assistant.name[0]} </div> {/if} {assistant.name} </a> {:else if preprompt && preprompt != currentModel.preprompt} <SystemPromptModal preprompt={preprompt ?? ""} /> {/if} {#if messages.length > 0} <div class="flex h-max flex-col gap-8 pb-52"> {#each messages as message, idx (message.id)} <ChatMessage {loading} {message} alternatives={messagesAlternatives.find((a) => a.includes(message.id)) ?? []} isAuthor={!shared} readOnly={isReadOnly} isLast={idx === messages.length - 1} bind:editMsdgId on:retry on:vote on:continue on:showAlternateMsg /> {/each} {#if isReadOnly} <ModelSwitch {models} {currentModel} /> {/if} </div> {:else if pending} <ChatMessage loading={true} message={{ id: "0-0-0-0-0", content: "", from: "assistant", children: [], }} isAuthor={!shared} readOnly={isReadOnly} /> {:else if !assistant} <ChatIntroduction {currentModel} on:message={(ev) => { if (page.data.loginRequired) { ev.preventDefault(); $loginModalOpen = true; } else { dispatch("message", ev.detail); } }} /> {:else} <AssistantIntroduction {models} {assistant} on:message={(ev) => { if (page.data.loginRequired) { ev.preventDefault(); $loginModalOpen = true; } else { dispatch("message", ev.detail); } }} /> {/if} </div> <ScrollToPreviousBtn class="fixed right-4 max-md:bottom-[calc(50%+26px)] md:bottom-48 lg:right-10" scrollNode={chatContainer} /> <ScrollToBottomBtn class="fixed right-4 max-md:bottom-[calc(50%-26px)] md:bottom-36 lg:right-10" scrollNode={chatContainer} /> </div> <div class="dark:via-gray-80 pointer-events-none absolute inset-x-0 bottom-0 z-0 mx-auto flex w-full max-w-3xl flex-col items-center justify-center bg-gradient-to-t from-white via-white/80 to-white/0 px-3.5 py-4 dark:border-gray-800 dark:from-gray-900 dark:to-gray-900/0 max-md:border-t max-md:bg-white max-md:dark:bg-gray-900 sm:px-5 md:py-8 xl:max-w-4xl [&>*]:pointer-events-auto" > {#if sources?.length && !loading} <div in:fly|local={sources.length === 1 ? 
{ y: -20, easing: cubicInOut } : undefined} class="flex flex-row flex-wrap justify-center gap-2.5 rounded-xl max-md:pb-3" > {#each sources as source, index} {#await source then src} <UploadedFile file={src} on:close={() => { files = files.filter((_, i) => i !== index); }} /> {/await} {/each} </div> {/if} <div class="w-full"> <div class="flex w-full *:mb-3"> {#if loading} <StopGeneratingBtn classNames="ml-auto" onClick={() => dispatch("stop")} /> {:else if lastIsError} <RetryBtn classNames="ml-auto" onClick={() => { if (lastMessage && lastMessage.ancestors) { dispatch("retry", { id: lastMessage.id, }); } }} /> {:else if messages && lastMessage && lastMessage.interrupted && !isReadOnly} <div class="ml-auto gap-2"> <ContinueBtn onClick={() => { if (lastMessage && lastMessage.ancestors) { dispatch("continue", { id: lastMessage?.id, }); } }} /> </div> {/if} </div> <form tabindex="-1" aria-label={isFileUploadEnabled ? "file dropzone" : undefined} onsubmit={(e) => { e.preventDefault(); handleSubmit(); }} class="relative flex w-full max-w-4xl flex-1 items-center rounded-xl border bg-gray-100 dark:border-gray-600 dark:bg-gray-700 {isReadOnly ? 'opacity-30' : ''}" > {#if onDrag && isFileUploadEnabled} <FileDropzone bind:files bind:onDrag mimeTypes={activeMimeTypes} /> {:else} <div class="flex w-full flex-1 rounded-xl border-none bg-transparent" class:paste-glow={pastedLongContent} > {#if lastIsError} <ChatInput value="Sorry, something went wrong. Please try again." disabled={true} /> {:else} <ChatInput {assistant} placeholder={isReadOnly ? "This conversation is read-only." : "Ask anything"} {loading} bind:value={message} bind:files mimeTypes={activeMimeTypes} on:submit={handleSubmit} {onPaste} disabled={isReadOnly || lastIsError} modelHasTools={currentModel.tools} modelIsMultimodal={currentModel.multimodal} /> {/if} {#if loading} <button disabled class="btn absolute bottom-1 right-0.5 size-10 self-end rounded-lg bg-transparent text-gray-400" > <EosIconsLoading /> </button> {:else} <button class="btn absolute bottom-2 right-2 size-7 self-end rounded-full border bg-white text-black shadow transition-none enabled:hover:bg-white enabled:hover:shadow-inner disabled:opacity-60 dark:border-gray-600 dark:bg-gray-900 dark:text-white dark:hover:enabled:bg-black" disabled={!message || isReadOnly} type="submit" aria-label="Send message" name="submit" > <svg width="1em" height="1em" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg" > <path fill-rule="evenodd" clip-rule="evenodd" d="M17.0606 4.23197C16.4748 3.64618 15.525 3.64618 14.9393 4.23197L5.68412 13.4871C5.09833 14.0729 5.09833 15.0226 5.68412 15.6084C6.2699 16.1942 7.21965 16.1942 7.80544 15.6084L14.4999 8.91395V26.7074C14.4999 27.5359 15.1715 28.2074 15.9999 28.2074C16.8283 28.2074 17.4999 27.5359 17.4999 26.7074V8.91395L24.1944 15.6084C24.7802 16.1942 25.7299 16.1942 26.3157 15.6084C26.9015 15.0226 26.9015 14.0729 26.3157 13.4871L17.0606 4.23197Z" fill="currentColor" /> </svg> </button> {/if} </div> {/if} </form> <div class="mt-2 flex justify-between self-stretch px-1 text-xs text-gray-400/90 max-md:mb-2 max-sm:gap-2" > <p> Model: {#if !assistant} {#if models.find((m) => m.id === currentModel.id)} <a href="{base}/settings/{currentModel.id}" class="inline-flex items-center hover:underline" >{currentModel.displayName}<CarbonCaretDown class="text-xxs" /></a > {:else} <span class="inline-flex items-center line-through dark:border-gray-700"> {currentModel.id} </span> {/if} {:else} {@const model = models.find((m) => m.id === 
assistant?.modelId)} {#if model} <a href="{base}/settings/assistants/{assistant._id}" class="inline-flex items-center border-b hover:text-gray-600 dark:border-gray-700 dark:hover:text-gray-300" >{model?.displayName}<CarbonCaretDown class="text-xxs" /></a > {:else} <span class="inline-flex items-center line-through dark:border-gray-700"> {currentModel.id} </span> {/if} {/if} <span class="max-sm:hidden">·</span><br class="sm:hidden" /> Generated content may be inaccurate or false. </p> {#if messages.length} <button class="flex flex-none items-center hover:text-gray-400 max-sm:rounded-lg max-sm:bg-gray-50 max-sm:px-2.5 dark:max-sm:bg-gray-800" type="button" class:hover:underline={!isSharedRecently} onclick={onShare} disabled={isSharedRecently} > {#if isSharedRecently} <CarbonCheckmark class="text-[.6rem] sm:mr-1.5 sm:text-green-600" /> <div class="text-green-600 max-sm:hidden">Link copied to clipboard</div> {:else} <CarbonExport class="sm:text-primary-500 text-[.6rem] sm:mr-1.5" /> <div class="max-sm:hidden">Share this conversation</div> {/if} </button> {/if} </div> </div> </div> </div> <style lang="postcss"> .paste-glow { animation: glow 1s cubic-bezier(0.4, 0, 0.2, 1) forwards; will-change: box-shadow; } @keyframes glow { 0% { box-shadow: 0 0 0 0 rgba(59, 130, 246, 0.8); } 50% { box-shadow: 0 0 20px 4px rgba(59, 130, 246, 0.6); } 100% { box-shadow: 0 0 0 0 rgba(59, 130, 246, 0); } } </style>
chat-ui/src/lib/components/chat/ChatWindow.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/ChatWindow.svelte", "repo_id": "chat-ui", "token_count": 7079 }
<script lang="ts"> interface Props { classNames?: string; } let { classNames = "" }: Props = $props(); </script> <svg class={classNames} xmlns="http://www.w3.org/2000/svg" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" fill="currentColor" viewBox="0 0 32 32" ><path fill-rule="evenodd" clip-rule="evenodd" d="M5.98 6.01h20.04v16.1H5.98V6.02Zm-2.1 0c0-1.16.94-2.1 2.1-2.1h20.04c1.16 0 2.1.94 2.1 2.1v16.1a2.1 2.1 0 0 1-2.1 2.11H5.98a2.1 2.1 0 0 1-2.1-2.1V6.02Zm5.7 1.65a2.1 2.1 0 0 0-2.1 2.1v2.61a1.05 1.05 0 0 0 2.1 0v-2.6h2.96a1.05 1.05 0 1 0 0-2.11H9.58ZM24.41 18.4a2.1 2.1 0 0 1-2.1 2.1h-2.95a1.05 1.05 0 1 1 0-2.1h2.95v-2.61a1.05 1.05 0 0 1 2.1 0v2.6ZM10.1 25.9a1.05 1.05 0 1 0 0 2.1H22.3a1.05 1.05 0 1 0 0-2.1H10.1Z" /> </svg>
chat-ui/src/lib/components/icons/IconScreenshot.svelte/0
{ "file_path": "chat-ui/src/lib/components/icons/IconScreenshot.svelte", "repo_id": "chat-ui", "token_count": 471 }
import { ObjectId, type WithId } from "mongodb"; import { collections } from "$lib/server/database"; import type { Migration } from "."; import type { Conversation } from "$lib/types/Conversation"; import type { MessageFile } from "$lib/types/Message"; const updateMessageFiles: Migration = { _id: new ObjectId("5f9f5f5f5f5f5f5f5f5f5f5f"), name: "Convert message files to the new schema", up: async () => { const allConversations = collections.conversations.find({}, { projection: { messages: 1 } }); let conversation: WithId<Pick<Conversation, "messages">> | null = null; while ((conversation = await allConversations.tryNext())) { const messages = conversation.messages.map((message) => { const files = (message.files as string[] | undefined)?.map<MessageFile>((file) => { // File is already in the new format if (typeof file !== "string") return file; // File was a hash pointing to a file in the bucket if (file.length === 64) { return { type: "hash", name: "unknown.jpg", value: file, mime: "image/jpeg", }; } // File was a base64 string else { return { type: "base64", name: "unknown.jpg", value: file, mime: "image/jpeg", }; } }); return { ...message, files, }; }); // Set the new messages array await collections.conversations.updateOne({ _id: conversation._id }, { $set: { messages } }); } return true; }, runEveryTime: false, }; export default updateMessageFiles;
chat-ui/src/lib/migrations/routines/05-update-message-files.ts/0
{ "file_path": "chat-ui/src/lib/migrations/routines/05-update-message-files.ts", "repo_id": "chat-ui", "token_count": 618 }
import { z } from "zod"; import type { Endpoint } from "../endpoints"; import { env } from "$env/dynamic/private"; import type { TextGenerationStreamOutput } from "@huggingface/inference"; import { createImageProcessorOptionsValidator } from "../images"; import { endpointMessagesToAnthropicMessages, addToolResults } from "./utils"; import { createDocumentProcessorOptionsValidator } from "../document"; import type { Tool, ToolCall, ToolInput, ToolInputFile, ToolInputFixed, ToolInputOptional, } from "$lib/types/Tool"; import type Anthropic from "@anthropic-ai/sdk"; import type { MessageParam } from "@anthropic-ai/sdk/resources/messages.mjs"; import directlyAnswer from "$lib/server/tools/directlyAnswer"; export const endpointAnthropicParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("anthropic"), baseURL: z.string().url().default("https://api.anthropic.com"), apiKey: z.string().default(env.ANTHROPIC_API_KEY ?? "sk-"), defaultHeaders: z.record(z.string()).optional(), defaultQuery: z.record(z.string()).optional(), multimodal: z .object({ image: createImageProcessorOptionsValidator({ supportedMimeTypes: ["image/png", "image/jpeg", "image/webp"], preferredMimeType: "image/webp", // The 4 / 3 compensates for the 33% increase in size when converting to base64 maxSizeInMB: (5 / 4) * 3, maxWidth: 4096, maxHeight: 4096, }), document: createDocumentProcessorOptionsValidator({ supportedMimeTypes: ["application/pdf"], maxSizeInMB: 32, }), }) .default({}), }); export async function endpointAnthropic( input: z.input<typeof endpointAnthropicParametersSchema> ): Promise<Endpoint> { const { baseURL, apiKey, model, defaultHeaders, defaultQuery, multimodal } = endpointAnthropicParametersSchema.parse(input); let Anthropic; try { Anthropic = (await import("@anthropic-ai/sdk")).default; } catch (e) { throw new Error("Failed to import @anthropic-ai/sdk", { cause: e }); } const anthropic = new Anthropic({ apiKey, baseURL, defaultHeaders, defaultQuery, }); return async ({ messages, preprompt, generateSettings, conversationId, tools = [], toolResults = [], }) => { let system = preprompt; if (messages?.[0]?.from === "system") { system = messages[0].content; } let tokenId = 0; if (tools.length === 0 && toolResults.length > 0) { const toolNames = new Set(toolResults.map((tool) => tool.call.name)); tools = Array.from(toolNames).map((name) => ({ name, description: "", inputs: [], })) as unknown as Tool[]; } const parameters = { ...model.parameters, ...generateSettings }; return (async function* () { const stream = anthropic.messages.stream({ model: model.id ?? model.name, tools: createAnthropicTools(tools), tool_choice: tools.length > 0 ? { type: "auto", disable_parallel_tool_use: false } : undefined, messages: addToolResults( await endpointMessagesToAnthropicMessages(messages, multimodal, conversationId), toolResults ) as MessageParam[], max_tokens: parameters?.max_new_tokens, temperature: parameters?.temperature, top_p: parameters?.top_p, top_k: parameters?.top_k, stop_sequences: parameters?.stop, system, }); while (true) { const result = await Promise.race([stream.emitted("text"), stream.emitted("end")]); if (result === undefined) { if ("tool_use" === stream.receivedMessages[0].stop_reason) { // this should really create a new "Assistant" message with the tool id in it. 
const toolCalls: ToolCall[] = stream.receivedMessages[0].content .filter( (block): block is Anthropic.Messages.ContentBlock & { type: "tool_use" } => block.type === "tool_use" ) .map((block) => ({ name: block.name, parameters: block.input as Record<string, string | number | boolean>, id: block.id, })); yield { token: { id: tokenId, text: "", logprob: 0, special: false, toolCalls }, generated_text: null, details: null, }; } else { yield { token: { id: tokenId++, text: "", logprob: 0, special: true, }, generated_text: await stream.finalText(), details: null, } satisfies TextGenerationStreamOutput; } return; } // Text delta yield { token: { id: tokenId++, text: result as unknown as string, special: false, logprob: 0, }, generated_text: null, details: null, } satisfies TextGenerationStreamOutput; } })(); }; } function createAnthropicTools(tools: Tool[]): Anthropic.Messages.Tool[] { return tools .filter((tool) => tool.name !== directlyAnswer.name) .map((tool) => { const properties = tool.inputs.reduce( (acc, input) => { acc[input.name] = convertToolInputToJSONSchema(input); return acc; }, {} as Record<string, unknown> ); const required = tool.inputs .filter((input) => input.paramType === "required") .map((input) => input.name); return { name: tool.name, description: tool.description, input_schema: { type: "object", properties, required: required.length > 0 ? required : undefined, }, }; }); } function convertToolInputToJSONSchema(input: ToolInput): Record<string, unknown> { const baseSchema: Record<string, unknown> = {}; if ("description" in input) { baseSchema["description"] = input.description || ""; } switch (input.paramType) { case "optional": baseSchema["default"] = (input as ToolInputOptional).default; break; case "fixed": baseSchema["const"] = (input as ToolInputFixed).value; break; } if (input.type === "file") { baseSchema["type"] = "string"; baseSchema["format"] = "binary"; baseSchema["mimeTypes"] = (input as ToolInputFile).mimeTypes; } else { switch (input.type) { case "str": baseSchema["type"] = "string"; break; case "int": baseSchema["type"] = "integer"; break; case "float": baseSchema["type"] = "number"; break; case "bool": baseSchema["type"] = "boolean"; break; } } return baseSchema; }
chat-ui/src/lib/server/endpoints/anthropic/endpointAnthropic.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/anthropic/endpointAnthropic.ts", "repo_id": "chat-ui", "token_count": 2502 }
import type { TextGenerationStreamOutput } from "@huggingface/inference"; import type OpenAI from "openai"; import type { Stream } from "openai/streaming"; import type { ToolCall } from "$lib/types/Tool"; type ToolCallWithParameters = { toolCall: ToolCall; parameterJsonString: string; }; function prepareToolCalls(toolCallsWithParameters: ToolCallWithParameters[], tokenId: number) { const toolCalls: ToolCall[] = []; for (const toolCallWithParameters of toolCallsWithParameters) { // HACK: sometimes gpt4 via azure returns the JSON with literal newlines in it // like {\n "foo": "bar" } const s = toolCallWithParameters.parameterJsonString.replace("\n", ""); const params = JSON.parse(s); const toolCall = toolCallWithParameters.toolCall; for (const name in params) { toolCall.parameters[name] = params[name]; } toolCalls.push(toolCall); } const output = { token: { id: tokenId, text: "", logprob: 0, special: false, toolCalls, }, generated_text: null, details: null, }; return output; } /** * Transform a stream of OpenAI.Chat.ChatCompletion into a stream of TextGenerationStreamOutput */ export async function* openAIChatToTextGenerationStream( completionStream: Stream<OpenAI.Chat.Completions.ChatCompletionChunk> ) { let generatedText = ""; let tokenId = 0; const toolCalls: ToolCallWithParameters[] = []; for await (const completion of completionStream) { const { choices } = completion; const content = choices[0]?.delta?.content ?? ""; const last = choices[0]?.finish_reason === "stop" || choices[0]?.finish_reason === "length"; if (content) { generatedText = generatedText + content; } const output: TextGenerationStreamOutput = { token: { id: tokenId++, text: content ?? "", logprob: 0, special: last, }, generated_text: last ? generatedText : null, details: null, }; yield output; const tools = completion.choices[0]?.delta?.tool_calls || []; for (const tool of tools) { if (tool.id) { if (!tool.function?.name) { throw new Error("Tool call without function name"); } const toolCallWithParameters: ToolCallWithParameters = { toolCall: { name: tool.function.name, parameters: {}, }, parameterJsonString: "", }; toolCalls.push(toolCallWithParameters); } if (toolCalls.length > 0 && tool.function?.arguments) { toolCalls[toolCalls.length - 1].parameterJsonString += tool.function.arguments; } } if (choices[0]?.finish_reason === "tool_calls") { yield prepareToolCalls(toolCalls, tokenId++); } } }
chat-ui/src/lib/server/endpoints/openai/openAIChatToTextGenerationStream.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/openai/openAIChatToTextGenerationStream.ts", "repo_id": "chat-ui", "token_count": 963 }
import { smallModel } from "$lib/server/models"; import { MessageUpdateType, type MessageUpdate } from "$lib/types/MessageUpdate"; import type { EndpointMessage } from "./endpoints/endpoints"; export async function* generateFromDefaultEndpoint({ messages, preprompt, generateSettings, }: { messages: EndpointMessage[]; preprompt?: string; generateSettings?: Record<string, unknown>; }): AsyncGenerator<MessageUpdate, string, undefined> { const endpoint = await smallModel.getEndpoint(); const tokenStream = await endpoint({ messages, preprompt, generateSettings }); for await (const output of tokenStream) { // if generated_text is not set yet, the generation is not done if (output.generated_text) { let generated_text = output.generated_text; for (const stop of [...(smallModel.parameters?.stop ?? []), "<|endoftext|>"]) { if (generated_text.endsWith(stop)) { generated_text = generated_text.slice(0, -stop.length).trimEnd(); } } return generated_text; } yield { type: MessageUpdateType.Stream, token: output.token.text, }; } throw new Error("Generation failed"); }
chat-ui/src/lib/server/generateFromDefaultEndpoint.ts/0
{ "file_path": "chat-ui/src/lib/server/generateFromDefaultEndpoint.ts", "repo_id": "chat-ui", "token_count": 362 }
import type { ConfigTool } from "$lib/types/Tool"; import { ObjectId } from "mongodb"; const directlyAnswer: ConfigTool = { _id: new ObjectId("00000000000000000000000D"), type: "config", description: "Answer the user's query directly", color: "blue", icon: "chat", displayName: "Directly Answer", isOnByDefault: true, isLocked: true, isHidden: true, name: "directlyAnswer", endpoint: null, inputs: [], outputComponent: null, outputComponentIdx: null, showOutput: false, async *call() { return { outputs: [], display: false, }; }, }; export default directlyAnswer;
chat-ui/src/lib/server/tools/directlyAnswer.ts/0
{ "file_path": "chat-ui/src/lib/server/tools/directlyAnswer.ts", "repo_id": "chat-ui", "token_count": 209 }
import { defaultEmbeddingModel, embeddingModels } from "$lib/server/embeddingModels"; import type { Conversation } from "$lib/types/Conversation"; import type { Message } from "$lib/types/Message"; import type { WebSearch, WebSearchScrapedSource } from "$lib/types/WebSearch"; import type { Assistant } from "$lib/types/Assistant"; import type { MessageWebSearchUpdate } from "$lib/types/MessageUpdate"; import { search } from "./search/search"; import { scrape } from "./scrape/scrape"; import { findContextSources } from "./embed/embed"; import { removeParents } from "./markdown/tree"; import { makeErrorUpdate, makeFinalAnswerUpdate, makeGeneralUpdate, makeSourcesUpdate, } from "./update"; import { mergeAsyncGenerators } from "$lib/utils/mergeAsyncGenerators"; import { MetricsServer } from "../metrics"; import { logger } from "$lib/server/logger"; const MAX_N_PAGES_TO_SCRAPE = 8 as const; const MAX_N_PAGES_TO_EMBED = 5 as const; export async function* runWebSearch( conv: Conversation, messages: Message[], ragSettings?: Assistant["rag"], query?: string ): AsyncGenerator<MessageWebSearchUpdate, WebSearch, undefined> { const prompt = messages[messages.length - 1].content; const createdAt = new Date(); const updatedAt = new Date(); MetricsServer.getMetrics().webSearch.requestCount.inc(); try { const embeddingModel = embeddingModels.find((m) => m.id === conv.embeddingModel) ?? defaultEmbeddingModel; if (!embeddingModel) { throw Error(`Embedding model ${conv.embeddingModel} not available anymore`); } // Search the web const { searchQuery, pages } = yield* search(messages, ragSettings, query); if (pages.length === 0) throw Error("No results found for this search query"); // Scrape pages yield makeGeneralUpdate({ message: "Browsing search results" }); const allScrapedPages = yield* mergeAsyncGenerators( pages.slice(0, MAX_N_PAGES_TO_SCRAPE).map(scrape(embeddingModel.chunkCharLength)) ); const scrapedPages = allScrapedPages .filter((p): p is WebSearchScrapedSource => Boolean(p)) .filter((p) => p.page.markdownTree.children.length > 0) .slice(0, MAX_N_PAGES_TO_EMBED); if (!scrapedPages.length) { throw Error(`No text found in the first ${MAX_N_PAGES_TO_SCRAPE} results`); } // Chunk the text of each of the elements and find the most similar chunks to the prompt yield makeGeneralUpdate({ message: "Extracting relevant information" }); const contextSources = await findContextSources(scrapedPages, prompt, embeddingModel).then( (ctxSources) => ctxSources.map((source) => ({ ...source, page: { ...source.page, markdownTree: removeParents(source.page.markdownTree) }, })) ); yield makeSourcesUpdate(contextSources); const webSearch: WebSearch = { prompt, searchQuery, results: scrapedPages.map(({ page, ...source }) => ({ ...source, page: { ...page, markdownTree: removeParents(page.markdownTree) }, })), contextSources, createdAt, updatedAt, }; yield makeFinalAnswerUpdate(); return webSearch; } catch (searchError) { const message = searchError instanceof Error ? searchError.message : String(searchError); logger.error(message); yield makeErrorUpdate({ message: "An error occurred", args: [message] }); const webSearch: WebSearch = { prompt, searchQuery: "", results: [], contextSources: [], createdAt, updatedAt, }; yield makeFinalAnswerUpdate(); return webSearch; } }
chat-ui/src/lib/server/websearch/runWebSearch.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/runWebSearch.ts", "repo_id": "chat-ui", "token_count": 1159 }
import type { WebSearchSource } from "$lib/types/WebSearch"; import { MessageUpdateType, MessageWebSearchUpdateType, type MessageWebSearchErrorUpdate, type MessageWebSearchFinishedUpdate, type MessageWebSearchGeneralUpdate, type MessageWebSearchSourcesUpdate, } from "$lib/types/MessageUpdate"; export function makeGeneralUpdate( update: Pick<MessageWebSearchGeneralUpdate, "message" | "args"> ): MessageWebSearchGeneralUpdate { return { type: MessageUpdateType.WebSearch, subtype: MessageWebSearchUpdateType.Update, ...update, }; } export function makeErrorUpdate( update: Pick<MessageWebSearchErrorUpdate, "message" | "args"> ): MessageWebSearchErrorUpdate { return { type: MessageUpdateType.WebSearch, subtype: MessageWebSearchUpdateType.Error, ...update, }; } export function makeSourcesUpdate(sources: WebSearchSource[]): MessageWebSearchSourcesUpdate { return { type: MessageUpdateType.WebSearch, subtype: MessageWebSearchUpdateType.Sources, message: "sources", sources: sources.map(({ link, title }) => ({ link, title })), }; } export function makeFinalAnswerUpdate(): MessageWebSearchFinishedUpdate { return { type: MessageUpdateType.WebSearch, subtype: MessageWebSearchUpdateType.Finished, }; }
chat-ui/src/lib/server/websearch/update.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/update.ts", "repo_id": "chat-ui", "token_count": 374 }
import type { MessageUpdate } from "./MessageUpdate"; import type { Timestamps } from "./Timestamps"; import type { WebSearch } from "./WebSearch"; import type { v4 } from "uuid"; export type Message = Partial<Timestamps> & { from: "user" | "assistant" | "system"; id: ReturnType<typeof v4>; content: string; updates?: MessageUpdate[]; webSearchId?: WebSearch["_id"]; // legacy version webSearch?: WebSearch; reasoning?: string; score?: -1 | 0 | 1; /** * Either contains the base64 encoded image data * or the hash of the file stored on the server **/ files?: MessageFile[]; interrupted?: boolean; // needed for conversation trees ancestors?: Message["id"][]; // goes one level deep children?: Message["id"][]; }; export type MessageFile = { type: "hash" | "base64"; name: string; value: string; mime: string; };
chat-ui/src/lib/types/Message.ts/0
{ "file_path": "chat-ui/src/lib/types/Message.ts", "repo_id": "chat-ui", "token_count": 277 }
import type { ObjectId } from "mongodb"; import type { Timestamps } from "./Timestamps"; export interface User extends Timestamps { _id: ObjectId; username?: string; name: string; email?: string; avatarUrl: string | undefined; hfUserId: string; isAdmin?: boolean; isEarlyAccess?: boolean; }
chat-ui/src/lib/types/User.ts/0
{ "file_path": "chat-ui/src/lib/types/User.ts", "repo_id": "chat-ui", "token_count": 100 }
import { collections } from "$lib/server/database"; import { ObjectId } from "mongodb"; import { describe, expect, it } from "vitest"; import { insertLegacyConversation, insertSideBranchesConversation } from "./treeHelpers.spec"; import { addChildren } from "./addChildren"; import type { Message } from "$lib/types/Message"; const newMessage: Omit<Message, "id"> = { content: "new message", from: "user", }; Object.freeze(newMessage); describe("addChildren", async () => { it("should let you append on legacy conversations", async () => { const convId = await insertLegacyConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); const convLength = conv.messages.length; addChildren(conv, newMessage, conv.messages[conv.messages.length - 1].id); expect(conv.messages.length).toEqual(convLength + 1); }); it("should not let you create branches on legacy conversations", async () => { const convId = await insertLegacyConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); expect(() => addChildren(conv, newMessage, conv.messages[0].id)).toThrow(); }); it("should not let you create a message that already exists", async () => { const convId = await insertLegacyConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); const messageThatAlreadyExists: Message = { id: conv.messages[0].id, content: "new message", from: "user", }; expect(() => addChildren(conv, messageThatAlreadyExists, conv.messages[0].id)).toThrow(); }); it("should let you create branches on conversations with subtrees", async () => { const convId = await insertSideBranchesConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); const nChildren = conv.messages[0].children?.length; if (!nChildren) throw new Error("No children found"); addChildren(conv, newMessage, conv.messages[0].id); expect(conv.messages[0].children?.length).toEqual(nChildren + 1); }); it("should let you create a new leaf", async () => { const convId = await insertSideBranchesConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); const parentId = conv.messages[conv.messages.length - 1].id; const nChildren = conv.messages[conv.messages.length - 1].children?.length; if (nChildren === undefined) throw new Error("No children found"); expect(nChildren).toEqual(0); addChildren(conv, newMessage, parentId); expect(conv.messages[conv.messages.length - 2].children?.length).toEqual(nChildren + 1); }); it("should let you append to an empty conversation without specifying a parentId", async () => { const conv = { _id: new ObjectId(), rootMessageId: undefined, messages: [] as Message[], }; addChildren(conv, newMessage); expect(conv.messages.length).toEqual(1); expect(conv.rootMessageId).toEqual(conv.messages[0].id); }); it("should throw if you don't specify a parentId in a conversation with messages", async () => { const convId = await insertLegacyConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); expect(() => addChildren(conv, newMessage)).toThrow(); }); it("should return the id of the new message", async 
() => { const convId = await insertLegacyConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); expect(addChildren(conv, newMessage, conv.messages[conv.messages.length - 1].id)).toEqual( conv.messages[conv.messages.length - 1].id ); }); });
chat-ui/src/lib/utils/tree/addChildren.spec.ts/0
{ "file_path": "chat-ui/src/lib/utils/tree/addChildren.spec.ts", "repo_id": "chat-ui", "token_count": 1301 }
import { json } from "@sveltejs/kit"; import { logger } from "$lib/server/logger"; import { computeAllStats } from "$lib/jobs/refresh-conversation-stats"; // Triger like this: // curl -X POST "http://localhost:5173/chat/admin/stats/compute" -H "Authorization: Bearer <ADMIN_API_SECRET>" export async function POST() { computeAllStats().catch((e) => logger.error(e)); return json( { message: "Stats job started", }, { status: 202 } ); }
chat-ui/src/routes/admin/stats/compute/+server.ts/0
{ "file_path": "chat-ui/src/routes/admin/stats/compute/+server.ts", "repo_id": "chat-ui", "token_count": 161 }
<script lang="ts"> import type { PageData } from "./$types"; import { env as envPublic } from "$env/dynamic/public"; import { isHuggingChat } from "$lib/utils/isHuggingChat"; import { goto } from "$app/navigation"; import { base } from "$app/paths"; import { page } from "$app/state"; import CarbonAdd from "~icons/carbon/add"; import CarbonHelpFilled from "~icons/carbon/help-filled"; import CarbonClose from "~icons/carbon/close"; import CarbonArrowUpRight from "~icons/carbon/arrow-up-right"; import CarbonEarthAmerica from "~icons/carbon/earth-americas-filled"; import CarbonUserMultiple from "~icons/carbon/user-multiple"; import CarbonSearch from "~icons/carbon/search"; import CarbonTools from "~icons/carbon/tools"; import Pagination from "$lib/components/Pagination.svelte"; import { formatUserCount } from "$lib/utils/formatUserCount"; import { getHref } from "$lib/utils/getHref"; import { debounce } from "$lib/utils/debounce"; import { useSettingsStore } from "$lib/stores/settings"; import IconInternet from "$lib/components/icons/IconInternet.svelte"; import { isDesktop } from "$lib/utils/isDesktop"; import { SortKey } from "$lib/types/Assistant"; import { ReviewStatus } from "$lib/types/Review"; import { loginModalOpen } from "$lib/stores/loginModal"; interface Props { data: PageData; } let { data = $bindable() }: Props = $props(); let assistantsCreator = $derived(page.url.searchParams.get("user")); let createdByMe = $derived(data.user?.username && data.user.username === assistantsCreator); const SEARCH_DEBOUNCE_DELAY = 400; let filterInputEl: HTMLInputElement | undefined = $state(); let filterValue = $state(data.query); let isFilterInPorgress = false; let sortValue = $state(data.sort as SortKey); let showUnfeatured = $state(data.showUnfeatured); const toggleShowUnfeatured = () => { showUnfeatured = !showUnfeatured; const newUrl = getHref(page.url, { newKeys: { showUnfeatured: showUnfeatured ? "true" : undefined }, existingKeys: { behaviour: "delete", keys: [] }, }); goto(newUrl); }; const onModelChange = (e: Event) => { const newUrl = getHref(page.url, { newKeys: { modelId: (e.target as HTMLSelectElement).value }, existingKeys: { behaviour: "delete_except", keys: ["user"] }, }); resetFilter(); goto(newUrl); }; const resetFilter = () => { filterValue = ""; isFilterInPorgress = false; }; const filterOnName = debounce(async (value: string) => { filterValue = value; if (isFilterInPorgress) { return; } isFilterInPorgress = true; const newUrl = getHref(page.url, { newKeys: { q: value }, existingKeys: { behaviour: "delete", keys: ["p"] }, }); await goto(newUrl); if (isDesktop(window)) { setTimeout(() => filterInputEl?.focus(), 0); } isFilterInPorgress = false; // there was a new filter query before server returned response if (filterValue !== value) { filterOnName(filterValue); } }, SEARCH_DEBOUNCE_DELAY); const sortAssistants = () => { const newUrl = getHref(page.url, { newKeys: { sort: sortValue }, existingKeys: { behaviour: "delete", keys: ["p"] }, }); goto(newUrl); }; const settings = useSettingsStore(); </script> <svelte:head> {#if isHuggingChat} <title>HuggingChat - Assistants</title> <meta property="og:title" content="HuggingChat - Assistants" /> <meta property="og:type" content="link" /> <meta property="og:description" content="Browse HuggingChat assistants made by the community." 
/> <meta property="og:image" content="{envPublic.PUBLIC_ORIGIN || page.url.origin}{base}/{envPublic.PUBLIC_APP_ASSETS}/assistants-thumbnail.png" /> <meta property="og:url" content={page.url.href} /> {/if} </svelte:head> <div class="scrollbar-custom h-full overflow-y-auto py-12 max-sm:pt-8 md:py-24"> <div class="pt-42 mx-auto flex flex-col px-5 xl:w-[60rem] 2xl:w-[64rem]"> <div class="flex items-center"> <h1 class="text-2xl font-bold">Assistants</h1> {#if isHuggingChat} <div class="5 ml-1.5 rounded-lg text-xxs uppercase text-gray-500 dark:text-gray-500"> beta </div> <a href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions/357" class="ml-auto dark:text-gray-400 dark:hover:text-gray-300" target="_blank" aria-label="Hub discussion about assistants" > <CarbonHelpFilled /> </a> {/if} </div> <h2 class="text-gray-500">Popular assistants made by the community</h2> <div class="mt-6 flex justify-between gap-2 max-sm:flex-col sm:items-center"> <select class="mt-1 h-[34px] rounded-lg border border-gray-300 bg-gray-50 px-2 text-sm text-gray-900 focus:border-blue-700 focus:ring-blue-700 dark:border-gray-600 dark:bg-gray-700 dark:text-white dark:placeholder-gray-400" bind:value={data.selectedModel} onchange={onModelChange} aria-label="Filter assistants by model" > <option value="">All models</option> {#each data.models.filter((model) => !model.unlisted) as model} <option value={model.name}>{model.name}</option> {/each} </select> {#if data.user?.isAdmin} <label class="mr-auto flex items-center gap-1 text-red-500" title="Admin only feature"> <input type="checkbox" checked={showUnfeatured} onchange={toggleShowUnfeatured} /> Show unfeatured assistants </label> {/if} {#if page.data.loginRequired && !data.user} <button onclick={() => { $loginModalOpen = true; }} class="flex items-center gap-1 whitespace-nowrap rounded-lg border bg-white py-1 pl-1.5 pr-2.5 shadow-sm hover:bg-gray-50 hover:shadow-none dark:border-gray-600 dark:bg-gray-700 dark:hover:bg-gray-700" > <CarbonAdd />Create new assistant </button> {:else} <a href={`${base}/settings/assistants/new`} class="flex items-center gap-1 whitespace-nowrap rounded-lg border bg-white py-1 pl-1.5 pr-2.5 shadow-sm hover:bg-gray-50 hover:shadow-none dark:border-gray-600 dark:bg-gray-700 dark:hover:bg-gray-700" > <CarbonAdd />Create new assistant </a> {/if} </div> <div class="mt-7 flex flex-wrap items-center gap-x-2 gap-y-3 text-sm"> {#if assistantsCreator && !createdByMe} <div class="flex items-center gap-1.5 rounded-full border border-gray-300 bg-gray-50 px-3 py-1 dark:border-gray-600 dark:bg-gray-700 dark:text-white" > {assistantsCreator}'s Assistants <a href={getHref(page.url, { existingKeys: { behaviour: "delete", keys: ["user", "modelId", "p", "q"] }, })} onclick={resetFilter} class="group" ><CarbonClose class="text-xs group-hover:text-gray-800 dark:group-hover:text-gray-300" /></a > </div> {#if isHuggingChat} <a href="https://hf.co/{assistantsCreator}" target="_blank" class="ml-auto flex items-center text-xs text-gray-500 underline hover:text-gray-800 dark:text-gray-400 dark:hover:text-gray-300" ><CarbonArrowUpRight class="mr-1 flex-none text-[0.58rem]" target="_blank" />View {assistantsCreator} on HF</a > {/if} {:else} <a href={getHref(page.url, { existingKeys: { behaviour: "delete", keys: ["user", "modelId", "p", "q"] }, })} onclick={resetFilter} class="flex items-center gap-1.5 rounded-full border px-3 py-1 {!assistantsCreator ? 
'border-gray-300 bg-gray-50 dark:border-gray-600 dark:bg-gray-700 dark:text-white' : 'border-transparent text-gray-400 hover:text-gray-800 dark:hover:text-gray-300'}" > <CarbonEarthAmerica class="text-xs" /> Community </a> {#if data.user?.username} <a href={getHref(page.url, { newKeys: { user: data.user.username }, existingKeys: { behaviour: "delete", keys: ["modelId", "p", "q"] }, })} onclick={resetFilter} class="flex items-center gap-1.5 truncate rounded-full border px-3 py-1 {assistantsCreator && createdByMe ? 'border-gray-300 bg-gray-50 dark:border-gray-600 dark:bg-gray-700 dark:text-white' : 'border-transparent text-gray-400 hover:text-gray-800 dark:hover:text-gray-300'}" >{data.user.username} </a> {/if} {/if} <div class="relative ml-auto flex h-[30px] w-40 items-center rounded-full border px-2 has-[:focus]:border-gray-400 dark:border-gray-600 sm:w-64" > <CarbonSearch class="pointer-events-none absolute left-2 text-xs text-gray-400" /> <input class="h-[30px] w-full bg-transparent pl-5 focus:outline-none" placeholder="Filter by name" value={filterValue} oninput={(e) => filterOnName(e.currentTarget.value)} bind:this={filterInputEl} maxlength="150" type="search" aria-label="Filter assistants by name" /> </div> <select bind:value={sortValue} onchange={sortAssistants} aria-label="Sort assistants" class="rounded-lg border border-gray-300 bg-gray-50 px-2 py-1 text-sm text-gray-900 focus:border-blue-700 focus:ring-blue-700 dark:border-gray-600 dark:bg-gray-700 dark:text-white dark:placeholder-gray-400" > <option value={SortKey.TRENDING}>{SortKey.TRENDING}</option> <option value={SortKey.POPULAR}>{SortKey.POPULAR}</option> </select> </div> <div class="mt-8 grid grid-cols-2 gap-3 sm:gap-5 md:grid-cols-3 lg:grid-cols-4"> {#each data.assistants as assistant (assistant._id)} {@const hasRag = assistant?.rag?.allowAllDomains || !!assistant?.rag?.allowedDomains?.length || !!assistant?.rag?.allowedLinks?.length || !!assistant?.dynamicPrompt} <button class="relative flex flex-col items-center justify-center overflow-hidden text-balance rounded-xl border bg-gray-50/50 px-4 py-6 text-center shadow hover:bg-gray-50 hover:shadow-inner dark:border-gray-800/70 dark:bg-gray-950/20 dark:hover:bg-gray-950/40 max-sm:px-4 sm:h-64 sm:pb-4 xl:pt-8 {!(assistant.review === ReviewStatus.APPROVED) && !createdByMe && data.user?.isAdmin ? 'border !border-red-500/30' : ''}" onclick={() => { if (data.settings.assistants.includes(assistant._id.toString())) { settings.instantSet({ activeModel: assistant._id.toString() }); goto(`${base}` || "/"); } else { goto(`${base}/assistant/${assistant._id}`); } }} > {#if assistant.userCount && assistant.userCount > 1} <div class="absolute right-3 top-3 flex items-center gap-1 text-xs text-gray-400" title="Number of users" > <CarbonUserMultiple class="text-xxs" />{formatUserCount(assistant.userCount)} </div> {/if} <div class="absolute left-3 top-3 flex items-center gap-1 text-xs text-gray-400"> {#if assistant.tools?.length} <div class="grid size-5 place-items-center rounded-full bg-purple-500/10" title="This assistant uses the websearch." > <CarbonTools class="text-xs text-purple-600" /> </div> {/if} {#if hasRag} <div class="grid size-5 place-items-center rounded-full bg-blue-500/10" title="This assistant uses the websearch." 
> <IconInternet classNames="text-sm text-blue-600" /> </div> {/if} </div> {#if assistant.avatar} <img src="{base}/settings/assistants/{assistant._id}/avatar.jpg" alt="Avatar" class="mb-2 aspect-square size-12 flex-none rounded-full object-cover sm:mb-6 sm:size-20" /> {:else} <div class="mb-2 flex aspect-square size-12 flex-none items-center justify-center rounded-full bg-gray-300 text-2xl font-bold uppercase text-gray-500 dark:bg-gray-800 sm:mb-6 sm:size-20" > {assistant.name[0]} </div> {/if} <h3 class="mb-2 line-clamp-2 max-w-full break-words text-center text-[.8rem] font-semibold leading-snug sm:text-sm" > {assistant.name} </h3> <p class="line-clamp-4 text-xs text-gray-700 dark:text-gray-400 sm:line-clamp-2"> {assistant.description} </p> {#if assistant.createdByName} <p class="mt-auto pt-2 text-xs text-gray-400 dark:text-gray-500"> Created by <a class="hover:underline" href="{base}/assistants?user={assistant.createdByName}" > {assistant.createdByName} </a> </p> {/if} </button> {:else} No assistants found {/each} </div> <Pagination classNames="w-full flex justify-center mt-14 mb-4" numItemsPerPage={data.numItemsPerPage} numTotalItems={data.numTotalItems} /> </div> </div>
chat-ui/src/routes/assistants/+page.svelte/0
{ "file_path": "chat-ui/src/routes/assistants/+page.svelte", "repo_id": "chat-ui", "token_count": 5369 }
import { dev } from "$app/environment"; import { base } from "$app/paths"; import { env } from "$env/dynamic/private"; import { collections } from "$lib/server/database"; import { redirect } from "@sveltejs/kit"; export const actions = { async default({ cookies, locals }) { await collections.sessions.deleteOne({ sessionId: locals.sessionId }); cookies.delete(env.COOKIE_NAME, { path: "/", // So that it works inside the space's iframe sameSite: dev || env.ALLOW_INSECURE_COOKIES === "true" ? "lax" : "none", secure: !dev && !(env.ALLOW_INSECURE_COOKIES === "true"), httpOnly: true, }); redirect(303, `${base}/`); }, };
chat-ui/src/routes/logout/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/logout/+page.server.ts", "repo_id": "chat-ui", "token_count": 237 }
<script lang="ts"> import { applyAction, enhance } from "$app/forms"; import { invalidateAll } from "$app/navigation"; import Modal from "$lib/components/Modal.svelte"; import { createEventDispatcher } from "svelte"; const dispatch = createEventDispatcher<{ close: void }>(); let reason = $state(""); </script> <Modal on:close> <form method="POST" action="?/report" use:enhance={() => { return async ({ result }) => { await applyAction(result); dispatch("close"); invalidateAll(); }; }} class="w-full min-w-64 p-4" > <span class="mb-1 text-sm font-semibold">Report content</span> <p class="text-sm text-gray-500"> Please provide a brief description of why you are reporting this content. </p> <textarea name="reportReason" class="mt-6 max-h-48 w-full resize-y rounded-lg border-2 border-gray-200 bg-gray-100 p-2 text-smd" placeholder="Reason(s) for the report" maxlength="128" bind:value={reason} ></textarea> <div class="flex w-full flex-row justify-between px-2 pt-4"> <button type="button" class="text-sm text-gray-700 hover:underline" onclick={() => dispatch("close")}>Cancel</button > <button type="submit" class="rounded-full bg-black px-4 py-2 text-sm font-semibold text-white md:px-8" disabled={!reason} class:bg-gray-200={!reason} class:!text-gray-400={!reason} > Submit report </button> </div> </form> </Modal>
chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/ReportModal.svelte/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/ReportModal.svelte", "repo_id": "chat-ui", "token_count": 596 }
<script lang="ts"> import { afterNavigate, goto } from "$app/navigation"; import { base } from "$app/paths"; import { page } from "$app/state"; import Modal from "$lib/components/Modal.svelte"; import ToolLogo from "$lib/components/ToolLogo.svelte"; import { env as envPublic } from "$env/dynamic/public"; import { useSettingsStore } from "$lib/stores/settings"; import { ReviewStatus } from "$lib/types/Review"; import ReportModal from "../../settings/(nav)/assistants/[assistantId]/ReportModal.svelte"; import { applyAction, enhance } from "$app/forms"; import CopyToClipBoardBtn from "$lib/components/CopyToClipBoardBtn.svelte"; import CarbonPen from "~icons/carbon/pen"; import CarbonTrash from "~icons/carbon/trash-can"; import CarbonCopy from "~icons/carbon/copy-file"; import CarbonFlag from "~icons/carbon/flag"; import CarbonLink from "~icons/carbon/link"; import CarbonStar from "~icons/carbon/star"; import CarbonLock from "~icons/carbon/locked"; let { data } = $props(); const settings = useSettingsStore(); let previousPage: string = $state(base); afterNavigate(({ from }) => { if (!from?.url.pathname.includes("tools/")) { previousPage = from?.url.toString() || previousPage; } }); const prefix = envPublic.PUBLIC_SHARE_PREFIX || `${envPublic.PUBLIC_ORIGIN || page.url.origin}${base}`; let shareUrl = $derived(`${prefix}/tools/${data.tool?._id}`); let isActive = $derived($settings.tools?.includes(data.tool?._id.toString())); let displayReportModal = $state(false); let currentModelSupportTools = $derived( data.models.find((m) => m.id === $settings.activeModel)?.tools ?? false ); </script> {#if displayReportModal} <ReportModal on:close={() => (displayReportModal = false)} /> {/if} <Modal on:close={() => goto(previousPage)} width="min-w-xl"> <div class="w-full min-w-64 p-8"> <div class="flex h-full flex-col gap-2"> <div class="flex flex-col sm:flex-row sm:gap-6"> <div class="mb-4 flex justify-center sm:mb-0"> {#key data.tool.color + data.tool.icon} <ToolLogo color={data.tool.color} icon={data.tool.icon} size="lg" /> {/key} </div> <div class="flex-1"> <div class="flex flex-wrap items-center gap-2"> <h1 class="break-words text-xl font-semibold"> {data.tool.displayName} </h1> <span class="inline rounded-full border px-2 py-0.5 text-sm leading-none text-gray-500" >public</span > </div> {#if data.tool?.baseUrl} {#if data.tool.baseUrl.startsWith("https://")} <p class="mb-2 break-words font-mono text-gray-500"> {data.tool.baseUrl} </p> {:else} <a href="https://huggingface.co/spaces/{data.tool.baseUrl}" target="_blank" class="mb-2 break-words font-mono text-gray-500 hover:underline" > {data.tool.baseUrl} </a> {/if} {/if} {#if data.tool.type === "community"} <p class="text-sm text-gray-500"> Added by <a class="underline" href="{base}/tools?user={data.tool?.createdByName}"> {data.tool?.createdByName} </a> <span class="text-gray-300">•</span> {#if data.tool.useCount === 1} 1 run {:else} {data.tool.useCount} runs {/if} </p> {/if} <div class="flex flex-wrap items-center gap-x-4 gap-y-2 whitespace-nowrap text-sm text-gray-500 hover:*:text-gray-800 max-sm:justify-center" > <div class="w-full sm:w-auto"> {#if currentModelSupportTools} <button class="{isActive ? 'bg-gray-100 text-gray-800' : 'bg-black !text-white'} mx-auto my-2 flex w-min items-center justify-center rounded-full px-3 py-1 text-base" name="Activate model" onclick={(e) => { e.stopPropagation(); if (isActive) { settings.instantSet({ tools: ($settings?.tools ?? 
[]).filter((t) => t !== data.tool._id), }); } else { settings.instantSet({ tools: [...($settings?.tools ?? []), data.tool._id], }); } }} > {isActive ? "Deactivate" : "Activate"} </button> {:else} <button disabled class="mx-auto my-2 flex w-min items-center justify-center rounded-full bg-gray-200 px-3 py-1 text-base text-gray-500" > Activate </button> {/if} </div> {#if data.tool?.createdByMe} <a href="{base}/tools/{data.tool?._id}/edit" class="underline" ><CarbonPen class="mr-1.5 inline text-xs" />Edit </a> <form method="POST" action="?/delete" use:enhance={async () => { return async ({ result }) => { if (result.type === "success") { $settings.tools = ($settings?.tools ?? []).filter((t) => t !== data.tool._id); goto(`${base}/tools`, { invalidateAll: true }); } else { await applyAction(result); } }; }} > <button type="submit" class="flex items-center underline" onclick={(event) => { if (!confirm("Are you sure you want to delete this tool?")) { event.preventDefault(); } }} > <CarbonTrash class="mr-1.5 inline text-xs" />Delete </button> </form> {:else if !!data.tool?.baseUrl} <a href="{base}/tools/{data.tool?._id}/edit" class="underline"> <CarbonPen class="mr-1.5 inline text-xs" />View spec </a> <form method="POST" action="?/edit" use:enhance class="hidden"> <button type="submit" class="underline"> <CarbonCopy class="mr-1.5 inline text-xs" />Duplicate</button > </form> {#if !data.tool?.reported} <button type="button" onclick={() => { displayReportModal = true; }} class="underline" > <CarbonFlag class="mr-1.5 inline text-xs" />Report </button> {:else} <button type="button" disabled class="text-gray-700"> <CarbonFlag class="mr-1.5 inline text-xs" />Reported</button > {/if} {/if} {#if data?.user?.isAdmin} <span class="rounded-full border px-2 py-0.5 text-sm leading-none text-gray-500" >{data.tool?.review?.toLocaleUpperCase()}</span > {#if !data.tool?.createdByMe} <form method="POST" action="?/delete" use:enhance> <button type="submit" class="flex items-center text-red-600 underline" onclick={(event) => { if (!confirm("Are you sure you want to delete this tool?")) { event.preventDefault(); } }} > <CarbonTrash class="mr-1.5 inline text-xs" />Delete </button> </form> {/if} {#if data.tool?.review === ReviewStatus.PRIVATE} <form method="POST" action="?/approve" use:enhance> <button type="submit" class="flex items-center text-green-600 underline"> <CarbonStar class="mr-1.5 inline text-xs" />Force feature</button > </form> {/if} {#if data.tool?.review === ReviewStatus.PENDING} <form method="POST" action="?/approve" use:enhance> <button type="submit" class="flex items-center text-green-600 underline"> <CarbonStar class="mr-1.5 inline text-xs" />Approve</button > </form> <form method="POST" action="?/deny" use:enhance> <button type="submit" class="flex items-center text-red-600"> <span class="mr-1.5 font-light no-underline">X</span> <span class="underline">Deny</span> </button> </form> {/if} {#if data.tool?.review === ReviewStatus.APPROVED || data.tool?.review === ReviewStatus.DENIED} <form method="POST" action="?/unrequest" use:enhance> <button type="submit" class="flex items-center text-red-600 underline"> <CarbonLock class="mr-1.5 inline text-xs " />Reset review</button > </form> {/if} {/if} {#if data.tool?.createdByMe && data.tool?.review === ReviewStatus.PRIVATE} <form method="POST" action="?/request" use:enhance={async ({ cancel }) => { const confirmed = confirm( "Are you sure you want to request this tool to be featured? Make sure you have tried the tool and that it works as expected. 
We will review your request once submitted." ); if (!confirmed) { cancel(); } }} > <button type="submit" class="flex items-center underline"> <CarbonStar class="mr-1.5 inline text-xs" />Request to be featured</button > </form> {/if} </div> </div> </div> {#if !currentModelSupportTools} <span class="relative text-sm text-gray-500"> You are currently not using a model that supports tools. Activate one <a href="{base}/models" class="underline">here</a>. </span> {:else} <p class="text-sm max-sm:hidden"> Tools are applications that the model can choose to call while you are chatting with it. </p> {/if} {#if data.tool.description} <div> <h2 class="text-lg font-semibold">Description</h2> <p class="pb-2">{data.tool.description}</p> </div> {/if} <div> <h2 class="text-lg font-semibold">Direct URL</h2> <p class="pb-2 text-sm text-gray-500">Share this link with people to use your tool.</p> <div class="flex flex-row items-center gap-2 rounded-lg border-2 border-gray-200 bg-gray-100 py-2 pl-3 pr-1.5" > <div class="relative flex-1 overflow-hidden"> <input disabled class="w-full truncate bg-inherit pr-16" value={shareUrl} /> <div class="absolute right-0 top-1/2 -translate-y-1/2"> <CopyToClipBoardBtn value={shareUrl} classNames="!border-none !shadow-none !py-0 !px-1 !rounded-md" > <div class="flex items-center gap-1.5 text-gray-500 hover:underline"> <CarbonLink />Copy </div> </CopyToClipBoardBtn> </div> </div> </div> </div> </div> </div></Modal >
chat-ui/src/routes/tools/[toolId]/+page.svelte/0
{ "file_path": "chat-ui/src/routes/tools/[toolId]/+page.svelte", "repo_id": "chat-ui", "token_count": 4963 }
import json import os import tempfile import datasets from datasets.arrow_writer import ArrowWriter from datasets.features import Array2D from utils import generate_examples, get_duration SHAPE_TEST_1 = (30, 487) SHAPE_TEST_2 = (36, 1024) SPEED_TEST_SHAPE = (100, 100) SPEED_TEST_N_EXAMPLES = 100 DEFAULT_FEATURES = datasets.Features( {"text": Array2D(SHAPE_TEST_1, dtype="float32"), "image": Array2D(SHAPE_TEST_2, dtype="float32")} ) RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__) RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def write(my_features, dummy_data, tmp_dir): with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer: for key, record in dummy_data: example = my_features.encode_example(record) writer.write(example) num_examples, num_bytes = writer.finalize() @get_duration def read_unformated(feats, tmp_dir): dataset = datasets.Dataset.from_file( filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats) ) for _ in dataset: pass @get_duration def read_formatted_as_numpy(feats, tmp_dir): dataset = datasets.Dataset.from_file( filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats) ) dataset.set_format("numpy") for _ in dataset: pass @get_duration def read_batch_unformated(feats, tmp_dir): batch_size = 10 dataset = datasets.Dataset.from_file( filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats) ) for i in range(0, len(dataset), batch_size): _ = dataset[i : i + batch_size] @get_duration def read_batch_formatted_as_numpy(feats, tmp_dir): batch_size = 10 dataset = datasets.Dataset.from_file( filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats) ) dataset.set_format("numpy") for i in range(0, len(dataset), batch_size): _ = dataset[i : i + batch_size] @get_duration def read_col_unformated(feats, tmp_dir): dataset = datasets.Dataset.from_file( filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats) ) for col in feats: _ = dataset[col] @get_duration def read_col_formatted_as_numpy(feats, tmp_dir): dataset = datasets.Dataset.from_file( filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats) ) dataset.set_format("numpy") for col in feats: _ = dataset[col] def benchmark_array_xd(): times = {} read_functions = ( read_unformated, read_formatted_as_numpy, read_batch_unformated, read_batch_formatted_as_numpy, read_col_unformated, read_col_formatted_as_numpy, ) with tempfile.TemporaryDirectory() as tmp_dir: feats = datasets.Features({"image": Array2D(SPEED_TEST_SHAPE, dtype="float32")}) data = generate_examples(features=feats, num_examples=SPEED_TEST_N_EXAMPLES) times["write_array2d"] = write(feats, data, tmp_dir) for read_func in read_functions: times[read_func.__name__ + " after write_array2d"] = read_func(feats, tmp_dir) with tempfile.TemporaryDirectory() as tmp_dir: # don't use fixed length for fair comparison # feats = datasets.Features( # {"image": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), SPEED_TEST_SHAPE[1]), SPEED_TEST_SHAPE[0])} # ) feats = datasets.Features({"image": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))}) data = generate_examples( features=feats, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"image": SPEED_TEST_SHAPE} ) times["write_nested_sequence"] = write(feats, data, tmp_dir) for read_func in read_functions: times[read_func.__name__ 
+ " after write_nested_sequence"] = read_func(feats, tmp_dir) with tempfile.TemporaryDirectory() as tmp_dir: # don't use fixed length for fair comparison # feats = datasets.Features( # {"image": datasets.Sequence(datasets.Value("float32"), SPEED_TEST_SHAPE[0] * SPEED_TEST_SHAPE[1])} # ) feats = datasets.Features({"image": datasets.Sequence(datasets.Value("float32"))}) data = generate_examples( features=feats, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"image": [SPEED_TEST_SHAPE[0] * SPEED_TEST_SHAPE[1]]}, ) times["write_flattened_sequence"] = write(feats, data, tmp_dir) for read_func in read_functions: times[read_func.__name__ + " after write_flattened_sequence"] = read_func(feats, tmp_dir) with open(RESULTS_FILE_PATH, "wb") as f: f.write(json.dumps(times).encode("utf-8")) if __name__ == "__main__": # useful to run the profiler benchmark_array_xd()
datasets/benchmarks/benchmark_array_xd.py/0
{ "file_path": "datasets/benchmarks/benchmark_array_xd.py", "repo_id": "datasets", "token_count": 2176 }